author     Christopher Speller <crspeller@gmail.com>  2018-01-29 14:17:40 -0800
committer  GitHub <noreply@github.com>                2018-01-29 14:17:40 -0800
commit     961c04cae992eadb42d286d2f85f8a675bdc68c8 (patch)
tree       3408f2d06f847e966c53485e2d54c692cdd037c1 /vendor
parent     8d66523ba7d9a77129844be476732ebfd5272d64 (diff)
download   chat-961c04cae992eadb42d286d2f85f8a675bdc68c8.tar.gz
           chat-961c04cae992eadb42d286d2f85f8a675bdc68c8.tar.bz2
           chat-961c04cae992eadb42d286d2f85f8a675bdc68c8.zip
Upgrading server dependencies (#8154)
Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/NYTimes/gziphandler/gzip.go | 2
-rw-r--r--  vendor/github.com/NYTimes/gziphandler/gzip_test.go | 19
-rw-r--r--  vendor/github.com/avct/uasurfer/.gitignore | 56
-rw-r--r--  vendor/github.com/avct/uasurfer/.travis.yml | 11
-rw-r--r--  vendor/github.com/avct/uasurfer/README.md | 169
-rw-r--r--  vendor/github.com/avct/uasurfer/browser.go | 192
-rw-r--r--  vendor/github.com/avct/uasurfer/const_string.go | 49
-rw-r--r--  vendor/github.com/avct/uasurfer/device.go | 60
-rw-r--r--  vendor/github.com/avct/uasurfer/system.go | 332
-rw-r--r--  vendor/github.com/avct/uasurfer/uasurfer.go | 227
-rw-r--r--  vendor/github.com/avct/uasurfer/uasurfer_test.go | 1064
-rw-r--r--  vendor/github.com/cpanato/html2text/.travis.yml | 14
-rw-r--r--  vendor/github.com/cpanato/html2text/README.md | 108
-rw-r--r--  vendor/github.com/cpanato/html2text/html2text.go | 312
-rw-r--r--  vendor/github.com/cpanato/html2text/html2text_test.go | 674
-rwxr-xr-x  vendor/github.com/cpanato/html2text/testdata/utf8.html | 22
-rwxr-xr-x  vendor/github.com/cpanato/html2text/testdata/utf8_with_bom.xhtml | 24
-rw-r--r--  vendor/github.com/davecgh/go-spew/.travis.yml | 27
-rw-r--r--  vendor/github.com/davecgh/go-spew/LICENSE | 4
-rw-r--r--  vendor/github.com/davecgh/go-spew/README.md | 21
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/bypass.go | 8
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/bypasssafe.go | 2
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/common.go | 4
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/common_test.go | 2
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/config.go | 11
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/doc.go | 11
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/dump.go | 18
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/dump_test.go | 4
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go | 8
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/example_test.go | 2
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/format.go | 6
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/format_test.go | 8
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/internal_test.go | 7
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go | 2
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/spew.go | 2
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/spew_test.go | 13
-rw-r--r--  vendor/github.com/disintegration/imaging/helpers.go | 43
-rw-r--r--  vendor/github.com/fsnotify/fsnotify/.travis.yml | 4
-rw-r--r--  vendor/github.com/fsnotify/fsnotify/AUTHORS | 6
-rw-r--r--  vendor/github.com/fsnotify/fsnotify/CHANGELOG.md | 16
-rw-r--r--  vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md | 6
-rw-r--r--  vendor/github.com/fsnotify/fsnotify/README.md | 37
-rw-r--r--  vendor/github.com/fsnotify/fsnotify/fsnotify.go | 4
-rw-r--r--  vendor/github.com/fsnotify/fsnotify/fsnotify_test.go | 32
-rw-r--r--  vendor/github.com/fsnotify/fsnotify/inotify.go | 66
-rw-r--r--  vendor/github.com/fsnotify/fsnotify/inotify_test.go | 109
-rw-r--r--  vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go | 4
-rw-r--r--  vendor/github.com/fsnotify/fsnotify/kqueue.go | 62
-rw-r--r--  vendor/github.com/go-ldap/ldap/.travis.yml | 6
-rw-r--r--  vendor/github.com/go-ldap/ldap/Makefile | 12
-rw-r--r--  vendor/github.com/go-ldap/ldap/atomic_value.go | 13
-rw-r--r--  vendor/github.com/go-ldap/ldap/atomic_value_go13.go | 28
-rw-r--r--  vendor/github.com/go-ldap/ldap/conn.go | 73
-rw-r--r--  vendor/github.com/go-ldap/ldap/conn_test.go | 10
-rw-r--r--  vendor/github.com/go-ldap/ldap/debug.go | 2
-rw-r--r--  vendor/github.com/go-ldap/ldap/dn.go | 7
-rw-r--r--  vendor/github.com/go-ldap/ldap/dn_test.go | 12
-rw-r--r--  vendor/github.com/go-ldap/ldap/error.go | 7
-rw-r--r--  vendor/github.com/go-ldap/ldap/error_test.go | 4
-rw-r--r--  vendor/github.com/go-ldap/ldap/example_test.go | 12
-rw-r--r--  vendor/github.com/go-ldap/ldap/filter.go | 5
-rw-r--r--  vendor/github.com/go-ldap/ldap/filter_test.go | 6
-rw-r--r--  vendor/github.com/go-ldap/ldap/ldap.go | 2
-rw-r--r--  vendor/github.com/go-ldap/ldap/passwdmodify.go | 8
-rw-r--r--  vendor/github.com/go-ldap/ldap/search_test.go | 6
-rw-r--r--  vendor/github.com/go-redis/redis/.travis.yml | 2
-rw-r--r--  vendor/github.com/go-redis/redis/README.md | 5
-rw-r--r--  vendor/github.com/go-redis/redis/cluster.go | 76
-rw-r--r--  vendor/github.com/go-redis/redis/cluster_test.go | 26
-rw-r--r--  vendor/github.com/go-redis/redis/command.go | 42
-rw-r--r--  vendor/github.com/go-redis/redis/commands.go | 10
-rw-r--r--  vendor/github.com/go-redis/redis/commands_test.go | 11
-rw-r--r--  vendor/github.com/go-redis/redis/example_test.go | 4
-rw-r--r--  vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go | 8
-rw-r--r--  vendor/github.com/go-redis/redis/internal/proto/scan.go | 5
-rw-r--r--  vendor/github.com/go-redis/redis/internal/safe.go | 4
-rw-r--r--  vendor/github.com/go-redis/redis/internal/unsafe.go | 19
-rw-r--r--  vendor/github.com/go-redis/redis/options_test.go | 2
-rw-r--r--  vendor/github.com/go-redis/redis/parser.go | 14
-rw-r--r--  vendor/github.com/go-redis/redis/redis.go | 2
-rw-r--r--  vendor/github.com/go-redis/redis/ring.go | 9
-rw-r--r--  vendor/github.com/go-redis/redis/universal.go | 2
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/.gitignore | 1
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/.travis.yml | 93
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf | 5
-rwxr-xr-x  vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh | 8
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/AUTHORS | 27
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/README.md | 113
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/appengine.go | 2
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/benchmark_go18_test.go | 93
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/benchmark_test.go | 6
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/collations.go | 1
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/connection.go | 148
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/connection_go18.go | 202
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/connection_go18_test.go | 30
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/const.go | 9
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/driver.go | 14
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/driver_go18_test.go | 798
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/driver_test.go | 308
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/dsn.go | 132
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/dsn_test.go | 86
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/errors.go | 79
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/fields.go | 194
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/infile.go | 3
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/packets.go | 213
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/packets_test.go | 24
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/rows.go | 174
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/statement.go | 85
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/statement_test.go | 126
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/transaction.go | 4
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/utils.go | 84
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/utils_go17.go | 40
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/utils_go18.go | 49
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/utils_go18_test.go | 54
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/utils_test.go | 80
-rw-r--r--  vendor/github.com/golang/protobuf/jsonpb/jsonpb.go | 3
-rw-r--r--  vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go | 1
-rw-r--r--  vendor/github.com/golang/protobuf/proto/discard.go | 151
-rw-r--r--  vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md | 11
-rw-r--r--  vendor/github.com/gorilla/mux/README.md | 209
-rw-r--r--  vendor/github.com/gorilla/mux/doc.go | 65
-rw-r--r--  vendor/github.com/gorilla/mux/example_route_test.go | 51
-rw-r--r--  vendor/github.com/gorilla/mux/middleware.go | 28
-rw-r--r--  vendor/github.com/gorilla/mux/middleware_test.go | 336
-rw-r--r--  vendor/github.com/gorilla/mux/mux.go | 20
-rw-r--r--  vendor/github.com/gorilla/mux/mux_test.go | 314
-rw-r--r--  vendor/github.com/gorilla/mux/old_test.go | 2
-rw-r--r--  vendor/github.com/gorilla/mux/regexp.go | 74
-rw-r--r--  vendor/github.com/gorilla/mux/route.go | 26
-rw-r--r--  vendor/github.com/gorilla/mux/test_helpers.go | 18
-rw-r--r--  vendor/github.com/gorilla/websocket/.travis.yml | 1
-rw-r--r--  vendor/github.com/gorilla/websocket/client.go | 140
-rw-r--r--  vendor/github.com/gorilla/websocket/client_server_test.go | 124
-rw-r--r--  vendor/github.com/gorilla/websocket/client_test.go | 40
-rw-r--r--  vendor/github.com/gorilla/websocket/conn.go | 44
-rw-r--r--  vendor/github.com/gorilla/websocket/conn_test.go | 3
-rw-r--r--  vendor/github.com/gorilla/websocket/doc.go | 56
-rw-r--r--  vendor/github.com/gorilla/websocket/examples/chat/README.md | 2
-rw-r--r--  vendor/github.com/gorilla/websocket/examples/chat/client.go | 4
-rw-r--r--  vendor/github.com/gorilla/websocket/examples/echo/server.go | 1
-rw-r--r--  vendor/github.com/gorilla/websocket/json.go | 11
-rw-r--r--  vendor/github.com/gorilla/websocket/mask.go | 1
-rw-r--r--  vendor/github.com/gorilla/websocket/proxy.go | 77
-rw-r--r--  vendor/github.com/gorilla/websocket/server.go | 39
-rw-r--r--  vendor/github.com/gorilla/websocket/server_test.go | 18
-rw-r--r--  vendor/github.com/gorilla/websocket/util.go | 33
-rw-r--r--  vendor/github.com/gorilla/websocket/util_test.go | 49
-rw-r--r--  vendor/github.com/gorilla/websocket/x_net_proxy.go | 473
-rw-r--r--  vendor/github.com/hashicorp/go-immutable-radix/iradix_test.go | 12
-rw-r--r--  vendor/github.com/lib/pq/conn.go | 10
-rw-r--r--  vendor/github.com/lib/pq/conn_test.go | 58
-rw-r--r--  vendor/github.com/lib/pq/error.go | 1
-rw-r--r--  vendor/github.com/lib/pq/notify.go | 4
-rw-r--r--  vendor/github.com/mailru/easyjson/.gitignore | 4
-rw-r--r--  vendor/github.com/mailru/easyjson/.travis.yml | 9
-rw-r--r--  vendor/github.com/mailru/easyjson/LICENSE | 7
-rw-r--r--  vendor/github.com/mailru/easyjson/Makefile | 56
-rw-r--r--  vendor/github.com/mailru/easyjson/README.md | 331
-rw-r--r--  vendor/github.com/mailru/easyjson/benchmark/codec_test.go | 279
-rw-r--r--  vendor/github.com/mailru/easyjson/benchmark/data.go | 148
-rw-r--r--  vendor/github.com/mailru/easyjson/benchmark/data_codec.go | 6914
-rw-r--r--  vendor/github.com/mailru/easyjson/benchmark/data_ffjson.go | 6723
-rw-r--r--  vendor/github.com/mailru/easyjson/benchmark/data_var.go | 350
-rw-r--r--  vendor/github.com/mailru/easyjson/benchmark/default_test.go | 118
-rw-r--r--  vendor/github.com/mailru/easyjson/benchmark/dummy_test.go | 11
-rw-r--r--  vendor/github.com/mailru/easyjson/benchmark/easyjson_test.go | 184
-rw-r--r--  vendor/github.com/mailru/easyjson/benchmark/example.json | 415
-rw-r--r--  vendor/github.com/mailru/easyjson/benchmark/ffjson_test.go | 190
-rw-r--r--  vendor/github.com/mailru/easyjson/benchmark/jsoniter_test.go | 119
-rwxr-xr-x  vendor/github.com/mailru/easyjson/benchmark/ujson.sh | 7
-rw-r--r--  vendor/github.com/mailru/easyjson/bootstrap/bootstrap.go | 188
-rw-r--r--  vendor/github.com/mailru/easyjson/buffer/pool.go | 270
-rw-r--r--  vendor/github.com/mailru/easyjson/buffer/pool_test.go | 107
-rw-r--r--  vendor/github.com/mailru/easyjson/easyjson/main.go | 106
-rw-r--r--  vendor/github.com/mailru/easyjson/gen/decoder.go | 489
-rw-r--r--  vendor/github.com/mailru/easyjson/gen/encoder.go | 382
-rw-r--r--  vendor/github.com/mailru/easyjson/gen/generator.go | 523
-rw-r--r--  vendor/github.com/mailru/easyjson/gen/generator_test.go | 87
-rw-r--r--  vendor/github.com/mailru/easyjson/helpers.go | 78
-rw-r--r--  vendor/github.com/mailru/easyjson/jlexer/bytestostr.go | 24
-rw-r--r--  vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go | 13
-rw-r--r--  vendor/github.com/mailru/easyjson/jlexer/error.go | 15
-rw-r--r--  vendor/github.com/mailru/easyjson/jlexer/lexer.go | 1141
-rw-r--r--  vendor/github.com/mailru/easyjson/jlexer/lexer_test.go | 311
-rw-r--r--  vendor/github.com/mailru/easyjson/jwriter/writer.go | 377
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Bool.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Float32.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Float64.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Int.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Int16.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Int32.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Int64.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Int8.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_String.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Uint.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Uint16.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Uint32.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Uint64.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/gotemplate_Uint8.go | 79
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/optional/opt.go | 80
-rw-r--r--  vendor/github.com/mailru/easyjson/opt/opts.go | 22
-rw-r--r--  vendor/github.com/mailru/easyjson/parser/parser.go | 97
-rw-r--r--  vendor/github.com/mailru/easyjson/parser/parser_unix.go | 42
-rw-r--r--  vendor/github.com/mailru/easyjson/parser/parser_windows.go | 49
-rw-r--r--  vendor/github.com/mailru/easyjson/raw.go | 45
-rw-r--r--  vendor/github.com/mailru/easyjson/tests/basic_test.go | 231
-rw-r--r--  vendor/github.com/mailru/easyjson/tests/data.go | 759
-rw-r--r--  vendor/github.com/mailru/easyjson/tests/errors.go | 26
-rw-r--r--  vendor/github.com/mailru/easyjson/tests/errors_test.go | 285
-rw-r--r--  vendor/github.com/mailru/easyjson/tests/named_type.go | 22
-rw-r--r--  vendor/github.com/mailru/easyjson/tests/nested_easy.go | 25
-rw-r--r--  vendor/github.com/mailru/easyjson/tests/nothing.go | 3
-rw-r--r--  vendor/github.com/mailru/easyjson/tests/omitempty.go | 12
-rw-r--r--  vendor/github.com/mailru/easyjson/tests/opt_test.go | 70
-rw-r--r--  vendor/github.com/mailru/easyjson/tests/required_test.go | 28
-rw-r--r--  vendor/github.com/mailru/easyjson/tests/snake.go | 10
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml | 6
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS | 15
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore | 1
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile | 7
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go | 25
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go | 103
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY | 4
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go | 4029
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto | 540
-rw-r--r--  vendor/github.com/miekg/dns/README.md | 2
-rw-r--r--  vendor/github.com/miekg/dns/server.go | 13
-rw-r--r--  vendor/github.com/miekg/dns/version.go | 2
-rw-r--r--  vendor/github.com/minio/go-homedir/README.md | 16
-rw-r--r--  vendor/github.com/minio/go-homedir/dir_posix.go | 61
-rw-r--r--  vendor/github.com/minio/go-homedir/dir_windows.go | 25
-rw-r--r--  vendor/github.com/minio/go-homedir/homedir.go | 67
-rw-r--r--  vendor/github.com/minio/minio-go/.gitignore | 3
-rw-r--r--  vendor/github.com/minio/minio-go/.travis.yml | 26
-rw-r--r--  vendor/github.com/minio/minio-go/MAINTAINERS.md | 27
-rw-r--r--  vendor/github.com/minio/minio-go/Makefile | 15
-rw-r--r--  vendor/github.com/minio/minio-go/NOTICE | 2
-rw-r--r--  vendor/github.com/minio/minio-go/README.md | 28
-rw-r--r--  vendor/github.com/minio/minio-go/README_zh_CN.md | 246
-rw-r--r--  vendor/github.com/minio/minio-go/api-compose-object.go | 139
-rw-r--r--  vendor/github.com/minio/minio-go/api-compose-object_test.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/api-datatypes.go | 5
-rw-r--r--  vendor/github.com/minio/minio-go/api-error-response.go | 71
-rw-r--r--  vendor/github.com/minio/minio-go/api-error-response_test.go | 108
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-object-context.go (renamed from vendor/github.com/minio/minio-go/transport_1_5.go) | 25
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-object-file.go | 35
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-object.go | 101
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-options.go (renamed from vendor/github.com/minio/minio-go/request-headers.go) | 67
-rw-r--r--  vendor/github.com/minio/minio-go/api-get-policy.go | 12
-rw-r--r--  vendor/github.com/minio/minio-go/api-list.go | 40
-rw-r--r--  vendor/github.com/minio/minio-go/api-notification.go | 29
-rw-r--r--  vendor/github.com/minio/minio-go/api-presigned.go | 58
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-bucket.go | 45
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-common.go | 34
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-context.go | 39
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-copy.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-encrypted.go | 20
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-file-context.go | 64
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-file.go | 51
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-multipart.go | 97
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object-streaming.go | 115
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object.go | 240
-rw-r--r--  vendor/github.com/minio/minio-go/api-put-object_test.go | 62
-rw-r--r--  vendor/github.com/minio/minio-go/api-remove.go | 46
-rw-r--r--  vendor/github.com/minio/minio-go/api-s3-datatypes.go | 5
-rw-r--r--  vendor/github.com/minio/minio-go/api-stat.go | 32
-rw-r--r--  vendor/github.com/minio/minio-go/api.go | 185
-rw-r--r--  vendor/github.com/minio/minio-go/api_unit_test.go | 176
-rw-r--r--  vendor/github.com/minio/minio-go/appveyor.yml | 4
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-cache.go | 7
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-cache_test.go | 9
-rw-r--r--  vendor/github.com/minio/minio-go/bucket-notification.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/constants.go | 8
-rw-r--r--  vendor/github.com/minio/minio-go/core.go | 67
-rw-r--r--  vendor/github.com/minio/minio-go/core_test.go | 334
-rw-r--r--  vendor/github.com/minio/minio-go/docs/API.md | 740
-rw-r--r--  vendor/github.com/minio/minio-go/docs/checker.go.template | 21
-rw-r--r--  vendor/github.com/minio/minio-go/docs/validator.go | 227
-rw-r--r--  vendor/github.com/minio/minio-go/docs/zh_CN/API.md | 1820
-rw-r--r--  vendor/github.com/minio/minio-go/docs/zh_CN/CONTRIBUTING.md | 22
-rw-r--r--  vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/bucketexists.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/composeobject.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/copyobject.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/fgetobject-context.go | 54
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/fgetobject.go | 5
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go | 80
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/fputobject-context.go | 53
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/fputobject.go | 7
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/getobject-context.go | 73
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/getobject.go | 5
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listbuckets.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listobjects.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/makebucket.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go | 5
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/putobject-context.go | 68
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go | 20
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go | 9
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go | 10
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/putobject-streaming.go | 5
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/putobject.go | 9
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/removebucket.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/removeobject.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/removeobjects.go | 12
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/examples/s3/statobject.go | 5
-rw-r--r--  vendor/github.com/minio/minio-go/functional_tests.go | 4347
-rw-r--r--  vendor/github.com/minio/minio-go/get-options_test.go (renamed from vendor/github.com/minio/minio-go/request-headers_test.go) | 11
-rw-r--r--  vendor/github.com/minio/minio-go/hook-reader.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/chain.go | 34
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go | 11
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/credentials.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/doc.go | 17
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/env_test.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go | 4
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go | 4
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/file_test.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go | 17
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go | 17
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/static.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/credentials/static_test.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/encrypt/interface.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/encrypt/keys.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go | 7
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go | 11
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go | 65
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go | 5
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4_test.go | 50
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/test-utils_test.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/utils.go | 12
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go | 6
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3utils/utils.go | 68
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go | 107
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/set/stringset.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/pkg/set/stringset_test.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/post-policy.go | 39
-rw-r--r--  vendor/github.com/minio/minio-go/retry-continous.go | 17
-rw-r--r--  vendor/github.com/minio/minio-go/retry.go | 5
-rw-r--r--  vendor/github.com/minio/minio-go/s3-endpoints.go | 8
-rw-r--r--  vendor/github.com/minio/minio-go/s3-error.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/test-utils_test.go | 3
-rw-r--r--  vendor/github.com/minio/minio-go/transport.go | 2
-rw-r--r--  vendor/github.com/minio/minio-go/transport_1_6.go | 40
-rw-r--r--  vendor/github.com/minio/minio-go/utils.go | 107
-rw-r--r--  vendor/github.com/minio/minio-go/utils_test.go | 116
-rw-r--r--  vendor/github.com/mitchellh/go-homedir/LICENSE (renamed from vendor/github.com/minio/go-homedir/LICENSE) | 0
-rw-r--r--  vendor/github.com/mitchellh/go-homedir/README.md | 14
-rw-r--r--  vendor/github.com/mitchellh/go-homedir/homedir.go | 137
-rw-r--r--  vendor/github.com/mitchellh/go-homedir/homedir_test.go (renamed from vendor/github.com/minio/go-homedir/homedir_test.go) | 2
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/.travis.yml | 11
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/README.md | 2
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/mapstructure.go | 89
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go | 18
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/mapstructure_test.go | 175
-rw-r--r--  vendor/github.com/olivere/elastic/.gitignore | 33
-rw-r--r--  vendor/github.com/olivere/elastic/.travis.yml | 15
-rw-r--r--  vendor/github.com/olivere/elastic/CHANGELOG-3.0.md | 363
-rw-r--r--  vendor/github.com/olivere/elastic/CHANGELOG-5.0.md | 195
-rw-r--r--  vendor/github.com/olivere/elastic/CHANGELOG-6.0.md | 18
-rw-r--r--  vendor/github.com/olivere/elastic/CODE_OF_CONDUCT.md | 46
-rw-r--r--  vendor/github.com/olivere/elastic/CONTRIBUTING.md | 40
-rw-r--r--  vendor/github.com/olivere/elastic/CONTRIBUTORS | 123
-rw-r--r--  vendor/github.com/olivere/elastic/ISSUE_TEMPLATE.md | 18
-rw-r--r--  vendor/github.com/olivere/elastic/LICENSE (renamed from vendor/github.com/cpanato/html2text/LICENSE) | 19
-rw-r--r--  vendor/github.com/olivere/elastic/README.md | 391
-rw-r--r--  vendor/github.com/olivere/elastic/acknowledged_response.go | 13
-rw-r--r--  vendor/github.com/olivere/elastic/backoff.go | 148
-rw-r--r--  vendor/github.com/olivere/elastic/backoff_test.go | 140
-rw-r--r--  vendor/github.com/olivere/elastic/bulk.go | 417
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_delete_request.go | 166
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_delete_request_easyjson.go | 230
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_delete_request_test.go | 79
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_index_request.go | 239
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_index_request_easyjson.go | 262
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_index_request_test.go | 116
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_processor.go | 547
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_processor_test.go | 425
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_request.go | 17
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_test.go | 600
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_update_request.go | 298
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_update_request_easyjson.go | 461
-rw-r--r--  vendor/github.com/olivere/elastic/bulk_update_request_test.go | 149
-rw-r--r--  vendor/github.com/olivere/elastic/canonicalize.go | 34
-rw-r--r--  vendor/github.com/olivere/elastic/canonicalize_test.go | 72
-rw-r--r--  vendor/github.com/olivere/elastic/clear_scroll.go | 108
-rw-r--r--  vendor/github.com/olivere/elastic/clear_scroll_test.go | 87
-rw-r--r--  vendor/github.com/olivere/elastic/client.go | 1786
-rw-r--r--  vendor/github.com/olivere/elastic/client_test.go | 1319
-rw-r--r--  vendor/github.com/olivere/elastic/cluster-test/Makefile | 16
-rw-r--r--  vendor/github.com/olivere/elastic/cluster-test/README.md | 63
-rw-r--r--  vendor/github.com/olivere/elastic/cluster-test/cluster-test.go | 361
-rw-r--r--  vendor/github.com/olivere/elastic/cluster_health.go | 248
-rw-r--r--  vendor/github.com/olivere/elastic/cluster_health_test.go | 119
-rw-r--r--  vendor/github.com/olivere/elastic/cluster_state.go | 288
-rw-r--r--  vendor/github.com/olivere/elastic/cluster_state_test.go | 93
-rw-r--r--  vendor/github.com/olivere/elastic/cluster_stats.go | 352
-rw-r--r--  vendor/github.com/olivere/elastic/cluster_stats_test.go | 92
-rw-r--r--  vendor/github.com/olivere/elastic/config/config.go | 90
-rw-r--r--  vendor/github.com/olivere/elastic/config/config_test.go | 45
-rw-r--r--  vendor/github.com/olivere/elastic/config/doc.go | 9
-rw-r--r--  vendor/github.com/olivere/elastic/connection.go | 90
-rw-r--r--  vendor/github.com/olivere/elastic/count.go | 315
-rw-r--r--  vendor/github.com/olivere/elastic/count_test.go | 127
-rw-r--r--  vendor/github.com/olivere/elastic/decoder.go | 26
-rw-r--r--  vendor/github.com/olivere/elastic/decoder_test.go | 50
-rw-r--r--  vendor/github.com/olivere/elastic/delete.go | 226
-rw-r--r--  vendor/github.com/olivere/elastic/delete_by_query.go | 654
-rw-r--r--  vendor/github.com/olivere/elastic/delete_by_query_test.go | 146
-rw-r--r--  vendor/github.com/olivere/elastic/delete_test.go | 134
-rw-r--r--  vendor/github.com/olivere/elastic/doc.go | 51
-rw-r--r--  vendor/github.com/olivere/elastic/errors.go | 147
-rw-r--r--  vendor/github.com/olivere/elastic/errors_test.go | 295
-rw-r--r--  vendor/github.com/olivere/elastic/etc/elasticsearch.yml | 15
-rw-r--r--  vendor/github.com/olivere/elastic/etc/ingest-geoip/.gitkeep | 0
-rw-r--r--  vendor/github.com/olivere/elastic/etc/jvm.options | 100
-rw-r--r--  vendor/github.com/olivere/elastic/etc/log4j2.properties | 74
-rw-r--r--  vendor/github.com/olivere/elastic/etc/scripts/.gitkeep | 0
-rw-r--r--  vendor/github.com/olivere/elastic/example_test.go | 530
-rw-r--r--  vendor/github.com/olivere/elastic/exists.go | 181
-rw-r--r--  vendor/github.com/olivere/elastic/exists_test.go | 53
-rw-r--r--  vendor/github.com/olivere/elastic/explain.go | 326
-rw-r--r--  vendor/github.com/olivere/elastic/explain_test.go | 44
-rw-r--r--  vendor/github.com/olivere/elastic/fetch_source_context.go | 90
-rw-r--r--  vendor/github.com/olivere/elastic/fetch_source_context_test.go | 125
-rw-r--r--  vendor/github.com/olivere/elastic/field_caps.go | 202
-rw-r--r--  vendor/github.com/olivere/elastic/field_caps_test.go | 146
-rw-r--r--  vendor/github.com/olivere/elastic/geo_point.go | 48
-rw-r--r--  vendor/github.com/olivere/elastic/geo_point_test.go | 24
-rw-r--r--  vendor/github.com/olivere/elastic/get.go | 260
-rw-r--r--  vendor/github.com/olivere/elastic/get_test.go | 166
-rw-r--r--  vendor/github.com/olivere/elastic/highlight.go | 469
-rw-r--r--  vendor/github.com/olivere/elastic/highlight_test.go | 211
-rw-r--r--  vendor/github.com/olivere/elastic/index.go | 297
-rw-r--r--  vendor/github.com/olivere/elastic/index_test.go | 280
-rw-r--r--  vendor/github.com/olivere/elastic/indices_analyze.go | 284
-rw-r--r--  vendor/github.com/olivere/elastic/indices_analyze_test.go | 85
-rw-r--r--  vendor/github.com/olivere/elastic/indices_close.go | 159
-rw-r--r--  vendor/github.com/olivere/elastic/indices_close_test.go | 84
-rw-r--r--  vendor/github.com/olivere/elastic/indices_create.go | 136
-rw-r--r--  vendor/github.com/olivere/elastic/indices_create_test.go | 63
-rw-r--r--  vendor/github.com/olivere/elastic/indices_delete.go | 133
-rw-r--r--  vendor/github.com/olivere/elastic/indices_delete_template.go | 128
-rw-r--r--  vendor/github.com/olivere/elastic/indices_delete_test.go | 23
-rw-r--r--  vendor/github.com/olivere/elastic/indices_exists.go | 155
-rw-r--r--  vendor/github.com/olivere/elastic/indices_exists_template.go | 118
-rw-r--r--  vendor/github.com/olivere/elastic/indices_exists_template_test.go | 68
-rw-r--r--  vendor/github.com/olivere/elastic/indices_exists_test.go | 23
-rw-r--r--  vendor/github.com/olivere/elastic/indices_exists_type.go | 165
-rw-r--r--  vendor/github.com/olivere/elastic/indices_exists_type_test.go | 135
-rw-r--r--  vendor/github.com/olivere/elastic/indices_flush.go | 173
-rw-r--r--  vendor/github.com/olivere/elastic/indices_flush_test.go | 70
-rw-r--r--  vendor/github.com/olivere/elastic/indices_forcemerge.go | 193
-rw-r--r--  vendor/github.com/olivere/elastic/indices_forcemerge_test.go | 57
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get.go | 206
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get_aliases.go | 161
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get_aliases_test.go | 181
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get_field_mapping.go | 187
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get_field_mapping_test.go | 55
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get_mapping.go | 174
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get_mapping_test.go | 50
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get_settings.go | 187
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get_settings_test.go | 82
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get_template.go | 133
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get_template_test.go | 41
-rw-r--r--  vendor/github.com/olivere/elastic/indices_get_test.go | 98
-rw-r--r--  vendor/github.com/olivere/elastic/indices_open.go | 163
-rw-r--r--  vendor/github.com/olivere/elastic/indices_open_test.go (renamed from vendor/gopkg.in/olivere/elastic.v5/delete_template_test.go) | 8
-rw-r--r--  vendor/github.com/olivere/elastic/indices_put_alias.go | 302
-rw-r--r--  vendor/github.com/olivere/elastic/indices_put_alias_test.go | 222
-rw-r--r--  vendor/github.com/olivere/elastic/indices_put_mapping.go | 228
-rw-r--r--  vendor/github.com/olivere/elastic/indices_put_mapping_test.go | 95
-rw-r--r--  vendor/github.com/olivere/elastic/indices_put_settings.go | 191
-rw-r--r--  vendor/github.com/olivere/elastic/indices_put_settings_test.go | 95
-rw-r--r--  vendor/github.com/olivere/elastic/indices_put_template.go | 207
-rw-r--r--  vendor/github.com/olivere/elastic/indices_refresh.go | 98
-rw-r--r--  vendor/github.com/olivere/elastic/indices_refresh_test.go | 81
-rw-r--r--  vendor/github.com/olivere/elastic/indices_rollover.go | 272
-rw-r--r--  vendor/github.com/olivere/elastic/indices_rollover_test.go | 116
-rw-r--r--  vendor/github.com/olivere/elastic/indices_segments.go | 237
-rw-r--r--  vendor/github.com/olivere/elastic/indices_segments_test.go | 86
-rw-r--r--  vendor/github.com/olivere/elastic/indices_shrink.go | 179
-rw-r--r--  vendor/github.com/olivere/elastic/indices_shrink_test.go | 34
-rw-r--r--  vendor/github.com/olivere/elastic/indices_stats.go | 384
-rw-r--r--  vendor/github.com/olivere/elastic/indices_stats_test.go | 86
-rw-r--r--  vendor/github.com/olivere/elastic/ingest_delete_pipeline.go | 129
-rw-r--r--  vendor/github.com/olivere/elastic/ingest_delete_pipeline_test.go | 31
-rw-r--r--  vendor/github.com/olivere/elastic/ingest_get_pipeline.go | 121
-rw-r--r--  vendor/github.com/olivere/elastic/ingest_get_pipeline_test.go | 121
-rw-r--r--  vendor/github.com/olivere/elastic/ingest_put_pipeline.go | 158
-rw-r--r--  vendor/github.com/olivere/elastic/ingest_put_pipeline_test.go | 31
-rw-r--r--  vendor/github.com/olivere/elastic/ingest_simulate_pipeline.go | 161
-rw-r--r--  vendor/github.com/olivere/elastic/ingest_simulate_pipeline_test.go | 35
-rw-r--r--  vendor/github.com/olivere/elastic/inner_hit.go | 160
-rw-r--r--  vendor/github.com/olivere/elastic/inner_hit_test.go | 44
-rw-r--r--  vendor/github.com/olivere/elastic/logger.go | 10
-rw-r--r--  vendor/github.com/olivere/elastic/mget.go | 257
-rw-r--r--  vendor/github.com/olivere/elastic/mget_test.go | 96
-rw-r--r--  vendor/github.com/olivere/elastic/msearch.go | 101
-rw-r--r--  vendor/github.com/olivere/elastic/msearch_test.go | 198
-rw-r--r--  vendor/github.com/olivere/elastic/mtermvectors.go | 475
-rw-r--r--  vendor/github.com/olivere/elastic/mtermvectors_test.go | 134
-rw-r--r--  vendor/github.com/olivere/elastic/nodes_info.go | 313
-rw-r--r--  vendor/github.com/olivere/elastic/nodes_info_test.go | 43
-rw-r--r--  vendor/github.com/olivere/elastic/nodes_stats.go | 703
-rw-r--r--  vendor/github.com/olivere/elastic/nodes_stats_test.go | 138
-rw-r--r--  vendor/github.com/olivere/elastic/percolate_test.go | 68
-rw-r--r--  vendor/github.com/olivere/elastic/ping.go | 127
-rw-r--r--  vendor/github.com/olivere/elastic/ping_test.go | 65
-rw-r--r--  vendor/github.com/olivere/elastic/plugins.go | 40
-rw-r--r--  vendor/github.com/olivere/elastic/plugins_test.go | 32
-rw-r--r--  vendor/github.com/olivere/elastic/query.go | 13
-rw-r--r--  vendor/github.com/olivere/elastic/recipes/bulk_insert/bulk_insert.go | 173
-rw-r--r--  vendor/github.com/olivere/elastic/recipes/connect/connect.go | 43
-rw-r--r--  vendor/github.com/olivere/elastic/recipes/sliced_scroll/sliced_scroll.go | 161
-rw-r--r--  vendor/github.com/olivere/elastic/reindex.go | 685
-rw-r--r--  vendor/github.com/olivere/elastic/reindex_test.go | 401
-rw-r--r--  vendor/github.com/olivere/elastic/request.go | 79
-rw-r--r--  vendor/github.com/olivere/elastic/request_test.go | 72
-rw-r--r--  vendor/github.com/olivere/elastic/rescore.go | 44
-rw-r--r--  vendor/github.com/olivere/elastic/rescorer.go | 64
-rw-r--r--  vendor/github.com/olivere/elastic/response.go | 41
-rw-r--r--  vendor/github.com/olivere/elastic/response_test.go | 48
-rw-r--r--  vendor/github.com/olivere/elastic/retrier.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/retrier_test.go | 174
-rw-r--r--  vendor/github.com/olivere/elastic/retry.go | 56
-rw-r--r--  vendor/github.com/olivere/elastic/retry_test.go | 44
-rwxr-xr-x  vendor/github.com/olivere/elastic/run-es.sh | 3
-rw-r--r--  vendor/github.com/olivere/elastic/script.go | 127
-rw-r--r--  vendor/github.com/olivere/elastic/script_test.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/scroll.go | 470
-rw-r--r--  vendor/github.com/olivere/elastic/scroll_test.go | 387
-rw-r--r--  vendor/github.com/olivere/elastic/search.go | 581
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs.go | 1450
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_children.go | 76
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_children_test.go | 46
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_count_thresholds.go | 13
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go | 285
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go | 49
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go | 246
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go | 155
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_filter.go | 77
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_filter_test.go | 66
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_filters.go | 138
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_filters_test.go | 99
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go | 198
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go | 93
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid.go | 102
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid_test.go | 84
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_global.go | 71
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_global_test.go | 44
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_histogram.go | 265
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range.go | 195
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range_test.go | 90
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_missing.go | 81
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_missing_test.go | 44
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_nested.go | 82
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_nested_test.go | 62
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_range.go | 244
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_range_test.go | 178
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested.go | 86
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested_test.go | 83
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_sampler.go | 111
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go | 30
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go | 389
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go | 211
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text.go | 245
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text_test.go | 66
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_terms.go | 368
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_bucket_terms_test.go | 155
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_matrix_stats.go | 120
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_matrix_stats_test.go | 53
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_avg.go | 101
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_avg_test.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality.go | 120
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go | 78
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go | 99
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go | 44
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go | 105
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_max.go | 99
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_max_test.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_min.go | 100
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_min_test.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go | 131
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go | 78
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles.go | 130
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go | 78
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_stats.go | 99
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_stats_test.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_sum.go | 99
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_sum_test.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits.go | 143
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go | 31
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_value_count.go | 102
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go | 63
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go | 113
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go | 132
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go | 30
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go | 134
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go | 29
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go | 90
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative.go | 124
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go | 114
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go | 114
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go | 393
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go | 132
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket.go | 125
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket_test.go | 44
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go | 124
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket.go | 113
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go | 113
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_pipeline_test.go | 903
-rw-r--r--  vendor/github.com/olivere/elastic/search_aggs_test.go | 3233
-rw-r--r--  vendor/github.com/olivere/elastic/search_collapse_builder.go | 68
-rw-r--r--  vendor/github.com/olivere/elastic/search_collapse_builder_test.go | 29
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_bool.go | 203
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_bool_test.go | 33
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_boosting.go | 97
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_boosting_test.go | 30
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_common_terms.go | 137
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_common_terms_test.go | 85
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_constant_score.go | 59
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_constant_score_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_dis_max.go | 104
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_dis_max_test.go | 28
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_exists.go | 49
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_exists_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_fsq.go | 171
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_fsq_score_funcs.go | 567
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_fsq_test.go | 166
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_fuzzy.go | 120
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_fuzzy_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_geo_bounding_box.go | 121
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go | 63
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_geo_distance.go | 107
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_geo_distance_test.go | 69
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_geo_polygon.go | 72
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_geo_polygon_test.go | 58
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_has_child.go | 131
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_has_child_test.go (renamed from vendor/gopkg.in/olivere/elastic.v5/search_queries_indices_test.go) | 15
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_has_parent.go | 97
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_has_parent_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_ids.go | 76
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_ids_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_match.go | 189
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_match_all.go | 51
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_match_all_test.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_match_none.go | 39
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_match_none_test.go | 44
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_match_phrase.go | 79
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix.go | 89
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_match_phrase_test.go | 29
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_match_test.go | 44
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_more_like_this.go | 412
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_more_like_this_test.go | 92
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_multi_match.go | 275
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_multi_match_test.go | 131
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_nested.go | 96
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_nested_test.go | 86
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_parent_id.go | 99
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_parent_id_test.go | 52
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_percolator.go | 115
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_percolator_test.go | 65
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_prefix.go | 67
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_prefix_example_test.go | 35
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_prefix_test.go | 45
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_query_string.go | 350
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_query_string_test.go | 46
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_range.go | 144
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_range_test.go | 68
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_raw_string.go | 26
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_raw_string_test.go | 44
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_regexp.go | 82
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_regexp_test.go | 47
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_script.go | 51
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_script_test.go | 45
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_simple_query_string.go | 185
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_simple_query_string_test.go | 87
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_slice.go | 53
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_slice_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_term.go | 58
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_term_test.go | 46
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_terms.go | 75
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_terms_test.go | 82
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_type.go | 26
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_type_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_wildcard.go | 81
-rw-r--r--  vendor/github.com/olivere/elastic/search_queries_wildcard_test.go | 68
-rw-r--r--  vendor/github.com/olivere/elastic/search_request.go | 205
-rw-r--r--  vendor/github.com/olivere/elastic/search_request_test.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/search_source.go | 546
-rw-r--r--  vendor/github.com/olivere/elastic/search_source_test.go | 295
-rw-r--r--  vendor/github.com/olivere/elastic/search_suggester_test.go | 355
-rw-r--r--  vendor/github.com/olivere/elastic/search_terms_lookup.go | 74
-rw-r--r--  vendor/github.com/olivere/elastic/search_terms_lookup_test.go | 27
-rw-r--r--  vendor/github.com/olivere/elastic/search_test.go | 1265
-rw-r--r--  vendor/github.com/olivere/elastic/setup_test.go | 445
-rw-r--r--  vendor/github.com/olivere/elastic/snapshot_create.go | 191
-rw-r--r--  vendor/github.com/olivere/elastic/snapshot_create_repository.go | 205
-rw-r--r--  vendor/github.com/olivere/elastic/snapshot_create_repository_test.go | 61
-rw-r--r--  vendor/github.com/olivere/elastic/snapshot_create_test.go | 63
-rw-r--r--  vendor/github.com/olivere/elastic/snapshot_delete_repository.go | 132
-rw-r--r--  vendor/github.com/olivere/elastic/snapshot_delete_repository_test.go | 35
-rw-r--r--  vendor/github.com/olivere/elastic/snapshot_get_repository.go | 134
-rw-r--r--  vendor/github.com/olivere/elastic/snapshot_get_repository_test.go | 39
-rw-r--r--  vendor/github.com/olivere/elastic/snapshot_verify_repository.go | 132
-rw-r--r--  vendor/github.com/olivere/elastic/snapshot_verify_repository_test.go | 31
-rw-r--r--  vendor/github.com/olivere/elastic/sort.go | 614
-rw-r--r--  vendor/github.com/olivere/elastic/sort_test.go | 278
-rw-r--r--  vendor/github.com/olivere/elastic/suggest_field.go | 90
-rw-r--r--  vendor/github.com/olivere/elastic/suggest_field_test.go | 29
-rw-r--r--  vendor/github.com/olivere/elastic/suggester.go | 15
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_completion.go | 352
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_completion_test.go | 110
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_context.go | 124
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_context_category.go | 119
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_context_category_test.go | 163
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_context_geo.go | 130
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_context_geo_test.go | 48
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_context_test.go | 55
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_phrase.go | 546
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_phrase_test.go | 169
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_term.go | 233
-rw-r--r--  vendor/github.com/olivere/elastic/suggester_term_test.go (renamed from vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy_test.go) | 17
-rw-r--r--  vendor/github.com/olivere/elastic/tasks_cancel.go | 149
-rw-r--r--  vendor/github.com/olivere/elastic/tasks_cancel_test.go | 51
-rw-r--r--  vendor/github.com/olivere/elastic/tasks_get_task.go | 108
-rw-r--r--  vendor/github.com/olivere/elastic/tasks_get_task_test.go | 43
-rw-r--r--  vendor/github.com/olivere/elastic/tasks_list.go | 231
-rw-r--r--  vendor/github.com/olivere/elastic/tasks_list_test.go | 65
-rw-r--r--  vendor/github.com/olivere/elastic/termvectors.go | 464
-rw-r--r--  vendor/github.com/olivere/elastic/termvectors_test.go | 157
-rw-r--r--  vendor/github.com/olivere/elastic/update.go | 327
-rw-r--r--  vendor/github.com/olivere/elastic/update_by_query.go | 655
-rw-r--r--  vendor/github.com/olivere/elastic/update_by_query_test.go | 147
-rw-r--r--  vendor/github.com/olivere/elastic/update_integration_test.go | 58
-rw-r--r--  vendor/github.com/olivere/elastic/update_test.go | 262
-rw-r--r--  vendor/github.com/olivere/elastic/uritemplates/LICENSE | 18
-rw-r--r--  vendor/github.com/olivere/elastic/uritemplates/uritemplates.go | 359
-rw-r--r--  vendor/github.com/olivere/elastic/uritemplates/utils.go | 13
-rw-r--r--  vendor/github.com/olivere/elastic/uritemplates/utils_test.go | 105
-rw-r--r--  vendor/github.com/pelletier/go-toml/.travis.yml | 4
-rw-r--r--  vendor/github.com/pelletier/go-toml/keysparsing.go | 104
-rw-r--r--  vendor/github.com/pelletier/go-toml/keysparsing_test.go | 13
-rw-r--r--  vendor/github.com/pelletier/go-toml/lexer.go | 42
-rw-r--r--  vendor/github.com/pelletier/go-toml/lexer_test.go | 2
-rw-r--r--  vendor/github.com/pelletier/go-toml/marshal.go | 127
-rw-r--r--  vendor/github.com/pelletier/go-toml/parser.go | 19
-rw-r--r--  vendor/github.com/pelletier/go-toml/parser_test.go | 63
-rwxr-xr-x  vendor/github.com/pelletier/go-toml/test.sh | 1
-rw-r--r--  vendor/github.com/pelletier/go-toml/token.go | 4
-rw-r--r--  vendor/github.com/pelletier/go-toml/toml.go | 27
-rw-r--r--  vendor/github.com/pelletier/go-toml/tomltree_write.go | 4
-rw-r--r--  vendor/github.com/pelletier/go-toml/tomltree_write_test.go | 18
-rw-r--r--  vendor/github.com/pkg/errors/.travis.yml | 10
-rw-r--r--  vendor/github.com/pkg/errors/README.md | 4
-rw-r--r--  vendor/github.com/pkg/errors/bench_test.go | 8
-rw-r--r--  vendor/github.com/pkg/errors/errors_test.go | 1
-rw-r--r--  vendor/github.com/pkg/errors/format_test.go | 2
-rw-r--r--  vendor/github.com/pkg/errors/stack.go | 11
-rw-r--r--  vendor/github.com/pmezard/go-difflib/difflib/difflib.go | 54
-rw-r--r--  vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go | 82
-rw-r--r--  vendor/github.com/prometheus/client_golang/.gitignore | 4
-rw-r--r--  vendor/github.com/prometheus/client_golang/.travis.yml | 7
-rw-r--r--  vendor/github.com/prometheus/client_golang/AUTHORS.md | 18
-rw-r--r--  vendor/github.com/prometheus/client_golang/CONTRIBUTING.md | 6
-rw-r--r--  vendor/github.com/prometheus/client_golang/MAINTAINERS.md | 1
-rw-r--r--  vendor/github.com/prometheus/client_golang/README.md | 6
-rw-r--r--  vendor/github.com/prometheus/client_golang/api/client.go | 131
-rw-r--r--  vendor/github.com/prometheus/client_golang/api/client_test.go | 115
-rw-r--r--  vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go (renamed from vendor/github.com/prometheus/client_golang/api/prometheus/api.go) | 346
-rw-r--r--  vendor/github.com/prometheus/client_golang/api/prometheus/v1/api_test.go (renamed from vendor/github.com/prometheus/client_golang/api/prometheus/api_test.go) | 433
-rw-r--r--  vendor/github.com/prometheus/client_golang/examples/random/Dockerfile | 20
-rw-r--r--  vendor/github.com/prometheus/client_golang/examples/random/main.go | 21
-rw-r--r--  vendor/github.com/prometheus/client_golang/examples/simple/Dockerfile | 20
-rw-r--r--  vendor/github.com/prometheus/client_golang/examples/simple/main.go | 7
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go | 24
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/counter.go | 191
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/counter_test.go | 160
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/desc.go | 39
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/desc_test.go | 17
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/doc.go | 69
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go | 71
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go | 48
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go | 40
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/examples_test.go | 17
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go | 2
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/gauge.go | 204
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go | 24
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector.go | 51
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go | 106
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge.go | 280
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go | 309
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/histogram.go | 147
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go | 24
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/http.go | 110
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/http_test.go | 45
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/labels.go | 57
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/metric.go | 20
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/observer.go | 52
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/process_collector.go | 104
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go | 24
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go | 199
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go | 181
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go | 44
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go | 31
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go | 6
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go | 97
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go | 144
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8_test.go | 195
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go | 447
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go | 375
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go | 84
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go | 24
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/push/push.go | 2
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/registry.go | 68
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/registry_test.go67
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/summary.go173
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/summary_test.go53
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/timer.go51
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/timer_test.go152
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/untyped.go102
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/value.go78
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/value_test.go43
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/vec.go491
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/vec_test.go281
-rw-r--r--vendor/github.com/prometheus/client_model/python/prometheus/client/__init__.py12
-rw-r--r--vendor/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py14
-rw-r--r--vendor/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py575
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb2
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb111
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb7
-rw-r--r--vendor/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java7683
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfs5
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfsd11
-rw-r--r--vendor/github.com/prometheus/procfs/fs.go23
-rw-r--r--vendor/github.com/prometheus/procfs/internal/util/parse.go46
-rw-r--r--vendor/github.com/prometheus/procfs/nfs/nfs.go263
-rw-r--r--vendor/github.com/prometheus/procfs/nfs/parse.go308
-rw-r--r--vendor/github.com/prometheus/procfs/nfs/parse_nfs.go67
-rw-r--r--vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go180
-rw-r--r--vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go89
-rw-r--r--vendor/github.com/prometheus/procfs/nfs/parse_nfsd_test.go196
-rw-r--r--vendor/github.com/prometheus/procfs/xfs/parse.go37
-rw-r--r--vendor/github.com/spf13/afero/util.go3
-rw-r--r--vendor/github.com/spf13/cobra/bash_completions.md4
-rw-r--r--vendor/github.com/spf13/cobra/cobra/cmd/helpers.go3
-rw-r--r--vendor/github.com/spf13/cobra/cobra/cmd/project.go39
-rw-r--r--vendor/github.com/spf13/pflag/count.go12
-rw-r--r--vendor/github.com/spf13/pflag/count_test.go6
-rw-r--r--vendor/github.com/spf13/pflag/flag.go57
-rw-r--r--vendor/github.com/spf13/pflag/flag_test.go83
-rw-r--r--vendor/github.com/spf13/pflag/int16.go88
-rw-r--r--vendor/github.com/stretchr/objx/.codeclimate.yml13
-rw-r--r--vendor/github.com/stretchr/objx/.github/CODE_OF_CONDUCT.md46
-rw-r--r--vendor/github.com/stretchr/objx/.gitignore27
-rw-r--r--vendor/github.com/stretchr/objx/.travis.yml25
-rw-r--r--vendor/github.com/stretchr/objx/Gopkg.lock30
-rw-r--r--vendor/github.com/stretchr/objx/Gopkg.toml8
-rw-r--r--vendor/github.com/stretchr/objx/LICENSE (renamed from vendor/github.com/stretchr/objx/LICENSE.md)5
-rw-r--r--vendor/github.com/stretchr/objx/README.md81
-rw-r--r--vendor/github.com/stretchr/objx/Taskfile.yml32
-rw-r--r--vendor/github.com/stretchr/objx/accessors.go43
-rw-r--r--vendor/github.com/stretchr/objx/accessors_test.go271
-rw-r--r--vendor/github.com/stretchr/objx/codegen/template.txt166
-rw-r--r--vendor/github.com/stretchr/objx/codegen/template_test.txt121
-rw-r--r--vendor/github.com/stretchr/objx/conversions.go19
-rw-r--r--vendor/github.com/stretchr/objx/conversions_test.go64
-rw-r--r--vendor/github.com/stretchr/objx/doc.go136
-rw-r--r--vendor/github.com/stretchr/objx/fixture_test.go14
-rw-r--r--vendor/github.com/stretchr/objx/map.go54
-rw-r--r--vendor/github.com/stretchr/objx/map_test.go169
-rw-r--r--vendor/github.com/stretchr/objx/mutations.go42
-rw-r--r--vendor/github.com/stretchr/objx/mutations_test.go133
-rw-r--r--vendor/github.com/stretchr/objx/security.go8
-rw-r--r--vendor/github.com/stretchr/objx/security_test.go10
-rw-r--r--vendor/github.com/stretchr/objx/simple_example_test.go13
-rw-r--r--vendor/github.com/stretchr/objx/tests_test.go11
-rw-r--r--vendor/github.com/stretchr/objx/type_specific_codegen.go460
-rw-r--r--vendor/github.com/stretchr/objx/type_specific_codegen_test.go863
-rw-r--r--vendor/github.com/stretchr/objx/value.go40
-rw-r--r--vendor/github.com/stretchr/objx/value_test.go75
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/LICENSE15
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/bypass.go152
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go38
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/common.go341
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/config.go306
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/doc.go211
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/dump.go509
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/format.go419
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/spew.go148
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/pmezard/go-difflib/LICENSE27
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/pmezard/go-difflib/difflib/difflib.go772
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/LICENSE (renamed from vendor/github.com/stretchr/testify/LICENCE.txt)0
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertion_format.go405
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertion_forward.go798
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertions.go1312
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/doc.go45
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/errors.go10
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/forward_assertions.go16
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/http_assertions.go127
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/doc.go28
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/forward_requirements.go16
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/require.go979
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/require_forward.go799
-rw-r--r--vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/requirements.go9
-rwxr-xr-xvendor/github.com/stretchr/testify/.travis.gofmt.sh7
-rwxr-xr-xvendor/github.com/stretchr/testify/.travis.gogenerate.sh13
-rwxr-xr-xvendor/github.com/stretchr/testify/.travis.govet.sh10
-rw-r--r--vendor/github.com/stretchr/testify/.travis.yml13
-rw-r--r--vendor/github.com/stretchr/testify/Godeps/Godeps.json23
-rw-r--r--vendor/github.com/stretchr/testify/Godeps/Readme5
-rw-r--r--vendor/github.com/stretchr/testify/Gopkg.lock25
-rw-r--r--vendor/github.com/stretchr/testify/Gopkg.toml26
-rw-r--r--vendor/github.com/stretchr/testify/_codegen/main.go37
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertion_format.go405
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl4
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertion_forward.go623
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertions.go556
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertions_test.go379
-rw-r--r--vendor/github.com/stretchr/testify/assert/forward_assertions.go2
-rw-r--r--vendor/github.com/stretchr/testify/assert/http_assertions.go59
-rw-r--r--vendor/github.com/stretchr/testify/assert/http_assertions_test.go53
-rw-r--r--vendor/github.com/stretchr/testify/mock/mock.go141
-rw-r--r--vendor/github.com/stretchr/testify/mock/mock_test.go236
-rw-r--r--vendor/github.com/stretchr/testify/require/forward_requirements.go2
-rw-r--r--vendor/github.com/stretchr/testify/require/require.go943
-rw-r--r--vendor/github.com/stretchr/testify/require/require.go.tmpl6
-rw-r--r--vendor/github.com/stretchr/testify/require/require_forward.go623
-rw-r--r--vendor/github.com/stretchr/testify/require/requirements.go2
-rw-r--r--vendor/github.com/stretchr/testify/suite/interfaces.go12
-rw-r--r--vendor/github.com/stretchr/testify/suite/suite.go27
-rw-r--r--vendor/github.com/stretchr/testify/suite/suite_test.go69
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/.gitignore (renamed from vendor/github.com/cpanato/html2text/.gitignore)2
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/.travis.yml14
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE2
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/README.md205
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/cov_report.sh22
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go2
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go2
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go2
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common_test.go298
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go11
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go11
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go8
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump_test.go1042
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go99
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go26
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/example_test.go226
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go2
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format_test.go1558
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/internal_test.go87
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go102
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go2
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew_test.go320
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go82
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/test_coverage.txt61
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/.travis.yml5
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/README.md50
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go352
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors_test.go145
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions_test.go94
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/fixture_test.go98
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_for_test.go (renamed from vendor/github.com/stretchr/objx/map_for_test.go)0
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_test.go147
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations_test.go77
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security_test.go12
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/simple_example_test.go41
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests_test.go24
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen_test.go2867
-rw-r--r--vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value_test.go1
-rw-r--r--vendor/github.com/tylerb/graceful/README.md5
-rw-r--r--vendor/github.com/xenolf/lego/README.md2
-rw-r--r--vendor/github.com/xenolf/lego/acme/http.go1
-rw-r--r--vendor/github.com/xenolf/lego/cli.go2
-rw-r--r--vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go26
-rw-r--r--vendor/golang.org/x/crypto/acme/autocert/autocert.go316
-rw-r--r--vendor/golang.org/x/crypto/acme/autocert/autocert_test.go151
-rw-r--r--vendor/golang.org/x/crypto/acme/autocert/example_test.go3
-rw-r--r--vendor/golang.org/x/crypto/argon2/blamka_amd64.go2
-rw-r--r--vendor/golang.org/x/crypto/ssh/cipher.go240
-rw-r--r--vendor/golang.org/x/crypto/ssh/cipher_test.go11
-rw-r--r--vendor/golang.org/x/crypto/ssh/client_test.go92
-rw-r--r--vendor/golang.org/x/crypto/ssh/common.go16
-rw-r--r--vendor/golang.org/x/crypto/ssh/test/session_test.go61
-rw-r--r--vendor/golang.org/x/crypto/ssh/transport.go56
-rw-r--r--vendor/golang.org/x/crypto/ssh/transport_test.go14
-rw-r--r--vendor/golang.org/x/net/html/token.go4
-rw-r--r--vendor/golang.org/x/net/http2/ciphers.go2
-rw-r--r--vendor/golang.org/x/net/http2/configure_transport.go2
-rw-r--r--vendor/golang.org/x/net/http2/transport.go21
-rw-r--r--vendor/golang.org/x/net/internal/iana/gen.go6
-rw-r--r--vendor/golang.org/x/net/ipv4/gen.go2
-rw-r--r--vendor/golang.org/x/net/ipv6/gen.go2
-rw-r--r--vendor/golang.org/x/sys/plan9/asm_plan9_arm.s25
-rw-r--r--vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go284
-rw-r--r--vendor/golang.org/x/sys/unix/dirent.go89
-rw-r--r--vendor/golang.org/x/sys/unix/linux/types.go61
-rwxr-xr-xvendor/golang.org/x/sys/unix/mkerrors.sh2
-rw-r--r--vendor/golang.org/x/sys/unix/mkpost.go14
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_darwin.go13
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_dragonfly.go17
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_freebsd.go13
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux.go46
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_test.go102
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_netbsd.go13
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_openbsd.go13
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_solaris.go17
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_unix.go4
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_386.go21
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go21
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_arm.go21
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go21
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips.go21
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go21
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go21
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go21
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go21
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go21
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go21
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_386.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go15
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_386.go100
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go136
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_arm.go142
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go136
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips.go138
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go136
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go136
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go138
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go136
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go136
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go58
-rw-r--r--vendor/google.golang.org/appengine/.travis.yml24
-rw-r--r--vendor/google.golang.org/appengine/CONTRIBUTING.md90
-rw-r--r--vendor/google.golang.org/appengine/LICENSE (renamed from vendor/github.com/avct/uasurfer/LICENSE)20
-rw-r--r--vendor/google.golang.org/appengine/README.md73
-rw-r--r--vendor/google.golang.org/appengine/aetest/doc.go42
-rw-r--r--vendor/google.golang.org/appengine/aetest/instance.go55
-rw-r--r--vendor/google.golang.org/appengine/aetest/instance_classic.go21
-rw-r--r--vendor/google.golang.org/appengine/aetest/instance_test.go119
-rw-r--r--vendor/google.golang.org/appengine/aetest/instance_vm.go282
-rw-r--r--vendor/google.golang.org/appengine/aetest/user.go36
-rw-r--r--vendor/google.golang.org/appengine/appengine.go113
-rw-r--r--vendor/google.golang.org/appengine/appengine_test.go49
-rw-r--r--vendor/google.golang.org/appengine/appengine_vm.go20
-rw-r--r--vendor/google.golang.org/appengine/blobstore/blobstore.go276
-rw-r--r--vendor/google.golang.org/appengine/blobstore/blobstore_test.go183
-rw-r--r--vendor/google.golang.org/appengine/blobstore/read.go160
-rw-r--r--vendor/google.golang.org/appengine/capability/capability.go52
-rw-r--r--vendor/google.golang.org/appengine/channel/channel.go87
-rw-r--r--vendor/google.golang.org/appengine/channel/channel_test.go21
-rw-r--r--vendor/google.golang.org/appengine/cloudsql/cloudsql.go62
-rw-r--r--vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go17
-rw-r--r--vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go16
-rw-r--r--vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go342
-rw-r--r--vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go72
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/ae.go185
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/ae_test.go144
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/fix.go848
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/main.go258
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/main_test.go129
-rw-r--r--vendor/google.golang.org/appengine/cmd/aefix/typecheck.go673
-rw-r--r--vendor/google.golang.org/appengine/datastore/datastore.go407
-rw-r--r--vendor/google.golang.org/appengine/datastore/datastore_test.go1744
-rw-r--r--vendor/google.golang.org/appengine/datastore/doc.go361
-rw-r--r--vendor/google.golang.org/appengine/datastore/key.go309
-rw-r--r--vendor/google.golang.org/appengine/datastore/key_test.go204
-rw-r--r--vendor/google.golang.org/appengine/datastore/load.go429
-rw-r--r--vendor/google.golang.org/appengine/datastore/load_test.go656
-rw-r--r--vendor/google.golang.org/appengine/datastore/metadata.go78
-rw-r--r--vendor/google.golang.org/appengine/datastore/prop.go330
-rw-r--r--vendor/google.golang.org/appengine/datastore/prop_test.go547
-rw-r--r--vendor/google.golang.org/appengine/datastore/query.go724
-rw-r--r--vendor/google.golang.org/appengine/datastore/query_test.go583
-rw-r--r--vendor/google.golang.org/appengine/datastore/save.go327
-rw-r--r--vendor/google.golang.org/appengine/datastore/time_test.go65
-rw-r--r--vendor/google.golang.org/appengine/datastore/transaction.go87
-rw-r--r--vendor/google.golang.org/appengine/delay/delay.go295
-rw-r--r--vendor/google.golang.org/appengine/delay/delay_go17.go23
-rw-r--r--vendor/google.golang.org/appengine/delay/delay_go17_test.go55
-rw-r--r--vendor/google.golang.org/appengine/delay/delay_pre17.go19
-rw-r--r--vendor/google.golang.org/appengine/delay/delay_test.go428
-rw-r--r--vendor/google.golang.org/appengine/demos/guestbook/app.yaml14
-rw-r--r--vendor/google.golang.org/appengine/demos/guestbook/favicon.icobin0 -> 1150 bytes
-rw-r--r--vendor/google.golang.org/appengine/demos/guestbook/guestbook.go109
-rw-r--r--vendor/google.golang.org/appengine/demos/guestbook/index.yaml7
-rw-r--r--vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html26
-rw-r--r--vendor/google.golang.org/appengine/demos/helloworld/app.yaml10
-rw-r--r--vendor/google.golang.org/appengine/demos/helloworld/favicon.icobin0 -> 1150 bytes
-rw-r--r--vendor/google.golang.org/appengine/demos/helloworld/helloworld.go50
-rw-r--r--vendor/google.golang.org/appengine/errors.go46
-rw-r--r--vendor/google.golang.org/appengine/file/file.go28
-rw-r--r--vendor/google.golang.org/appengine/identity.go142
-rw-r--r--vendor/google.golang.org/appengine/image/image.go67
-rw-r--r--vendor/google.golang.org/appengine/internal/aetesting/fake.go81
-rw-r--r--vendor/google.golang.org/appengine/internal/api.go660
-rw-r--r--vendor/google.golang.org/appengine/internal/api_classic.go169
-rw-r--r--vendor/google.golang.org/appengine/internal/api_common.go123
-rw-r--r--vendor/google.golang.org/appengine/internal/api_pre17.go682
-rw-r--r--vendor/google.golang.org/appengine/internal/api_race_test.go9
-rw-r--r--vendor/google.golang.org/appengine/internal/api_test.go466
-rw-r--r--vendor/google.golang.org/appengine/internal/app_id.go28
-rw-r--r--vendor/google.golang.org/appengine/internal/app_id_test.go34
-rw-r--r--vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go296
-rw-r--r--vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto64
-rw-r--r--vendor/google.golang.org/appengine/internal/base/api_base.pb.go133
-rw-r--r--vendor/google.golang.org/appengine/internal/base/api_base.proto33
-rw-r--r--vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go347
-rw-r--r--vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto71
-rw-r--r--vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go125
-rw-r--r--vendor/google.golang.org/appengine/internal/capability/capability_service.proto28
-rw-r--r--vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go154
-rw-r--r--vendor/google.golang.org/appengine/internal/channel/channel_service.proto30
-rw-r--r--vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go2778
-rwxr-xr-xvendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto541
-rw-r--r--vendor/google.golang.org/appengine/internal/identity.go14
-rw-r--r--vendor/google.golang.org/appengine/internal/identity_classic.go57
-rw-r--r--vendor/google.golang.org/appengine/internal/identity_vm.go101
-rw-r--r--vendor/google.golang.org/appengine/internal/image/images_service.pb.go845
-rw-r--r--vendor/google.golang.org/appengine/internal/image/images_service.proto162
-rw-r--r--vendor/google.golang.org/appengine/internal/internal.go110
-rw-r--r--vendor/google.golang.org/appengine/internal/internal_vm_test.go60
-rw-r--r--vendor/google.golang.org/appengine/internal/log/log_service.pb.go899
-rw-r--r--vendor/google.golang.org/appengine/internal/log/log_service.proto150
-rw-r--r--vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go229
-rw-r--r--vendor/google.golang.org/appengine/internal/mail/mail_service.proto45
-rw-r--r--vendor/google.golang.org/appengine/internal/main.go15
-rw-r--r--vendor/google.golang.org/appengine/internal/main_vm.go48
-rw-r--r--vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go938
-rw-r--r--vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto165
-rw-r--r--vendor/google.golang.org/appengine/internal/metadata.go61
-rw-r--r--vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go375
-rw-r--r--vendor/google.golang.org/appengine/internal/modules/modules_service.proto80
-rw-r--r--vendor/google.golang.org/appengine/internal/net.go56
-rw-r--r--vendor/google.golang.org/appengine/internal/net_test.go58
-rwxr-xr-xvendor/google.golang.org/appengine/internal/regen.sh40
-rw-r--r--vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go231
-rw-r--r--vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto44
-rw-r--r--vendor/google.golang.org/appengine/internal/search/search.pb.go2488
-rw-r--r--vendor/google.golang.org/appengine/internal/search/search.proto394
-rw-r--r--vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go1858
-rw-r--r--vendor/google.golang.org/appengine/internal/socket/socket_service.proto460
-rw-r--r--vendor/google.golang.org/appengine/internal/system/system_service.pb.go198
-rw-r--r--vendor/google.golang.org/appengine/internal/system/system_service.proto49
-rw-r--r--vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go1888
-rw-r--r--vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto342
-rw-r--r--vendor/google.golang.org/appengine/internal/transaction.go107
-rw-r--r--vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go355
-rw-r--r--vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto64
-rw-r--r--vendor/google.golang.org/appengine/internal/user/user_service.pb.go289
-rw-r--r--vendor/google.golang.org/appengine/internal/user/user_service.proto58
-rw-r--r--vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go427
-rw-r--r--vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto83
-rw-r--r--vendor/google.golang.org/appengine/log/api.go40
-rw-r--r--vendor/google.golang.org/appengine/log/log.go323
-rw-r--r--vendor/google.golang.org/appengine/log/log_test.go112
-rw-r--r--vendor/google.golang.org/appengine/mail/mail.go123
-rw-r--r--vendor/google.golang.org/appengine/mail/mail_test.go65
-rw-r--r--vendor/google.golang.org/appengine/memcache/memcache.go526
-rw-r--r--vendor/google.golang.org/appengine/memcache/memcache_test.go263
-rw-r--r--vendor/google.golang.org/appengine/module/module.go113
-rw-r--r--vendor/google.golang.org/appengine/module/module_test.go124
-rw-r--r--vendor/google.golang.org/appengine/namespace.go25
-rw-r--r--vendor/google.golang.org/appengine/namespace_test.go39
-rw-r--r--vendor/google.golang.org/appengine/remote_api/client.go194
-rw-r--r--vendor/google.golang.org/appengine/remote_api/client_test.go43
-rw-r--r--vendor/google.golang.org/appengine/remote_api/remote_api.go152
-rw-r--r--vendor/google.golang.org/appengine/runtime/runtime.go148
-rw-r--r--vendor/google.golang.org/appengine/runtime/runtime_test.go101
-rw-r--r--vendor/google.golang.org/appengine/search/doc.go209
-rw-r--r--vendor/google.golang.org/appengine/search/field.go82
-rw-r--r--vendor/google.golang.org/appengine/search/search.go1189
-rw-r--r--vendor/google.golang.org/appengine/search/search_test.go1270
-rw-r--r--vendor/google.golang.org/appengine/search/struct.go251
-rw-r--r--vendor/google.golang.org/appengine/search/struct_test.go213
-rw-r--r--vendor/google.golang.org/appengine/socket/doc.go10
-rw-r--r--vendor/google.golang.org/appengine/socket/socket_classic.go290
-rw-r--r--vendor/google.golang.org/appengine/socket/socket_vm.go64
-rw-r--r--vendor/google.golang.org/appengine/taskqueue/taskqueue.go541
-rw-r--r--vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go173
-rw-r--r--vendor/google.golang.org/appengine/timeout.go20
-rw-r--r--vendor/google.golang.org/appengine/urlfetch/urlfetch.go210
-rw-r--r--vendor/google.golang.org/appengine/user/oauth.go52
-rw-r--r--vendor/google.golang.org/appengine/user/user.go84
-rw-r--r--vendor/google.golang.org/appengine/user/user_classic.go44
-rw-r--r--vendor/google.golang.org/appengine/user/user_test.go99
-rw-r--r--vendor/google.golang.org/appengine/user/user_vm.go38
-rw-r--r--vendor/google.golang.org/appengine/xmpp/xmpp.go253
-rw-r--r--vendor/google.golang.org/appengine/xmpp/xmpp_test.go173
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/.gitignore1
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/.travis.yml3
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-6.0.md18
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md1
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/README.md59
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk.go33
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go81
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_easyjson.go230
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_test.go25
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go102
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_easyjson.go262
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_test.go41
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go104
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_processor_test.go11
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_test.go188
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go172
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_easyjson.go461
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_test.go60
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go11
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/clear_scroll_test.go6
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/client.go123
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/client_test.go64
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/cluster-test/cluster-test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/cluster_health.go10
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/cluster_state.go20
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go18
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/count.go11
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/count_test.go10
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/decoder_test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/delete.go30
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go11
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/delete_by_query_test.go8
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/delete_template.go109
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/delete_test.go25
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/docker-compose.yml23
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/example_test.go69
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/exists.go13
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/exists_test.go6
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/explain.go13
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/explain_test.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/field_caps.go202
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/field_caps_test.go146
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/field_stats.go259
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/field_stats_test.go282
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/get.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/get_template.go113
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/get_template_test.go52
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/get_test.go22
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/highlight.go20
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/highlight_test.go14
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/index.go31
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/index_test.go30
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_analyze.go15
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_close.go14
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_create.go18
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_delete.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go16
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_exists.go13
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go11
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_exists_template_test.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go13
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_exists_type_test.go8
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_flush.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go10
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_get.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go15
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_get_field_mapping.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_open.go16
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go13
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_put_alias_test.go6
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping.go17
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping_test.go30
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go17
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go17
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go10
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_refresh_test.go6
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go15
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_rollover_test.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_segments.go237
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_segments_test.go86
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go18
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/indices_stats.go39
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go16
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go17
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go13
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/mget.go13
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/mget_test.go10
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/msearch.go7
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/msearch_test.go14
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go13
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/mtermvectors_test.go18
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/nodes_info.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go45
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/percolate_test.go27
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/ping.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/put_template.go145
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/put_template_test.go53
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/recipes/bulk_insert/bulk_insert.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/recipes/connect/connect.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/recipes/sliced_scroll/sliced_scroll.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/reindex.go22
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/reindex_test.go10
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/request.go53
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/request_test.go55
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/retrier_test.go55
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.0.1.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.1.1.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.1.2.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.2.0.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.2.1.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.2.2.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.3.0.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.4.0.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.4.1.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.5.0.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.5.1.sh1
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es-5.6.3.sh2
-rwxr-xr-xvendor/gopkg.in/olivere/elastic.v5/run-es.sh3
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/script.go26
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/script_test.go25
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/scroll.go44
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/scroll_test.go18
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search.go39
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs.go90
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_count_thresholds.go13
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_ip_range.go195
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_ip_range_test.go90
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go18
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms_test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_text.go245
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_text_test.go66
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_matrix_stats.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script_test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector_test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go14
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_percentiles_bucket.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_stats_bucket.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_test.go143
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_collapse_builder.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go13
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_bool_test.go3
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go11
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms_test.go6
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go28
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go11
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance_test.go3
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_indices.go89
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_match_none.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_match_phrase.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_match_phrase_prefix.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go24
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this_test.go6
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_parent_id.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator_test.go7
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix_example_test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go97
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_script_test.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string_test.go6
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_slice.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard_test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_request.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_source.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_source_test.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_suggester_test.go31
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_terms_lookup.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/search_test.go381
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/setup_test.go235
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/snapshot_create.go13
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/snapshot_create_repository.go17
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/snapshot_create_test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/snapshot_delete_repository.go16
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/snapshot_get_repository.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/snapshot_verify_repository.go12
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/sort.go177
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/sort_test.go52
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggest.go158
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggest_field.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggest_field_test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggest_test.go162
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go226
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy.go179
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggester_completion_test.go58
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggester_context.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggester_context_test.go4
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go14
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggester_phrase_test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/suggester_term.go6
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go10
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/tasks_get_task.go8
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/tasks_list.go56
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/tasks_list_test.go8
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/termvectors.go17
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/termvectors_test.go16
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/update.go26
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/update_by_query.go11
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/update_by_query_test.go2
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/update_integration_test.go58
-rw-r--r--vendor/gopkg.in/olivere/elastic.v5/update_test.go71
1504 files changed, 181835 insertions, 24437 deletions
diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go
index b3cb8315b..ae1ebe4bf 100644
--- a/vendor/github.com/NYTimes/gziphandler/gzip.go
+++ b/vendor/github.com/NYTimes/gziphandler/gzip.go
@@ -88,7 +88,7 @@ type GzipResponseWriterWithCloseNotify struct {
*GzipResponseWriter
}
-func (w *GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
+func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
diff --git a/vendor/github.com/NYTimes/gziphandler/gzip_test.go b/vendor/github.com/NYTimes/gziphandler/gzip_test.go
index ec1543372..ba5048921 100644
--- a/vendor/github.com/NYTimes/gziphandler/gzip_test.go
+++ b/vendor/github.com/NYTimes/gziphandler/gzip_test.go
@@ -325,17 +325,32 @@ func TestFlushBeforeWrite(t *testing.T) {
}
func TestImplementCloseNotifier(t *testing.T) {
+ request := httptest.NewRequest(http.MethodGet, "/", nil)
+ request.Header.Set(acceptEncoding, "gzip")
GzipHandler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request){
_, ok := rw.(http.CloseNotifier)
assert.True(t, ok, "response writer must implement http.CloseNotifier")
- })).ServeHTTP(&mockRWCloseNotify{}, &http.Request{})
+ })).ServeHTTP(&mockRWCloseNotify{}, request)
+}
+
+func TestImplementFlusherAndCloseNotifier(t *testing.T) {
+ request := httptest.NewRequest(http.MethodGet, "/", nil)
+ request.Header.Set(acceptEncoding, "gzip")
+ GzipHandler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request){
+ _, okCloseNotifier := rw.(http.CloseNotifier)
+ assert.True(t, okCloseNotifier, "response writer must implement http.CloseNotifier")
+ _, okFlusher := rw.(http.Flusher)
+ assert.True(t, okFlusher, "response writer must implement http.Flusher")
+ })).ServeHTTP(&mockRWCloseNotify{}, request)
}
func TestNotImplementCloseNotifier(t *testing.T) {
+ request := httptest.NewRequest(http.MethodGet, "/", nil)
+ request.Header.Set(acceptEncoding, "gzip")
GzipHandler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request){
_, ok := rw.(http.CloseNotifier)
assert.False(t, ok, "response writer must not implement http.CloseNotifier")
- })).ServeHTTP(httptest.NewRecorder(), &http.Request{})
+ })).ServeHTTP(httptest.NewRecorder(), request)
}
diff --git a/vendor/github.com/avct/uasurfer/.gitignore b/vendor/github.com/avct/uasurfer/.gitignore
deleted file mode 100644
index 35ba52a16..000000000
--- a/vendor/github.com/avct/uasurfer/.gitignore
+++ /dev/null
@@ -1,56 +0,0 @@
-# Compiled bin #
-###################
-
-
-# Compiled source #
-###################
-*.dll
-*.exe
-*.o
-*.so
-
-# Packages #
-############
-# it's better to unpack these files and commit the raw source
-# git has its own built in compression methods
-*.7z
-*.dmg
-*.gz
-*.iso
-*.jar
-*.rar
-*.tar
-*.zip
-
-# Configuration Files #
-#######################
-*.cfg
-
-# Logs and databases #
-######################
-*.log
-*.sql
-*.sqlite
-logs
-coverage.html
-coverage.out
-
-# Test Files #
-#######################
-*.test
-
-# OS generated files #
-######################
-.DS_Store
-.DS_Store?
-.Spotlight-V100
-.Trashes
-ehthumbs.db
-Thumbs.db
-
-# go.rice generated files
-*.rice-box.go
-
-# Dev Tools #
-######################
-.vagrant \ No newline at end of file
diff --git a/vendor/github.com/avct/uasurfer/.travis.yml b/vendor/github.com/avct/uasurfer/.travis.yml
deleted file mode 100644
index 77b64e6f2..000000000
--- a/vendor/github.com/avct/uasurfer/.travis.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-sudo: false
-
-language: go
-
-go:
- - 1.9.x
- - 1.8.x
- - 1.7.x
-
-script:
- - go test
diff --git a/vendor/github.com/avct/uasurfer/README.md b/vendor/github.com/avct/uasurfer/README.md
deleted file mode 100644
index 2a4ab608d..000000000
--- a/vendor/github.com/avct/uasurfer/README.md
+++ /dev/null
@@ -1,169 +0,0 @@
-[![Build Status](https://travis-ci.org/avct/uasurfer.svg?branch=master)](https://travis-ci.org/avct/uasurfer) [![GoDoc](https://godoc.org/github.com/avct/uasurfer?status.svg)](https://godoc.org/github.com/avct/uasurfer) [![Go Report Card](https://goreportcard.com/badge/github.com/avct/uasurfer)](https://goreportcard.com/report/github.com/avct/uasurfer)
-
-# uasurfer
-
-![uasurfer-100px](https://cloud.githubusercontent.com/assets/597902/16172506/9debc136-357a-11e6-90fb-c7c46f50dff0.png)
-
-**User Agent Surfer** (uasurfer) is a lightweight Golang package that parses and abstracts [HTTP User-Agent strings](https://en.wikipedia.org/wiki/User_agent) with particular attention to device type.
-
-The following information is returned by uasurfer from a raw HTTP User-Agent string:
-
-| Name | Example | Coverage in 192,792 parses |
-|----------------|---------|--------------------------------|
-| Browser name | `chrome` | 99.85% |
-| Browser version | `53` | 99.17% |
-| Platform | `ipad` | 99.97% |
-| OS name | `ios` | 99.96% |
-| OS version | `10` | 98.81% |
-| Device type | `tablet` | 99.98% |
-
-Layout engine, browser language, and other esoteric attributes are not parsed.
-
-Coverage is estimated from a random sample of real UA strings collected across thousands of sources in the US and EU in mid-2016.
-
-## Usage
-
-### Parse(ua string) Function
-
-The `Parse()` function accepts a user agent `string` and returns a UserAgent struct with named constants and integers for the version (major, minor, and patch separately), plus the full UA string that was parsed (lowercase). A string can be retrieved by calling `.String()` on a value, such as `uasurfer.BrowserName.String()`.
-
-```
-// Define a user agent string
-myUA := "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.85 Safari/537.36"
-
-// Parse() returns all attributes, including returning the full UA string last
-ua, uaString := uasurfer.Parse(myUA)
-```
-
-where example UserAgent is:
-```
-{
- Browser {
- BrowserName: BrowserChrome,
- Version: {
- Major: 45,
- Minor: 0,
- Patch: 2454,
- },
- },
- OS {
- Platform: PlatformMac,
- Name: OSMacOSX,
- Version: {
- Major: 10,
- Minor: 10,
- Patch: 5,
- },
- },
- DeviceType: DeviceComputer,
-}
-```
-
-**Usage note:** Some OSes do not return a version; see the docs below. Linux is typically not reported with a specific distro name or version.
-
-#### Browser Name
-* `BrowserChrome` - Google [Chrome](https://en.wikipedia.org/wiki/Google_Chrome), [Chromium](https://en.wikipedia.org/wiki/Chromium_(web_browser))
-* `BrowserSafari` - Apple [Safari](https://en.wikipedia.org/wiki/Safari_(web_browser)), Google Search ([GSA](https://itunes.apple.com/us/app/google/id284815942))
-* `BrowserIE` - Microsoft [Internet Explorer](https://en.wikipedia.org/wiki/Internet_Explorer), [Edge](https://en.wikipedia.org/wiki/Microsoft_Edge)
-* `BrowserFirefox` - Mozilla [Firefox](https://en.wikipedia.org/wiki/Firefox), GNU [IceCat](https://en.wikipedia.org/wiki/GNU_IceCat), [Iceweasel](https://en.wikipedia.org/wiki/Mozilla_Corporation_software_rebranded_by_the_Debian_project#Iceweasel), [Seamonkey](https://en.wikipedia.org/wiki/SeaMonkey)
-* `BrowserAndroid` - Android [WebView](https://developer.chrome.com/multidevice/webview/overview) (Android OS <4.4 only)
-* `BrowserOpera` - [Opera](https://en.wikipedia.org/wiki/Opera_(web_browser))
-* `BrowserUCBrowser` - [UC Browser](https://en.wikipedia.org/wiki/UC_Browser)
-* `BrowserSilk` - Amazon [Silk](https://en.wikipedia.org/wiki/Amazon_Silk)
-* `BrowserSpotify` - [Spotify](https://en.wikipedia.org/wiki/Spotify#Clients) desktop client
-* `BrowserBlackberry` - RIM [BlackBerry](https://en.wikipedia.org/wiki/BlackBerry)
-* `BrowserUnknown` - Unknown
-
-#### Browser Version
-
-Browser version returns an `int` for the major version attribute of the User-Agent string; minor and patch versions are also available via the `Version` struct. For example, Chrome 45.0.23423 would return `45`. The intention is to support math operations on versions, such as "do XYZ for Chrome version >23".
-
-An unknown version is returned as `0`.
-
-#### Platform
-* `PlatformWindows` - Microsoft Windows
-* `PlatformMac` - Apple Macintosh
-* `PlatformLinux` - Linux, including Android and other OSes
-* `PlatformiPad` - Apple iPad
-* `PlatformiPhone` - Apple iPhone
-* `PlatformBlackberry` - RIM Blackberry
-* `PlatformWindowsPhone` - Microsoft Windows Phone & Mobile
-* `PlatformKindle` - Amazon Kindle & Kindle Fire
-* `PlatformPlaystation` - Sony Playstation, Vita, PSP
-* `PlatformXbox` - Microsoft Xbox
-* `PlatformNintendo` - Nintendo DS, Wii, etc.
-* `PlatformUnknown` - Unknown
-
-#### OS Name
-* `OSWindows`
-* `OSMacOSX` - includes "macOS Sierra"
-* `OSiOS`
-* `OSAndroid`
-* `OSChromeOS`
-* `OSWebOS`
-* `OSLinux`
-* `OSPlaystation`
-* `OSXbox`
-* `OSNintendo`
-* `OSUnknown`
-
-#### OS Version
-
-OS X major version is always 10, with consecutive minor versions indicating major releases (10 - Yosemite, 11 - El Capitan, 12 - Sierra, etc.). The Windows version is the NT version. `Version{0, 0, 0}` indicates the version is unknown or was not evaluated.
-Versions can be compared using the `Less` function: `if ver1.Less(ver2) {}`
-
-Here are some examples across the platform, os.name, and os.version:
-
-* For Windows XP (Windows NT 5.1), "`PlatformWindows`" is the platform, "`OSWindows`" is the name, and `{5, 1, 0}` the version.
-* For OS X 10.5.1, "`PlatformMac`" is the platform, "`OSMacOSX`" the name, and `{10, 5, 1}` the version.
-* For Android 5.1, "`PlatformLinux`" is the platform, "`OSAndroid`" is the name, and `{5, 1, 0}` the version.
-* For iOS 5.1, "`PlatformiPhone`" or "`PlatformiPad`" is the platform, "`OSiOS`" is the name, and `{5, 1, 0}` the version.
-
-###### Windows Version Guide
-
-* Windows 10 - `{10, 0, 0}`
-* Windows 8.1 - `{6, 3, 0}`
-* Windows 8 - `{6, 2, 0}`
-* Windows 7 - `{6, 1, 0}`
-* Windows Vista - `{6, 0, 0}`
-* Windows XP - `{5, 1, 0}` or `{5, 2, 0}`
-* Windows 2000 - `{5, 0, 0}`
-
-Windows 95, 98, and ME represent 0.01% of traffic worldwide and are not available through this package at this time.
-
-#### DeviceType
-DeviceType is typically quite accurate, though distinguishing between phones and tablets on Android is not always possible due to how some vendors design their UA strings. A mobile Android device without a tablet indicator defaults to being classified as a phone. DeviceTV covers major TV brands such as Philips, Sharp and Vizio, and streaming boxes from Apple, Google, Roku and Amazon.
-
-* `DeviceComputer`
-* `DevicePhone`
-* `DeviceTablet`
-* `DeviceTV`
-* `DeviceConsole`
-* `DeviceWearable`
-* `DeviceUnknown`
-
-## Example Combinations of Attributes
-* Surface RT -> `OSWindows`, `DeviceTablet`, OSVersion >= `6`
-* Android Tablet -> `OSAndroid`, `DeviceTablet`
-* Microsoft Edge -> `BrowserIE`, BrowserVersion >= `12.0.0`
-
-## To do
-
-* Remove compiled regexp in favor of strings.Contains wherever possible (lowers mem/alloc)
-* Better version support on Firefox derivatives (e.g. SeaMonkey)
-* Potential additional browser support:
- * "NetFront" (1% share in India)
- * "QQ Browser" (6.5% share in China)
- * "Sogou Explorer" (5% share in China)
- * "Maxthon" (1.5% share in China)
- * "Nokia"
-* Potential additional OS support:
- * "Nokia" (5% share in India)
- * "Series 40" (5.5% share in India)
- * Windows 2003 Server
-* iOS Safari browser identification based on iOS version
-* Add Android version to browser identification
-* old Macs
- * "opera/9.64 (macintosh; ppc mac os x; u; en) presto/2.1.1"
-* old Windows
- * "mozilla/5.0 (windows nt 4.0; wow64) applewebkit/537.36 (khtml, like gecko) chrome/37.0.2049.0 safari/537.36"
diff --git a/vendor/github.com/avct/uasurfer/browser.go b/vendor/github.com/avct/uasurfer/browser.go
deleted file mode 100644
index e156818ab..000000000
--- a/vendor/github.com/avct/uasurfer/browser.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package uasurfer
-
-import (
- "strings"
-)
-
-// Browser struct contains the lowercase name of the browser, along
-// with its browser version number. Browsers are grouped together without
-// consideration for device. For example, Chrome (Chrome/43.0) and Chrome for iOS
-// (CriOS/43.0) would both return as "chrome" (name) and 43.0 (version). Similarly
-// Internet Explorer 11 and Edge 12 would return as "ie" and "11" or "12", respectively.
-// type Browser struct {
-// Name BrowserName
-// Version struct {
-// Major int
-// Minor int
-// Patch int
-// }
-// }
-
-// evalBrowserName retrieves the browser name from the UA string
-func (u *UserAgent) evalBrowserName(ua string) bool {
- // Blackberry goes first because it reads as MSIE & Safari
- if strings.Contains(ua, "blackberry") || strings.Contains(ua, "playbook") || strings.Contains(ua, "bb10") || strings.Contains(ua, "rim ") {
- u.Browser.Name = BrowserBlackberry
- return u.isBot()
- }
-
- if strings.Contains(ua, "applewebkit") {
- switch {
- case strings.Contains(ua, "opr/") || strings.Contains(ua, "opios/"):
- u.Browser.Name = BrowserOpera
-
- case strings.Contains(ua, "silk/"):
- u.Browser.Name = BrowserSilk
-
- case strings.Contains(ua, "edge/") || strings.Contains(ua, "iemobile/") || strings.Contains(ua, "msie "):
- u.Browser.Name = BrowserIE
-
- case strings.Contains(ua, "ucbrowser/") || strings.Contains(ua, "ucweb/"):
- u.Browser.Name = BrowserUCBrowser
-
- // Edge, Silk and other chrome-identifying browsers must evaluate before chrome, unless we want to add more overhead
- case strings.Contains(ua, "chrome/") || strings.Contains(ua, "crios/") || strings.Contains(ua, "chromium/") || strings.Contains(ua, "crmo/"):
- u.Browser.Name = BrowserChrome
-
- case strings.Contains(ua, "android") && !strings.Contains(ua, "chrome/") && strings.Contains(ua, "version/") && !strings.Contains(ua, "like android"):
- // Android WebView on Android >= 4.4 is purposefully being identified as Chrome above -- https://developer.chrome.com/multidevice/webview/overview
- u.Browser.Name = BrowserAndroid
-
- case strings.Contains(ua, "fxios"):
- u.Browser.Name = BrowserFirefox
-
- case strings.Contains(ua, " spotify/"):
- u.Browser.Name = BrowserSpotify
-
- // AppleBot uses webkit signature as well
- case strings.Contains(ua, "applebot"):
- u.Browser.Name = BrowserAppleBot
-
- // presume it's safari unless an esoteric browser is being specified (webOSBrowser, SamsungBrowser, etc.)
- case strings.Contains(ua, "like gecko") && strings.Contains(ua, "mozilla/") && strings.Contains(ua, "safari/") && !strings.Contains(ua, "linux") && !strings.Contains(ua, "android") && !strings.Contains(ua, "browser/") && !strings.Contains(ua, "os/"):
- u.Browser.Name = BrowserSafari
-
- // if we got this far and the device is iPhone or iPad, assume safari. Some agents don't actually contain the word "safari"
- case strings.Contains(ua, "iphone") || strings.Contains(ua, "ipad"):
- u.Browser.Name = BrowserSafari
-
- // Google's search app on iPhone, leverages native Safari rather than Chrome
- case strings.Contains(ua, " gsa/"):
- u.Browser.Name = BrowserSafari
-
- default:
- goto notwebkit
-
- }
- return u.isBot()
- }
-
-notwebkit:
- switch {
- case strings.Contains(ua, "msie") || strings.Contains(ua, "trident"):
- u.Browser.Name = BrowserIE
-
- case strings.Contains(ua, "gecko") && (strings.Contains(ua, "firefox") || strings.Contains(ua, "iceweasel") || strings.Contains(ua, "seamonkey") || strings.Contains(ua, "icecat")):
- u.Browser.Name = BrowserFirefox
-
- case strings.Contains(ua, "presto") || strings.Contains(ua, "opera"):
- u.Browser.Name = BrowserOpera
-
- case strings.Contains(ua, "ucbrowser"):
- u.Browser.Name = BrowserUCBrowser
-
- case strings.Contains(ua, "applebot"):
- u.Browser.Name = BrowserAppleBot
-
- case strings.Contains(ua, "baiduspider"):
- u.Browser.Name = BrowserBaiduBot
-
- case strings.Contains(ua, "adidxbot") || strings.Contains(ua, "bingbot") || strings.Contains(ua, "bingpreview"):
- u.Browser.Name = BrowserBingBot
-
- case strings.Contains(ua, "duckduckbot"):
- u.Browser.Name = BrowserDuckDuckGoBot
-
- case strings.Contains(ua, "facebot") || strings.Contains(ua, "facebookexternalhit"):
- u.Browser.Name = BrowserFacebookBot
-
- case strings.Contains(ua, "googlebot"):
- u.Browser.Name = BrowserGoogleBot
-
- case strings.Contains(ua, "linkedinbot"):
- u.Browser.Name = BrowserLinkedInBot
-
- case strings.Contains(ua, "msnbot"):
- u.Browser.Name = BrowserMsnBot
-
- case strings.Contains(ua, "pingdom.com_bot"):
- u.Browser.Name = BrowserPingdomBot
-
- case strings.Contains(ua, "twitterbot"):
- u.Browser.Name = BrowserTwitterBot
-
- case strings.Contains(ua, "yandex") || strings.Contains(ua, "yadirectfetcher"):
- u.Browser.Name = BrowserYandexBot
-
- case strings.Contains(ua, "yahoo"):
- u.Browser.Name = BrowserYahooBot
-
- case strings.Contains(ua, "phantomjs"):
- u.Browser.Name = BrowserBot
-
- default:
- u.Browser.Name = BrowserUnknown
-
- }
-
- return u.isBot()
-}
-
-// evalBrowserVersion retrieves the browser version.
-// Methods used, in order:
-// 1st: look for a generic version/#
-// 2nd: look for browser-specific tokens (e.g. chrome/34)
-// 3rd: infer from the OS (iOS only)
-func (u *UserAgent) evalBrowserVersion(ua string) {
- // if there is a 'version/#' attribute with numeric version, use it -- except for Chrome since Android vendors sometimes hijack version/#
- if u.Browser.Name != BrowserChrome && u.Browser.Version.findVersionNumber(ua, "version/") {
- return
- }
-
- switch u.Browser.Name {
- case BrowserChrome:
- // match both chrome and crios
- _ = u.Browser.Version.findVersionNumber(ua, "chrome/") || u.Browser.Version.findVersionNumber(ua, "crios/") || u.Browser.Version.findVersionNumber(ua, "crmo/")
-
- case BrowserIE:
- if u.Browser.Version.findVersionNumber(ua, "msie ") || u.Browser.Version.findVersionNumber(ua, "edge/") {
- return
- }
-
- // get MSIE version from trident version https://en.wikipedia.org/wiki/Trident_(layout_engine)
- if u.Browser.Version.findVersionNumber(ua, "trident/") {
- // convert trident versions 3-7 to MSIE version
- if (u.Browser.Version.Major >= 3) && (u.Browser.Version.Major <= 7) {
- u.Browser.Version.Major += 4
- }
- }
-
- case BrowserFirefox:
- _ = u.Browser.Version.findVersionNumber(ua, "firefox/") || u.Browser.Version.findVersionNumber(ua, "fxios/")
-
-	case BrowserSafari: // typically executes when we're on iOS and not using a familiar browser
- u.Browser.Version = u.OS.Version
- // early Safari used a version number +1 to OS version
- if (u.Browser.Version.Major <= 3) && (u.Browser.Version.Major >= 1) {
- u.Browser.Version.Major++
- }
-
- case BrowserUCBrowser:
- _ = u.Browser.Version.findVersionNumber(ua, "ucbrowser/")
-
- case BrowserOpera:
- _ = u.Browser.Version.findVersionNumber(ua, "opr/") || u.Browser.Version.findVersionNumber(ua, "opios/") || u.Browser.Version.findVersionNumber(ua, "opera/")
-
- case BrowserSilk:
- _ = u.Browser.Version.findVersionNumber(ua, "silk/")
-
- case BrowserSpotify:
- _ = u.Browser.Version.findVersionNumber(ua, "spotify/")
- }
-}
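The Trident fallback above (Trident 3-7 maps to MSIE 7-11 by adding 4) can be exercised end to end through the exported `Parse`; a sketch as a Go example test, assuming the API declared in `uasurfer.go` below:

```
package uasurfer_test

import (
	"fmt"

	"github.com/avct/uasurfer"
)

func ExampleParse_trident() {
	// Trident/7.0 with no "msie " token resolves to IE 11 (7 + 4).
	ua := uasurfer.Parse("Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko")
	fmt.Println(ua.Browser.Name.String(), ua.Browser.Version.Major)
	// Output: BrowserIE 11
}
```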
diff --git a/vendor/github.com/avct/uasurfer/const_string.go b/vendor/github.com/avct/uasurfer/const_string.go
deleted file mode 100644
index 2fa21d86d..000000000
--- a/vendor/github.com/avct/uasurfer/const_string.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Code generated by "stringer -type=DeviceType,BrowserName,OSName,Platform -output=const_string.go"; DO NOT EDIT.
-
-package uasurfer
-
-import "fmt"
-
-const _DeviceType_name = "DeviceUnknownDeviceComputerDeviceTabletDevicePhoneDeviceConsoleDeviceWearableDeviceTV"
-
-var _DeviceType_index = [...]uint8{0, 13, 27, 39, 50, 63, 77, 85}
-
-func (i DeviceType) String() string {
- if i < 0 || i >= DeviceType(len(_DeviceType_index)-1) {
- return fmt.Sprintf("DeviceType(%d)", i)
- }
- return _DeviceType_name[_DeviceType_index[i]:_DeviceType_index[i+1]]
-}
-
-const _BrowserName_name = "BrowserUnknownBrowserChromeBrowserIEBrowserSafariBrowserFirefoxBrowserAndroidBrowserOperaBrowserBlackberryBrowserUCBrowserBrowserSilkBrowserNokiaBrowserNetFrontBrowserQQBrowserMaxthonBrowserSogouExplorerBrowserSpotifyBrowserBotBrowserAppleBotBrowserBaiduBotBrowserBingBotBrowserDuckDuckGoBotBrowserFacebookBotBrowserGoogleBotBrowserLinkedInBotBrowserMsnBotBrowserPingdomBotBrowserTwitterBotBrowserYandexBotBrowserYahooBot"
-
-var _BrowserName_index = [...]uint16{0, 14, 27, 36, 49, 63, 77, 89, 106, 122, 133, 145, 160, 169, 183, 203, 217, 227, 242, 257, 271, 291, 309, 325, 343, 356, 373, 390, 406, 421}
-
-func (i BrowserName) String() string {
- if i < 0 || i >= BrowserName(len(_BrowserName_index)-1) {
- return fmt.Sprintf("BrowserName(%d)", i)
- }
- return _BrowserName_name[_BrowserName_index[i]:_BrowserName_index[i+1]]
-}
-
-const _OSName_name = "OSUnknownOSWindowsPhoneOSWindowsOSMacOSXOSiOSOSAndroidOSBlackberryOSChromeOSOSKindleOSWebOSOSLinuxOSPlaystationOSXboxOSNintendoOSBot"
-
-var _OSName_index = [...]uint8{0, 9, 23, 32, 40, 45, 54, 66, 76, 84, 91, 98, 111, 117, 127, 132}
-
-func (i OSName) String() string {
- if i < 0 || i >= OSName(len(_OSName_index)-1) {
- return fmt.Sprintf("OSName(%d)", i)
- }
- return _OSName_name[_OSName_index[i]:_OSName_index[i+1]]
-}
-
-const _Platform_name = "PlatformUnknownPlatformWindowsPlatformMacPlatformLinuxPlatformiPadPlatformiPhonePlatformiPodPlatformBlackberryPlatformWindowsPhonePlatformPlaystationPlatformXboxPlatformNintendoPlatformBot"
-
-var _Platform_index = [...]uint8{0, 15, 30, 41, 54, 66, 80, 92, 110, 130, 149, 161, 177, 188}
-
-func (i Platform) String() string {
- if i < 0 || i >= Platform(len(_Platform_index)-1) {
- return fmt.Sprintf("Platform(%d)", i)
- }
- return _Platform_name[_Platform_index[i]:_Platform_index[i+1]]
-}
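The generated code above follows the standard stringer layout: one concatenated name string plus an offset table, so `String()` is two slice operations rather than a map lookup. The same pattern by hand, for a hypothetical two-value type (illustrative only, not part of the package):

```
package main

import "fmt"

type Suit int

const (
	SuitHearts Suit = iota
	SuitSpades
)

// One concatenated string of names, plus byte offsets into it.
const _Suit_name = "SuitHeartsSuitSpades"

var _Suit_index = [...]uint8{0, 10, 20}

func (i Suit) String() string {
	if i < 0 || i >= Suit(len(_Suit_index)-1) {
		return fmt.Sprintf("Suit(%d)", i)
	}
	return _Suit_name[_Suit_index[i]:_Suit_index[i+1]]
}

func main() {
	fmt.Println(SuitSpades) // SuitSpades
}
```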
diff --git a/vendor/github.com/avct/uasurfer/device.go b/vendor/github.com/avct/uasurfer/device.go
deleted file mode 100644
index 70c00b112..000000000
--- a/vendor/github.com/avct/uasurfer/device.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package uasurfer
-
-import (
- "strings"
-)
-
-func (u *UserAgent) evalDevice(ua string) {
- switch {
-
- case u.OS.Platform == PlatformWindows || u.OS.Platform == PlatformMac || u.OS.Name == OSChromeOS:
- if strings.Contains(ua, "mobile") || strings.Contains(ua, "touch") {
- u.DeviceType = DeviceTablet // windows rt, linux haxor tablets
- return
- }
- u.DeviceType = DeviceComputer
-
- case u.OS.Platform == PlatformiPad || u.OS.Platform == PlatformiPod || strings.Contains(ua, "tablet") || strings.Contains(ua, "kindle/") || strings.Contains(ua, "playbook"):
- u.DeviceType = DeviceTablet
-
- case u.OS.Platform == PlatformiPhone || u.OS.Platform == PlatformBlackberry || strings.Contains(ua, "phone"):
- u.DeviceType = DevicePhone
-
- // long list of smarttv and tv dongle identifiers
- case strings.Contains(ua, "tv") || strings.Contains(ua, "crkey") || strings.Contains(ua, "googletv") || strings.Contains(ua, "aftb") || strings.Contains(ua, "adt-") || strings.Contains(ua, "roku") || strings.Contains(ua, "viera") || strings.Contains(ua, "aquos") || strings.Contains(ua, "dtv") || strings.Contains(ua, "appletv") || strings.Contains(ua, "smarttv") || strings.Contains(ua, "tuner") || strings.Contains(ua, "smart-tv") || strings.Contains(ua, "hbbtv") || strings.Contains(ua, "netcast") || strings.Contains(ua, "vizio"):
- u.DeviceType = DeviceTV
-
- case u.OS.Name == OSAndroid:
- // android phones report as "mobile", android tablets should not but often do -- http://android-developers.blogspot.com/2010/12/android-browser-user-agent-issues.html
- if strings.Contains(ua, "mobile") {
- u.DeviceType = DevicePhone
- return
- }
-
- if strings.Contains(ua, "tablet") || strings.Contains(ua, "nexus 7") || strings.Contains(ua, "nexus 9") || strings.Contains(ua, "nexus 10") || strings.Contains(ua, "xoom") {
- u.DeviceType = DeviceTablet
- return
- }
-
- u.DeviceType = DevicePhone // default to phone
-
- case u.OS.Platform == PlatformPlaystation || u.OS.Platform == PlatformXbox || u.OS.Platform == PlatformNintendo:
- u.DeviceType = DeviceConsole
-
- case strings.Contains(ua, "glass") || strings.Contains(ua, "watch") || strings.Contains(ua, "sm-v"):
- u.DeviceType = DeviceWearable
-
- // specifically above "mobile" string check as Kindle Fire tablets report as "mobile"
- case u.Browser.Name == BrowserSilk || u.OS.Name == OSKindle && !strings.Contains(ua, "sd4930ur"):
- u.DeviceType = DeviceTablet
-
- case strings.Contains(ua, "mobile") || strings.Contains(ua, "touch") || strings.Contains(ua, " mobi") || strings.Contains(ua, "webos"): //anything "mobile"/"touch" that didn't get captured as tablet, console or wearable is presumed a phone
- u.DeviceType = DevicePhone
-
- case u.OS.Name == OSLinux: // linux goes last since it's in so many other device types (tvs, wearables, android-based stuff)
- u.DeviceType = DeviceComputer
-
- default:
- u.DeviceType = DeviceUnknown
- }
-}
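Case order in evalDevice above is load-bearing: the Silk/Kindle case sits before the generic "mobile"/"touch" fallback, so a Fire tablet that reports "Mobile" still classifies as a tablet. A sketch exercising that path (UA string taken from the test file below; assumes the exported API from `uasurfer.go`):

```
package uasurfer_test

import (
	"fmt"

	"github.com/avct/uasurfer"
)

func ExampleParse_fireTablet() {
	// "Mobile" appears in the UA, but the Silk/Kindle case wins.
	ua := uasurfer.Parse("Mozilla/5.0 (Linux; U; Android 4.2.2; en-us; KFTHWI Build/JDQ39) AppleWebKit/537.36 (KHTML, like Gecko) Silk/3.22 like Chrome/34.0.1847.137 Mobile Safari/537.36")
	fmt.Println(ua.OS.Name.String(), ua.DeviceType.String())
	// Output: OSKindle DeviceTablet
}
```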
diff --git a/vendor/github.com/avct/uasurfer/system.go b/vendor/github.com/avct/uasurfer/system.go
deleted file mode 100644
index 45a80409f..000000000
--- a/vendor/github.com/avct/uasurfer/system.go
+++ /dev/null
@@ -1,332 +0,0 @@
-package uasurfer
-
-import (
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- amazonFireFingerprint = regexp.MustCompile("\\s(k[a-z]{3,5}|sd\\d{4}ur)\\s") //tablet or phone
-)
-
-func (u *UserAgent) evalOS(ua string) bool {
-
- s := strings.IndexRune(ua, '(')
- e := strings.IndexRune(ua, ')')
- if s > e {
- s = 0
- e = len(ua)
- }
- if e == -1 {
- e = len(ua)
- }
-
- agentPlatform := ua[s+1 : e]
- specsEnd := strings.Index(agentPlatform, ";")
- var specs string
- if specsEnd != -1 {
- specs = agentPlatform[:specsEnd]
- } else {
- specs = agentPlatform
- }
-
- //strict OS & version identification
- switch specs {
- case "android":
- u.evalLinux(ua, agentPlatform)
-
- case "bb10", "playbook":
- u.OS.Platform = PlatformBlackberry
- u.OS.Name = OSBlackberry
-
- case "x11", "linux":
- u.evalLinux(ua, agentPlatform)
-
- case "ipad", "iphone", "ipod touch", "ipod":
- u.evaliOS(specs, agentPlatform)
-
- case "macintosh":
- u.evalMacintosh(ua)
-
- default:
- switch {
- // Blackberry
- case strings.Contains(ua, "blackberry") || strings.Contains(ua, "playbook"):
- u.OS.Platform = PlatformBlackberry
- u.OS.Name = OSBlackberry
-
- // Windows Phone
- case strings.Contains(agentPlatform, "windows phone "):
- u.evalWindowsPhone(agentPlatform)
-
- // Windows, Xbox
- case strings.Contains(ua, "windows "):
- u.evalWindows(ua)
-
- // Kindle
- case strings.Contains(ua, "kindle/") || amazonFireFingerprint.MatchString(agentPlatform):
- u.OS.Platform = PlatformLinux
- u.OS.Name = OSKindle
-
- // Linux (broader attempt)
- case strings.Contains(ua, "linux"):
- u.evalLinux(ua, agentPlatform)
-
- // WebOS (non-linux flagged)
- case strings.Contains(ua, "webos") || strings.Contains(ua, "hpwos"):
- u.OS.Platform = PlatformLinux
- u.OS.Name = OSWebOS
-
- // Nintendo
- case strings.Contains(ua, "nintendo"):
- u.OS.Platform = PlatformNintendo
- u.OS.Name = OSNintendo
-
- // Playstation
- case strings.Contains(ua, "playstation") || strings.Contains(ua, "vita") || strings.Contains(ua, "psp"):
- u.OS.Platform = PlatformPlaystation
- u.OS.Name = OSPlaystation
-
- // Android
- case strings.Contains(ua, "android"):
- u.evalLinux(ua, agentPlatform)
-
- default:
- u.OS.Platform = PlatformUnknown
- u.OS.Name = OSUnknown
- }
- }
-
- return u.isBot()
-}
-
-func (u *UserAgent) isBot() bool {
-
- if u.OS.Platform == PlatformBot || u.OS.Name == OSBot {
- u.DeviceType = DeviceComputer
- return true
- }
-
- if u.Browser.Name >= BrowserBot && u.Browser.Name <= BrowserYahooBot {
- u.OS.Platform = PlatformBot
- u.OS.Name = OSBot
- u.DeviceType = DeviceComputer
- return true
- }
-
- return false
-}
-
-// evalLinux sets the `Platform`, `OSName` and Version for UAs with
-// 'linux' listed as their platform.
-func (u *UserAgent) evalLinux(ua string, agentPlatform string) {
-
- switch {
- // Kindle Fire
- case strings.Contains(ua, "kindle") || amazonFireFingerprint.MatchString(agentPlatform):
- // get the version of Android if available, though we don't call this OSAndroid
- u.OS.Platform = PlatformLinux
- u.OS.Name = OSKindle
- u.OS.Version.findVersionNumber(agentPlatform, "android ")
-
- // Android, Kindle Fire
- case strings.Contains(ua, "android") || strings.Contains(ua, "googletv"):
- // Android
- u.OS.Platform = PlatformLinux
- u.OS.Name = OSAndroid
- u.OS.Version.findVersionNumber(agentPlatform, "android ")
-
- // ChromeOS
- case strings.Contains(ua, "cros"):
- u.OS.Platform = PlatformLinux
- u.OS.Name = OSChromeOS
-
- // WebOS
- case strings.Contains(ua, "webos") || strings.Contains(ua, "hpwos"):
- u.OS.Platform = PlatformLinux
- u.OS.Name = OSWebOS
-
- // Linux, "Linux-like"
- case strings.Contains(ua, "x11") || strings.Contains(ua, "bsd") || strings.Contains(ua, "suse") || strings.Contains(ua, "debian") || strings.Contains(ua, "ubuntu"):
- u.OS.Platform = PlatformLinux
- u.OS.Name = OSLinux
-
- default:
- u.OS.Platform = PlatformLinux
- u.OS.Name = OSLinux
- }
-}
-
-// evaliOS sets the `Platform`, `OSName` and Version for UAs with
-// 'ipad', 'iphone' or 'ipod' listed as their platform.
-func (u *UserAgent) evaliOS(uaPlatform string, agentPlatform string) {
-
- switch uaPlatform {
- // iPhone
- case "iphone":
- u.OS.Platform = PlatformiPhone
- u.OS.Name = OSiOS
- u.OS.getiOSVersion(agentPlatform)
-
- // iPad
- case "ipad":
- u.OS.Platform = PlatformiPad
- u.OS.Name = OSiOS
- u.OS.getiOSVersion(agentPlatform)
-
- // iPod
- case "ipod touch", "ipod":
- u.OS.Platform = PlatformiPod
- u.OS.Name = OSiOS
- u.OS.getiOSVersion(agentPlatform)
-
- default:
- u.OS.Platform = PlatformiPad
- u.OS.Name = OSUnknown
- }
-}
-
-func (u *UserAgent) evalWindowsPhone(agentPlatform string) {
- u.OS.Platform = PlatformWindowsPhone
-
- if u.OS.Version.findVersionNumber(agentPlatform, "windows phone os ") || u.OS.Version.findVersionNumber(agentPlatform, "windows phone ") {
- u.OS.Name = OSWindowsPhone
- } else {
- u.OS.Name = OSUnknown
- }
-}
-
-func (u *UserAgent) evalWindows(ua string) {
-
- switch {
- //Xbox -- it reads just like Windows
- case strings.Contains(ua, "xbox"):
- u.OS.Platform = PlatformXbox
- u.OS.Name = OSXbox
- if !u.OS.Version.findVersionNumber(ua, "windows nt ") {
- u.OS.Version.Major = 6
- u.OS.Version.Minor = 0
- u.OS.Version.Patch = 0
- }
-
- // No windows version
- case !strings.Contains(ua, "windows "):
- u.OS.Platform = PlatformWindows
- u.OS.Name = OSUnknown
-
- case strings.Contains(ua, "windows nt ") && u.OS.Version.findVersionNumber(ua, "windows nt "):
- u.OS.Platform = PlatformWindows
- u.OS.Name = OSWindows
-
- case strings.Contains(ua, "windows xp"):
- u.OS.Platform = PlatformWindows
- u.OS.Name = OSWindows
- u.OS.Version.Major = 5
- u.OS.Version.Minor = 1
- u.OS.Version.Patch = 0
-
- default:
- u.OS.Platform = PlatformWindows
- u.OS.Name = OSUnknown
-
- }
-}
-
-func (u *UserAgent) evalMacintosh(uaPlatformGroup string) {
- u.OS.Platform = PlatformMac
- if i := strings.Index(uaPlatformGroup, "os x 10"); i != -1 {
- u.OS.Name = OSMacOSX
- u.OS.Version.parse(uaPlatformGroup[i+5:])
-
- return
- }
- u.OS.Name = OSUnknown
-}
-
-func (v *Version) findVersionNumber(s string, m string) bool {
- if ind := strings.Index(s, m); ind != -1 {
- return v.parse(s[ind+len(m):])
- }
- return false
-}
-
-// getiOSVersion accepts the platform portion of a UA string and parses
-// it into o.Version.
-func (o *OS) getiOSVersion(uaPlatformGroup string) {
- if i := strings.Index(uaPlatformGroup, "cpu iphone os "); i != -1 {
- o.Version.parse(uaPlatformGroup[i+14:])
- return
- }
-
- if i := strings.Index(uaPlatformGroup, "cpu os "); i != -1 {
- o.Version.parse(uaPlatformGroup[i+7:])
- return
- }
-
- o.Version.parse(uaPlatformGroup)
-}
-
-// strToInt accepts a string and returns an `int`,
-// with 0 as the default.
-func strToInt(str string) int {
- i, _ := strconv.Atoi(str)
- return i
-}
-
-// parse accepts a string and fills in the Version,
-// with {0, 0, 0} as the default.
-func (v *Version) parse(str string) bool {
- if len(str) == 0 || str[0] < '0' || str[0] > '9' {
- return false
- }
- for i := 0; i < 3; i++ {
- empty := true
- val := 0
- l := len(str) - 1
-
- for k, c := range str {
- if c >= '0' && c <= '9' {
- if empty {
- val = int(c) - 48
- empty = false
- if k == l {
- str = str[:0]
- }
- continue
- }
-
- if val == 0 {
- if c == '0' {
- if k == l {
- str = str[:0]
- }
- continue
- }
- str = str[k:]
- break
- }
-
- val = 10*val + int(c) - 48
- if k == l {
- str = str[:0]
- }
- continue
- }
- str = str[k+1:]
- break
- }
-
- switch i {
- case 0:
- v.Major = val
-
- case 1:
- v.Minor = val
-
- case 2:
- v.Patch = val
- }
- }
- return true
-}
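`Version.parse` above reads up to three numeric fields from the front of a string, and `findVersionNumber` anchors it just past a marker substring. Both are unexported, so a sketch of an in-package test (test name is illustrative):

```
package uasurfer

import "testing"

func TestFindVersionNumber(t *testing.T) {
	var v Version
	// "windows nt " is the marker; parse then reads "6.3" and stops at ';'.
	if !v.findVersionNumber("mozilla/5.0 (windows nt 6.3; wow64)", "windows nt ") {
		t.Fatal("expected a version after the marker")
	}
	if (v != Version{Major: 6, Minor: 3, Patch: 0}) {
		t.Fatalf("got %+v, want {6 3 0}", v)
	}
}
```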
diff --git a/vendor/github.com/avct/uasurfer/uasurfer.go b/vendor/github.com/avct/uasurfer/uasurfer.go
deleted file mode 100644
index 15aac6d40..000000000
--- a/vendor/github.com/avct/uasurfer/uasurfer.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Package uasurfer provides fast and reliable abstraction
-// of HTTP User-Agent strings. The philosophy is to identify
-// technologies that hold >1% market share, and to avoid
-// expending resources and accuracy on guessing at esoteric UA
-// strings.
-package uasurfer
-
-import "strings"
-
-//go:generate stringer -type=DeviceType,BrowserName,OSName,Platform -output=const_string.go
-
-// DeviceType (int) returns a constant.
-type DeviceType int
-
-// A complete list of supported devices in the
-// form of constants.
-const (
- DeviceUnknown DeviceType = iota
- DeviceComputer
- DeviceTablet
- DevicePhone
- DeviceConsole
- DeviceWearable
- DeviceTV
-)
-
-// BrowserName (int) returns a constant.
-type BrowserName int
-
-// A complete list of supported web browsers in the
-// form of constants.
-const (
- BrowserUnknown BrowserName = iota
- BrowserChrome
- BrowserIE
- BrowserSafari
- BrowserFirefox
- BrowserAndroid
- BrowserOpera
- BrowserBlackberry
- BrowserUCBrowser
- BrowserSilk
- BrowserNokia
- BrowserNetFront
- BrowserQQ
- BrowserMaxthon
- BrowserSogouExplorer
- BrowserSpotify
- BrowserBot // Bot list begins here
- BrowserAppleBot
- BrowserBaiduBot
- BrowserBingBot
- BrowserDuckDuckGoBot
- BrowserFacebookBot
- BrowserGoogleBot
- BrowserLinkedInBot
- BrowserMsnBot
- BrowserPingdomBot
- BrowserTwitterBot
- BrowserYandexBot
- BrowserYahooBot // Bot list ends here
-)
-
-// OSName (int) returns a constant.
-type OSName int
-
-// A complete list of supported OSes in the
-// form of constants. For handling particular versions
-// of operating systems (e.g. Windows 2000), see
-// the README.md file.
-const (
- OSUnknown OSName = iota
- OSWindowsPhone
- OSWindows
- OSMacOSX
- OSiOS
- OSAndroid
- OSBlackberry
- OSChromeOS
- OSKindle
- OSWebOS
- OSLinux
- OSPlaystation
- OSXbox
- OSNintendo
- OSBot
-)
-
-// Platform (int) returns a constant.
-type Platform int
-
-// A complete list of supported platforms in the
-// form of constants. Many OSes report their
-// true platform, such as Android OS being Linux
-// platform.
-const (
- PlatformUnknown Platform = iota
- PlatformWindows
- PlatformMac
- PlatformLinux
- PlatformiPad
- PlatformiPhone
- PlatformiPod
- PlatformBlackberry
- PlatformWindowsPhone
- PlatformPlaystation
- PlatformXbox
- PlatformNintendo
- PlatformBot
-)
-
-type Version struct {
- Major int
- Minor int
- Patch int
-}
-
-func (v Version) Less(c Version) bool {
- if v.Major < c.Major {
- return true
- }
-
- if v.Major > c.Major {
- return false
- }
-
- if v.Minor < c.Minor {
- return true
- }
-
- if v.Minor > c.Minor {
- return false
- }
-
- return v.Patch < c.Patch
-}
-
-type UserAgent struct {
- Browser Browser
- OS OS
- DeviceType DeviceType
-}
-
-type Browser struct {
- Name BrowserName
- Version Version
-}
-
-type OS struct {
- Platform Platform
- Name OSName
- Version Version
-}
-
-// Reset resets the UserAgent to its zero value
-func (ua *UserAgent) Reset() {
- ua.Browser = Browser{}
- ua.OS = OS{}
- ua.DeviceType = DeviceUnknown
-}
-
-// Parse accepts a raw user agent (string) and returns the UserAgent.
-func Parse(ua string) *UserAgent {
- dest := new(UserAgent)
- parse(ua, dest)
- return dest
-}
-
-// ParseUserAgent is the same as Parse, but populates the supplied UserAgent.
-// It is the caller's responsibility to call Reset() on the UserAgent before
-// passing it to this function.
-func ParseUserAgent(ua string, dest *UserAgent) {
- parse(ua, dest)
-}
-
-func parse(ua string, dest *UserAgent) {
- ua = normalise(ua)
- switch {
- case len(ua) == 0:
- dest.OS.Platform = PlatformUnknown
- dest.OS.Name = OSUnknown
- dest.Browser.Name = BrowserUnknown
- dest.DeviceType = DeviceUnknown
-
-	// stop on the first case returning true
- case dest.evalOS(ua):
- case dest.evalBrowserName(ua):
- default:
- dest.evalBrowserVersion(ua)
- dest.evalDevice(ua)
- }
-}
-
-// normalise normalises the user supplied agent string so that
-// we can more easily parse it.
-func normalise(ua string) string {
- if len(ua) <= 1024 {
- var buf [1024]byte
- ascii := copyLower(buf[:len(ua)], ua)
- if !ascii {
- // Fall back for non ascii characters
- return strings.ToLower(ua)
- }
- return string(buf[:len(ua)])
- }
- // Fallback for unusually long strings
- return strings.ToLower(ua)
-}
-
-// copyLower copies a lowercase version of s to b. It assumes s contains only single byte characters
-// and will panic if b is nil or is not long enough to contain all the bytes from s.
-// It returns early with false if any characters were non ascii.
-func copyLower(b []byte, s string) bool {
- for j := 0; j < len(s); j++ {
- c := s[j]
- if c > 127 {
- return false
- }
-
- if 'A' <= c && c <= 'Z' {
- c += 'a' - 'A'
- }
-
- b[j] = c
- }
- return true
-}
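`ParseUserAgent` above exists so hot paths can reuse a single `UserAgent` allocation; per its doc comment, the caller must call `Reset()` between parses. A minimal reuse loop, assuming the exported API as declared above:

```
package main

import (
	"fmt"

	"github.com/avct/uasurfer"
)

func main() {
	uas := []string{
		"Mozilla/5.0 (iPhone; CPU iPhone OS 8_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12A405 Safari/600.1.4",
		"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
	}

	var dest uasurfer.UserAgent
	for _, s := range uas {
		dest.Reset() // required before each reuse, per ParseUserAgent's contract
		uasurfer.ParseUserAgent(s, &dest)
		fmt.Println(dest.Browser.Name.String(), dest.DeviceType.String())
	}
}
```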
diff --git a/vendor/github.com/avct/uasurfer/uasurfer_test.go b/vendor/github.com/avct/uasurfer/uasurfer_test.go
deleted file mode 100644
index 8668a159f..000000000
--- a/vendor/github.com/avct/uasurfer/uasurfer_test.go
+++ /dev/null
@@ -1,1064 +0,0 @@
-package uasurfer
-
-import "testing"
-
-var testUAVars = []struct {
- UA string
- UserAgent
-}{
- // Empty
- {"",
- UserAgent{}},
-
- // Single char
- {"a",
- UserAgent{}},
-
- // Some random string
- {"some random string",
- UserAgent{}},
-
- // Potentially malformed ua
- {")(",
- UserAgent{}},
-
- // iPhone
- {"Mozilla/5.0 (iPhone; CPU iPhone OS 7_0 like Mac OS X) AppleWebKit/546.10 (KHTML, like Gecko) Version/6.0 Mobile/7E18WD Safari/8536.25",
- UserAgent{
- Browser{BrowserSafari, Version{6, 0, 0}}, OS{PlatformiPhone, OSiOS, Version{7, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (iPhone; CPU iPhone OS 8_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12A405 Safari/600.1.4",
- UserAgent{
- Browser{BrowserSafari, Version{8, 0, 0}}, OS{PlatformiPhone, OSiOS, Version{8, 0, 2}}, DevicePhone}},
-
- // iPad
- {"Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B314 Safari/531.21.10",
- UserAgent{
- Browser{BrowserSafari, Version{4, 0, 4}}, OS{PlatformiPad, OSiOS, Version{3, 2, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.17 (KHTML, like Gecko) Version/8.0 Mobile/13A175 Safari/600.1.4",
- UserAgent{
- Browser{BrowserSafari, Version{8, 0, 0}}, OS{PlatformiPad, OSiOS, Version{9, 0, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (iPhone; CPU iPhone OS 10_0 like Mac OS X) AppleWebKit/602.1.32 (KHTML, like Gecko) Version/10.0 Mobile/14A5261v Safari/602.1",
- UserAgent{
- Browser{BrowserSafari, Version{10, 0, 0}}, OS{PlatformiPhone, OSiOS, Version{10, 0, 0}}, DevicePhone}},
-
- // Chrome
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36",
- UserAgent{
- Browser{BrowserChrome, Version{43, 0, 2357}}, OS{PlatformMac, OSMacOSX, Version{10, 10, 4}}, DeviceComputer}},
-
- {"Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; en) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/534.48.3",
- UserAgent{
- Browser{BrowserChrome, Version{19, 0, 1084}}, OS{PlatformiPhone, OSiOS, Version{5, 1, 1}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; Android 6.0; Nexus 5X Build/MDB08L) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.76 Mobile Safari/537.36",
- UserAgent{
- Browser{BrowserChrome, Version{46, 0, 2490}}, OS{PlatformLinux, OSAndroid, Version{6, 0, 0}}, DevicePhone}},
-
- // Chromium (Chrome)
- {"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.19 (KHTML, like Gecko) Ubuntu/11.10 Chromium/18.0.1025.142 Chrome/18.0.1025.142 Safari/535.19",
- UserAgent{
- Browser{BrowserChrome, Version{18, 0, 1025}}, OS{PlatformLinux, OSLinux, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.85 Safari/537.36",
- UserAgent{
- Browser{BrowserChrome, Version{45, 0, 2454}}, OS{PlatformMac, OSMacOSX, Version{10, 11, 0}}, DeviceComputer}},
-
- //TODO: refactor "getVersion()" to handle this device/chrome version douchebaggery
- // {"Mozilla/5.0 (Linux; Android 4.4.2; en-gb; SAMSUNG SM-G800F Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Version/1.6 Chrome/28.0.1500.94 Mobile Safari/537.36",
- // UserAgent{
- // Browser{BrowserChrome, Version{28,0,1500}, OS{PlatformLinux, OSAndroid, Version{4,4,2}}, DevicePhone}},
-
- // Safari
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/8.0.7 Safari/600.7.12",
- UserAgent{
- Browser{BrowserSafari, Version{8, 0, 7}}, OS{PlatformMac, OSMacOSX, Version{10, 10, 4}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_5; en-us) AppleWebKit/525.26.2 (KHTML, like Gecko) Version/3.2 Safari/525.26.12",
- UserAgent{
- Browser{BrowserSafari, Version{3, 2, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 5, 5}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12) AppleWebKit/602.1.32 (KHTML, like Gecko) Version/10.0 Safari/602.1.32", // macOS Sierra dev beta
- UserAgent{
- Browser{BrowserSafari, Version{10, 0, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 12, 0}}, DeviceComputer}},
-
- // Firefox
- {"Mozilla/5.0 (iPhone; CPU iPhone OS 8_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) FxiOS/1.0 Mobile/12F69 Safari/600.1.4",
- UserAgent{
- Browser{BrowserFirefox, Version{1, 0, 0}}, OS{PlatformiPhone, OSiOS, Version{8, 3, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Android 4.4; Tablet; rv:41.0) Gecko/41.0 Firefox/41.0",
- UserAgent{
- Browser{BrowserFirefox, Version{41, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{4, 4, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (Android; Mobile; rv:40.0) Gecko/40.0 Firefox/40.0",
- UserAgent{
- Browser{BrowserFirefox, Version{40, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{0, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0",
- UserAgent{
- Browser{BrowserFirefox, Version{38, 0, 0}}, OS{PlatformLinux, OSLinux, Version{0, 0, 0}}, DeviceComputer}},
-
- // Silk
- {"Mozilla/5.0 (Linux; U; Android 4.4.3; de-de; KFTHWI Build/KTU84M) AppleWebKit/537.36 (KHTML, like Gecko) Silk/3.47 like Chrome/37.0.2026.117 Safari/537.36",
- UserAgent{
- Browser{BrowserSilk, Version{3, 47, 0}}, OS{PlatformLinux, OSKindle, Version{4, 4, 3}}, DeviceTablet}},
-
- {"Mozilla/5.0 (Linux; U; en-us; KFJWI Build/IMM76D) AppleWebKit/535.19 (KHTML like Gecko) Silk/2.4 Safari/535.19 Silk-Acceleratedtrue",
- UserAgent{
- Browser{BrowserSilk, Version{2, 4, 0}}, OS{PlatformLinux, OSKindle, Version{0, 0, 0}}, DeviceTablet}},
-
- // Opera
- {"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36 OPR/18.0.1284.68",
- UserAgent{
- Browser{BrowserOpera, Version{18, 0, 1284}}, OS{PlatformWindows, OSWindows, Version{6, 1, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (iPhone; CPU iPhone OS 8_4 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) OPiOS/10.2.0.93022 Mobile/12H143 Safari/9537.53",
- UserAgent{
- Browser{BrowserOpera, Version{10, 2, 0}}, OS{PlatformiPhone, OSiOS, Version{8, 4, 0}}, DevicePhone}},
-
- // Internet Explorer -- https://msdn.microsoft.com/en-us/library/hh869301(v=vs.85).aspx
- {"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.123",
- UserAgent{
- Browser{BrowserIE, Version{12, 123, 0}}, OS{PlatformWindows, OSWindows, Version{10, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)",
- UserAgent{
- Browser{BrowserIE, Version{10, 0, 0}}, OS{PlatformWindows, OSWindows, Version{6, 2, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Windows NT 6.3; Trident/7.0; .NET4.0E; .NET4.0C; rv:11.0) like Gecko",
- UserAgent{
- Browser{BrowserIE, Version{11, 0, 0}}, OS{PlatformWindows, OSWindows, Version{6, 3, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; DEVICE INFO) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Mobile Safari/537.36 Edge/12.123",
- UserAgent{
- Browser{BrowserIE, Version{12, 123, 0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{10, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Mobile; Windows Phone 8.1; Android 4.0; ARM; Trident/7.0; Touch; rv:11.0; IEMobile/11.0; NOKIA; Lumia 520) like iPhone OS 7_0_3 Mac OS X AppleWebKit/537 (KHTML, like Gecko) Mobile Safari/537",
- UserAgent{
- Browser{BrowserIE, Version{11, 0, 0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{8, 1, 0}}, DevicePhone}},
-
- {"Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0; SV1; .NET CLR 1.1.4322; .NET CLR 1.0.3705; .NET CLR 2.0.50727)",
- UserAgent{
- Browser{BrowserIE, Version{5, 0, 1}}, OS{PlatformWindows, OSWindows, Version{5, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/4.0; GTB6.4; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.3; OfficeLivePatch.0.0; .NET CLR 1.1.4322)",
- UserAgent{
- Browser{BrowserIE, Version{7, 0, 0}}, OS{PlatformWindows, OSWindows, Version{6, 1, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; ARM; Trident/6.0; Touch)", //Windows Surface RT tablet
- UserAgent{
- Browser{BrowserIE, Version{10, 0, 0}}, OS{PlatformWindows, OSWindows, Version{6, 2, 0}}, DeviceTablet}},
-
- // UC Browser
- {"Mozilla/5.0 (Linux; U; Android 2.3.4; en-US; MT11i Build/4.0.2.A.0.62) AppleWebKit/534.31 (KHTML, like Gecko) UCBrowser/9.0.1.275 U3/0.8.0 Mobile Safari/534.31",
- UserAgent{
- Browser{BrowserUCBrowser, Version{9, 0, 1}}, OS{PlatformLinux, OSAndroid, Version{2, 3, 4}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 4.0.4; en-US; Micromax P255 Build/IMM76D) AppleWebKit/534.31 (KHTML, like Gecko) UCBrowser/9.2.0.308 U3/0.8.0 Mobile Safari/534.31",
- UserAgent{
- Browser{BrowserUCBrowser, Version{9, 2, 0}}, OS{PlatformLinux, OSAndroid, Version{4, 0, 4}}, DevicePhone}},
-
- {"UCWEB/2.0 (Java; U; MIDP-2.0; en-US; MicromaxQ5) U2/1.0.0 UCBrowser/9.4.0.342 U2/1.0.0 Mobile",
- UserAgent{
- Browser{BrowserUCBrowser, Version{9, 4, 0}}, OS{PlatformUnknown, OSUnknown, Version{0, 0, 0}}, DevicePhone}},
-
- // Nokia Browser
- // {"Mozilla/5.0 (Series40; Nokia501/14.0.4/java_runtime_version=Nokia_Asha_1_2; Profile/MIDP-2.1 Configuration/CLDC-1.1) Gecko/20100401 S40OviBrowser/4.0.0.0.45",
- // UserAgent{
- // Browser{BrowserUnknown, Version{4,0,0}}, OS{PlatformUnknown, OSUnknown, Version{0,0,0}}, DevicePhone}},
-
- // {"Mozilla/5.0 (Symbian/3; Series60/5.3 NokiaN8-00/111.040.1511; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/535.1 (KHTML, like Gecko) NokiaBrowser/8.3.1.4 Mobile Safari/535.1",
- // UserAgent{
- // Browser{BrowserUnknown, Version{8,0,0}}, OS{PlatformUnknown, OSUnknown, Version{0,0,0}}, DevicePhone}},
-
- // {"NokiaN97/21.1.107 (SymbianOS/9.4; Series60/5.0 Mozilla/5.0; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebkit/525 (KHTML, like Gecko) BrowserNG/7.1.4",
- // BrowserUnknown, Version{7,0,0}}, OS{PlatformUnknown, OSUnknown, Version{0,0,0}}, DevicePhone}},
-
- // ChromeOS
- {"Mozilla/5.0 (X11; U; CrOS i686 9.10.0; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.253.0 Safari/532.5",
- UserAgent{
- Browser{BrowserChrome, Version{4, 0, 253}}, OS{PlatformLinux, OSChromeOS, Version{0, 0, 0}}, DeviceComputer}},
-
- // iPod, iPod Touch
- {"mozilla/5.0 (ipod touch; cpu iphone os 9_3_3 like mac os x) applewebkit/601.1.46 (khtml, like gecko) version/9.0 mobile/13g34 safari/601.1",
- UserAgent{
- Browser{BrowserSafari, Version{9, 0, 0}}, OS{PlatformiPod, OSiOS, Version{9, 3, 3}}, DeviceTablet}},
-
- {"mozilla/5.0 (ipod; cpu iphone os 6_1_6 like mac os x) applewebkit/536.26 (khtml, like gecko) version/6.0 mobile/10b500 safari/8536.25",
- UserAgent{
- Browser{BrowserSafari, Version{6, 0, 0}}, OS{PlatformiPod, OSiOS, Version{6, 1, 6}}, DeviceTablet}},
-
- // WebOS
- {"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; de-DE) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformLinux, OSWebOS, Version{0, 0, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (webOS/1.4.1.1; U; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Version/1.0 Safari/532.2 Pre/1.0",
- UserAgent{
- Browser{BrowserUnknown, Version{1, 0, 0}}, OS{PlatformLinux, OSWebOS, Version{0, 0, 0}}, DevicePhone}},
-
- // Android WebView (Android <= 4.3)
- {"Mozilla/5.0 (Linux; U; Android 2.2; en-us; DROID2 GLOBAL Build/S273) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{2, 2, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari53/4.30",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{4, 0, 3}}, DevicePhone}},
-
- // BlackBerry
- {"Mozilla/5.0 (PlayBook; U; RIM Tablet OS 2.1.0; en-US) AppleWebKit/536.2+ (KHTML, like Gecko) Version/7.2.1.0 Safari/536.2+",
- UserAgent{
- Browser{BrowserBlackberry, Version{7, 2, 1}}, OS{PlatformBlackberry, OSBlackberry, Version{0, 0, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (BB10; Kbd) AppleWebKit/537.35+ (KHTML, like Gecko) Version/10.2.1.1925 Mobile Safari/537.35+",
- UserAgent{
- Browser{BrowserBlackberry, Version{10, 2, 1}}, OS{PlatformBlackberry, OSBlackberry, Version{0, 0, 0}}, DevicePhone}},
-
- {"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0) BlackBerry8703e/4.1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/104",
- UserAgent{
- Browser{BrowserBlackberry, Version{0, 0, 0}}, OS{PlatformBlackberry, OSBlackberry, Version{0, 0, 0}}, DevicePhone}},
-
- // Windows Phone
- {"Mozilla/5.0 (compatible; MSIE 10.0; Windows Phone 8.0; Trident/6.0; IEMobile/10.0; ARM; Touch; NOKIA; Lumia 625; ANZ941)",
- UserAgent{
- Browser{BrowserIE, Version{10, 0, 0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{8, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; NOKIA; Lumia 900)",
- UserAgent{
- Browser{BrowserIE, Version{9, 0, 0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{7, 5, 0}}, DevicePhone}},
-
- // Kindle eReader
- {"Mozilla/5.0 (Linux; U; en-US) AppleWebKit/528.5+ (KHTML, like Gecko, Safari/528.5+) Version/4.0 Kindle/3.0 (screen 600×800; rotate)",
- UserAgent{
- Browser{BrowserUnknown, Version{4, 0, 0}}, OS{PlatformLinux, OSKindle, Version{0, 0, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (X11; U; Linux armv7l like Android; en-us) AppleWebKit/531.2+ (KHTML, like Gecko) Version/5.0 Safari/533.2+ Kindle/3.0+",
- UserAgent{
- Browser{BrowserUnknown, Version{5, 0, 0}}, OS{PlatformLinux, OSKindle, Version{0, 0, 0}}, DeviceTablet}},
-
- // Amazon Fire
- {"Mozilla/5.0 (Linux; U; Android 4.4.3; de-de; KFTHWI Build/KTU84M) AppleWebKit/537.36 (KHTML, like Gecko) Silk/3.67 like Chrome/39.0.2171.93 Safari/537.36",
- UserAgent{
- Browser{BrowserSilk, Version{3, 67, 0}}, OS{PlatformLinux, OSKindle, Version{4, 4, 3}}, DeviceTablet}}, // Fire tablet
-
- {"Mozilla/5.0 (Linux; U; Android 4.2.2; en­us; KFTHWI Build/JDQ39) AppleWebKit/537.36 (KHTML, like Gecko) Silk/3.22 like Chrome/34.0.1847.137 Mobile Safari/537.36",
- UserAgent{
- Browser{BrowserSilk, Version{3, 22, 0}}, OS{PlatformLinux, OSKindle, Version{4, 2, 2}}, DeviceTablet}}, // Fire tablet, but with "Mobile"
-
- {"Mozilla/5.0 (Linux; Android 4.4.4; SD4930UR Build/KTU84P) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/34.0.0.0 Mobile Safari/537.36 [FB_IAB/FB4A;FBAV/35.0.0.48.273;]",
- UserAgent{
- Browser{BrowserChrome, Version{34, 0, 0}}, OS{PlatformLinux, OSKindle, Version{4, 4, 4}}, DevicePhone}}, // Facebook app on Fire Phone
-
- {"mozilla/5.0 (linux; android 4.4.3; kfthwi build/ktu84m) applewebkit/537.36 (khtml, like gecko) version/4.0 chrome/34.0.0.0 safari/537.36 [pinterest/android]",
- UserAgent{
- Browser{BrowserChrome, Version{34, 0, 0}}, OS{PlatformLinux, OSKindle, Version{4, 4, 3}}, DeviceTablet}}, // Fire tablet running pinterest
-
- // extra logic to identify phone when using silk has not been added
- // {"Mozilla/5.0 (Linux; Android 4.4.4; SD4930UR Build/KTU84P) AppleWebKit/537.36 (KHTML, like Gecko) Silk/3.67 like Chrome/39.0.2171.93 Mobile Safari/537.36",
- // UserAgent{
- // Browser{BrowserSilk, Version{3,0,0}}, OS{PlatformLinux, OSKindle, Version{4,0,0}}, DevicePhone}}, // Silk on Fire Phone
-
- // Nintendo
- {"Opera/9.30 (Nintendo Wii; U; ; 2047-7; fr)",
- UserAgent{
- Browser{BrowserOpera, Version{9, 30, 0}}, OS{PlatformNintendo, OSNintendo, Version{0, 0, 0}}, DeviceConsole}},
-
- {"Mozilla/5.0 (Nintendo WiiU) AppleWebKit/534.52 (KHTML, like Gecko) NX/2.1.0.8.21 NintendoBrowser/1.0.0.7494.US",
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformNintendo, OSNintendo, Version{0, 0, 0}}, DeviceConsole}},
-
- // Xbox
- {"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; Xbox)", //Xbox 360
- UserAgent{
- Browser{BrowserIE, Version{9, 0, 0}}, OS{PlatformXbox, OSXbox, Version{6, 1, 0}}, DeviceConsole}},
-
- // Playstation
- {"Mozilla/5.0 (PlayStation 4 4.50) AppleWebKit/601.2 (KHTML, like Gecko)",
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformPlaystation, OSPlaystation, Version{0, 0, 0}}, DeviceConsole}},
-
- {"Mozilla/5.0 (Playstation Vita 1.61) AppleWebKit/531.22.8 (KHTML, like Gecko) Silk/3.2",
- UserAgent{
- Browser{BrowserSilk, Version{3, 2, 0}}, OS{PlatformPlaystation, OSPlaystation, Version{0, 0, 0}}, DeviceConsole}},
-
- // Smart TVs and TV dongles
- {"Mozilla/5.0 (CrKey armv7l 1.4.15250) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.0 Safari/537.36", // Chromecast
- UserAgent{
- Browser{BrowserChrome, Version{31, 0, 1650}}, OS{PlatformUnknown, OSUnknown, Version{0, 0, 0}}, DeviceTV}},
-
- {"Mozilla/5.0 (Linux; GoogleTV 3.2; VAP430 Build/MASTER) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.77 Safari/534.24", // Google TV
- UserAgent{
- Browser{BrowserChrome, Version{11, 0, 696}}, OS{PlatformLinux, OSAndroid, Version{0, 0, 0}}, DeviceTV}},
-
- {"Mozilla/5.0 (Linux; Android 5.0; ADT-1 Build/LPX13D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.89 Mobile Safari/537.36", // Android TV
- UserAgent{
- Browser{BrowserChrome, Version{40, 0, 2214}}, OS{PlatformLinux, OSAndroid, Version{5, 0, 0}}, DeviceTV}},
-
- {"Mozilla/5.0 (Linux; Android 4.2.2; AFTB Build/JDQ39) AppleWebKit/537.22 (KHTML, like Gecko) Chrome/25.0.1364.173 Mobile Safari/537.22", // Amazon Fire
- UserAgent{
- Browser{BrowserChrome, Version{25, 0, 1364}}, OS{PlatformLinux, OSAndroid, Version{4, 2, 2}}, DeviceTV}},
-
- {"Mozilla/5.0 (Unknown; Linux armv7l) AppleWebKit/537.1+ (KHTML, like Gecko) Safari/537.1+ LG Browser/6.00.00(+mouse+3D+SCREEN+TUNER; LGE; GLOBAL-PLAT5; 03.07.01; 0x00000001;); LG NetCast.TV-2013/03.17.01 (LG, GLOBAL-PLAT4, wired)", // LG TV
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformLinux, OSLinux, Version{0, 0, 0}}, DeviceTV}},
-
- {"Mozilla/5.0 (X11; FreeBSD; U; Viera; de-DE) AppleWebKit/537.11 (KHTML, like Gecko) Viera/3.10.0 Chrome/23.0.1271.97 Safari/537.11", // Panasonic Viera
- UserAgent{
- Browser{BrowserChrome, Version{23, 0, 1271}}, OS{PlatformLinux, OSLinux, Version{0, 0, 0}}, DeviceTV}},
-
- // TODO: not catching "browser/" and reporting as safari -- ua string not being fully checked?
- // {"Mozilla/5.0 (DTV) AppleWebKit/531.2+ (KHTML, like Gecko) Espial/6.1.5 AQUOSBrowser/2.0 (US01DTV;V;0001;0001)", // Sharp Aquos
- // BrowserUnknown, Version{0,0,0}}, OS{PlatformUnknown, OSUnknown, Version{0,0,0}}, DeviceTV}},
-
- {"Roku/DVP-5.2 (025.02E03197A)", // Roku
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformUnknown, OSUnknown, Version{0, 0, 0}}, DeviceTV}},
-
- {"mozilla/5.0 (smart-tv; linux; tizen 2.3) applewebkit/538.1 (khtml, like gecko) samsungbrowser/1.0 tv safari/538.1", // Samsung SmartTV
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformLinux, OSLinux, Version{0, 0, 0}}, DeviceTV}},
-
- {"mozilla/5.0 (linux; u) applewebkit/537.36 (khtml, like gecko) version/4.0 mobile safari/537.36 smarttv/6.0 (netcast)",
- UserAgent{
- Browser{BrowserUnknown, Version{4, 0, 0}}, OS{PlatformLinux, OSLinux, Version{0, 0, 0}}, DeviceTV}},
-
- // Google search app (GSA) for iOS -- it's Safari in disguise as of v6
- {"Mozilla/5.0 (iPad; CPU OS 8_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/6.0.51363 Mobile/12F69 Safari/600.1.4",
- UserAgent{
- Browser{BrowserSafari, Version{8, 3, 0}}, OS{PlatformiPad, OSiOS, Version{8, 3, 0}}, DeviceTablet}},
-
- // Spotify (applicable for advertising applications)
- {"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Spotify/1.0.9.133 Safari/537.36",
- UserAgent{
- Browser{BrowserSpotify, Version{1, 0, 9}}, OS{PlatformWindows, OSWindows, Version{5, 1, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Spotify/1.0.9.133 Safari/537.36",
- UserAgent{
- Browser{BrowserSpotify, Version{1, 0, 9}}, OS{PlatformMac, OSMacOSX, Version{10, 10, 2}}, DeviceComputer}},
-
- // Bots
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/600.2.5 (KHTML, like Gecko) Version/8.0.2 Safari/600.2.5 (Applebot/0.1; +http://www.apple.com/go/applebot)",
- UserAgent{
- Browser{BrowserAppleBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{10, 10, 1}}, DeviceComputer}},
-
- {"Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)",
- UserAgent{
- Browser{BrowserBaiduBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)",
- UserAgent{
- Browser{BrowserBingBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- {"DuckDuckBot/1.0; (+http://duckduckgo.com/duckduckbot.html)",
- UserAgent{
- Browser{BrowserDuckDuckGoBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- {"facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)",
- UserAgent{
- Browser{BrowserFacebookBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Facebot/1.0",
- UserAgent{
- Browser{BrowserFacebookBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
- UserAgent{
- Browser{BrowserGoogleBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- {"LinkedInBot/1.0 (compatible; Mozilla/5.0; Jakarta Commons-HttpClient/3.1 +http://www.linkedin.com)",
- UserAgent{
- Browser{BrowserLinkedInBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- {"msnbot/2.0b (+http://search.msn.com/msnbot.htm)",
- UserAgent{
- Browser{BrowserMsnBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Pingdom.com_bot_version_1.4_(http://www.pingdom.com/)",
- UserAgent{
- Browser{BrowserPingdomBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Twitterbot/1.0",
- UserAgent{
- Browser{BrowserTwitterBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)",
- UserAgent{
- Browser{BrowserYandexBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)",
- UserAgent{
- Browser{BrowserYahooBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- // {"Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
- // BrowserBot, Version{0,0,0}}, OS{PlatformBot, OSBot, Version{6,0,0}}, DeviceComputer}},
-
- {"mozilla/5.0 (unknown; linux x86_64) applewebkit/538.1 (khtml, like gecko) phantomjs/2.1.1 safari/538.1",
- UserAgent{
- Browser{BrowserBot, Version{0, 0, 0}}, OS{PlatformBot, OSBot, Version{0, 0, 0}}, DeviceComputer}},
-
- // Unknown or partially handled
- {"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.4; en-US; rv:1.9.1b3pre) Gecko/20090223 SeaMonkey/2.0a3", //Seamonkey (~FF)
- UserAgent{
- Browser{BrowserFirefox, Version{0, 0, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 4, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.8pre) Gecko/2009022800 Camino/2.0b3pre", //Camino (~FF)
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 5, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Mobile; rv:26.0) Gecko/26.0 Firefox/26.0", //firefox OS
- UserAgent{
- Browser{BrowserFirefox, Version{26, 0, 0}}, OS{PlatformUnknown, OSUnknown, Version{0, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.45 Safari/535.19", //chrome for android having requested desktop site
- UserAgent{
- Browser{BrowserChrome, Version{18, 0, 1025}}, OS{PlatformLinux, OSLinux, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Opera/9.80 (S60; SymbOS; Opera Mobi/352; U; de) Presto/2.4.15 Version/10.00",
- UserAgent{
- Browser{BrowserOpera, Version{10, 0, 0}}, OS{PlatformUnknown, OSUnknown, Version{0, 0, 0}}, DevicePhone}},
-
- // BrowserQQ
- // {"Mozilla/5.0 (Windows NT 6.2; WOW64; Trident/7.0; Touch; .NET4.0E; .NET4.0C; .NET CLR 3.5.30729; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.3; Tablet PC 2.0; QQBrowser/7.6.21433.400; rv:11.0) like Gecko",
- // UserAgent{
- // Browser{BrowserQQ, Version{7,0,0}}, OS{PlatformWindows, OSWindows, Version{8,0,0}}, DeviceTablet}},
-
- // {"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.124 Safari/537.36 QQBrowser/9.0.2191.400",
- // UserAgent{
- // Browser{BrowserQQ, Version{9,0,0}}, OS{PlatformWindows, OSWindows, Version{7,0,0}}, DeviceComputer}},
-
- // ANDROID TESTS
-
- {"Mozilla/5.0 (Linux; U; Android 1.0; en-us; dream) AppleWebKit/525.10+ (KHTML,like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
- UserAgent{
- Browser{BrowserAndroid, Version{3, 0, 4}}, OS{PlatformLinux, OSAndroid, Version{1, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 1.0; en-us; generic) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
- UserAgent{
- Browser{BrowserAndroid, Version{3, 0, 4}}, OS{PlatformLinux, OSAndroid, Version{1, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 1.0.3; de-de; A80KSC Build/ECLAIR) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{1, 0, 3}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 1.5; en-gb; T-Mobile G1 Build/CRC1) AppleWebKit/528.5+ (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
- UserAgent{
- Browser{BrowserAndroid, Version{3, 1, 2}}, OS{PlatformLinux, OSAndroid, Version{1, 5, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 1.5; es-; FBW1_4 Build/MASTER) AppleWebKit/525.10+ (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
- UserAgent{
- Browser{BrowserAndroid, Version{3, 0, 4}}, OS{PlatformLinux, OSAndroid, Version{1, 5, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux U; Android 1.5 en-us hero) AppleWebKit/525.10+ (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
- UserAgent{
- Browser{BrowserAndroid, Version{3, 0, 4}}, OS{PlatformLinux, OSAndroid, Version{1, 5, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 1.5; en-us; Opus One Build/RBE.00.00) AppleWebKit/528.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile Safari/525.20.1",
- UserAgent{
- Browser{BrowserAndroid, Version{3, 1, 1}}, OS{PlatformLinux, OSAndroid, Version{1, 5, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 1.6; ar-us; SonyEricssonX10i Build/R2BA026) AppleWebKit/528.5+ (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
- UserAgent{
- Browser{BrowserAndroid, Version{3, 1, 2}}, OS{PlatformLinux, OSAndroid, Version{1, 6, 0}}, DevicePhone}},
-
- // TODO: support names of Android OS?
- //{"Mozilla/5.0 (Linux; U; Android Donut; de-de; HTC Tattoo 1.52.161.1 Build/Donut) AppleWebKit/528.5+ (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
- // UserAgent{
- // Browser{BrowserAndroid, Version{3, 1, 2}}, OS{PlatformLinux, OSAndroid, Version{1, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 1.6; en-gb; HTC Tattoo Build/DRC79) AppleWebKit/525.10+ (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
- UserAgent{
- Browser{BrowserAndroid, Version{3, 0, 4}}, OS{PlatformLinux, OSAndroid, Version{1, 6, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 1.6; ja-jp; Docomo HT-03A Build/DRD08) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
- UserAgent{
- Browser{BrowserAndroid, Version{3, 0, 4}}, OS{PlatformLinux, OSAndroid, Version{1, 6, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{2, 1, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 2.1-update1; en-au; HTC_Desire_A8183 V1.16.841.1 Build/ERE27) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{2, 1, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 2.1; en-us; generic) AppleWebKit/525.10+ (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
- UserAgent{
- Browser{BrowserAndroid, Version{3, 0, 4}}, OS{PlatformLinux, OSAndroid, Version{2, 1, 0}}, DevicePhone}},
-
- // TODO support named versions of Android?
- {"Mozilla/5.0 (Linux; U; Android Eclair; en-us; sholes) AppleWebKit/525.10+ (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
- UserAgent{
- Browser{BrowserAndroid, Version{3, 0, 4}}, OS{PlatformLinux, OSAndroid, Version{0, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 2.2; en-sa; HTC_DesireHD_A9191 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{2, 2, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 2.2.1; en-gb; HTC_DesireZ_A7272 Build/FRG83D) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{2, 2, 1}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 2.3.3; en-us; Sensation_4G Build/GRI40) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Safari/533.16",
- UserAgent{
- Browser{BrowserAndroid, Version{5, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{2, 3, 3}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 2.3.5; ko-kr; SHW-M250S Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{2, 3, 5}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 2.3.7; ja-jp; L-02D Build/GWK74) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{2, 3, 7}}, DevicePhone}},
-
- // TODO: this is a tablet, not a phone
- {"Mozilla/5.0 (Linux; U; Android 3.0; xx-xx; Transformer TF101 Build/HRI66) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{3, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{3, 0, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (Linux; U; Android 4.0.1; en-us; sdk Build/ICS_MR0) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{4, 0, 1}}, DevicePhone}},
-
- // TODO support "android-" version prefix
- // However, can't find reference to this naming scheme in real-world UA gathering
- // {"Mozilla/5.0 (Linux; U; Android-4.0.3; en-us; Galaxy Nexus Build/IML74K) AppleWebKit/535.7 (KHTML, like Gecko) CrMo/16.0.912.75 Mobile Safari/535.7",
- // UserAgent{
- // Browser{BrowserChrome, Version{16,0,0}}, OS{PlatformLinux, OSAndroid, Version{4,0,0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; Nexus S Build/JRO03E) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{4, 1, 1}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 4.1; en-gb; Build/JRN84D) AppleWebKit/534.30 (KHTML like Gecko) Version/4.0 Mobile Safari/534.30",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{4, 1, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 4.1.1; el-gr; MB525 Build/JRO03H; CyanogenMod-10) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{4, 1, 1}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 4.1.1; fr-fr; MB525 Build/JRO03H; CyanogenMod-10) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{4, 1, 1}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; U; Android 4.2; en-us; Nexus 10 Build/JVP15I) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{4, 2, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (Linux; U; Android 4.2; ro-ro; LT18i Build/4.1.B.0.431) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
- UserAgent{
- Browser{BrowserAndroid, Version{4, 0, 0}}, OS{PlatformLinux, OSAndroid, Version{4, 2, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; Android 4.3; Nexus 7 Build/JWR66D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.111 Safari/537.36",
- UserAgent{
- Browser{BrowserChrome, Version{27, 0, 1453}}, OS{PlatformLinux, OSAndroid, Version{4, 3, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (Linux; Android 4.4; Nexus 7 Build/KOT24) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.105 Safari/537.36",
- UserAgent{
- Browser{BrowserChrome, Version{30, 0, 1599}}, OS{PlatformLinux, OSAndroid, Version{4, 4, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (Linux; Android 4.4; Nexus 4 Build/KRT16E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.105 Mobile Safari",
- UserAgent{
- Browser{BrowserChrome, Version{30, 0, 1599}}, OS{PlatformLinux, OSAndroid, Version{4, 4, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; Android 6.0.1; SM-G930V Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36",
- UserAgent{
- Browser{BrowserChrome, Version{52, 0, 2743}}, OS{PlatformLinux, OSAndroid, Version{6, 0, 1}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; Android 7.0; Nexus 5X Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36",
- UserAgent{
- Browser{BrowserChrome, Version{52, 0, 2743}}, OS{PlatformLinux, OSAndroid, Version{7, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Linux; Android 7.0; Nexus 6P Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.98 Mobile Safari/537.36",
- UserAgent{
- Browser{BrowserChrome, Version{52, 0, 2743}}, OS{PlatformLinux, OSAndroid, Version{7, 0, 0}}, DevicePhone}},
-
- // BLACKBERRY TESTS
-
- {"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0) BlackBerry8703e/4.1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/104",
- UserAgent{
- Browser{BrowserBlackberry, Version{0, 0, 0}}, OS{PlatformBlackberry, OSBlackberry, Version{0, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (BB10; Touch) AppleWebKit/537.10+ (KHTML, like Gecko) Version/10.1.0.4633 Mobile Safari/537.10+",
- UserAgent{
- Browser{BrowserBlackberry, Version{10, 1, 0}}, OS{PlatformBlackberry, OSBlackberry, Version{0, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (BB10; Kbd) AppleWebKit/537.35+ (KHTML, like Gecko) Version/10.2.1.1925 Mobile Safari/537.35+",
- UserAgent{
- Browser{BrowserBlackberry, Version{10, 2, 1}}, OS{PlatformBlackberry, OSBlackberry, Version{0, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (PlayBook; U; RIM Tablet OS 1.0.0; en-US) AppleWebKit/534.11 (KHTML, like Gecko) Version/7.1.0.7 Safari/534.11",
- UserAgent{
- Browser{BrowserBlackberry, Version{7, 1, 0}}, OS{PlatformBlackberry, OSBlackberry, Version{0, 0, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (PlayBook; U; RIM Tablet OS 2.1.0; en-US) AppleWebKit/536.2+ (KHTML, like Gecko) Version/7.2.1.0 Safari/536.2+",
- UserAgent{
- Browser{BrowserBlackberry, Version{7, 2, 1}}, OS{PlatformBlackberry, OSBlackberry, Version{0, 0, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (X11; U; CrOS i686 9.10.0; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.253.0 Safari/532.5",
- UserAgent{
- Browser{BrowserChrome, Version{4, 0, 253}}, OS{PlatformLinux, OSChromeOS, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (X11; CrOS armv7l 5500.100.6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.120 Safari/537.36",
- UserAgent{
- Browser{BrowserChrome, Version{34, 0, 1847}}, OS{PlatformLinux, OSChromeOS, Version{0, 0, 0}}, DeviceComputer}},
-
- // {"Mozilla/5.0 (Mobile; rv:14.0) Gecko/14.0 Firefox/14.0",
- // UserAgent{
- // Browser{BrowserFirefox, 14, OSFirefoxOS, 14}, DevicePhone}},
-
- // {"Mozilla/5.0 (Mobile; rv:17.0) Gecko/17.0 Firefox/17.0",
- // UserAgent{
- // Browser{BrowserFirefox, , OSFirefoxOS}, DevicePhone}},
-
- // {"Mozilla/5.0 (Mobile; rv:18.1) Gecko/18.1 Firefox/18.1",
- // UserAgent{
- // Browser{BrowserFirefox, , OSFirefoxOS}, DevicePhone}},
-
- // {"Mozilla/5.0 (Tablet; rv:18.1) Gecko/18.1 Firefox/18.1",
- // UserAgent{
- // Browser{BrowserFirefox, , OSFirefoxOS}, DevicePhone}},
-
- // {"Mozilla/5.0 (Mobile; LG-D300; rv:18.1) Gecko/18.1 Firefox/18.1",
- // UserAgent{
- // Browser{BrowserFirefox, , OSFirefoxOS}, DevicePhone}},
-
- {"Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B314 Safari/531.21.10",
- UserAgent{
- Browser{BrowserSafari, Version{4, 0, 4}}, OS{PlatformiPad, OSiOS, Version{3, 2, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7",
- UserAgent{
- Browser{BrowserSafari, Version{4, 0, 5}}, OS{PlatformiPhone, OSiOS, Version{4, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3",
- UserAgent{
- Browser{BrowserSafari, Version{5, 1, 0}}, OS{PlatformiPhone, OSiOS, Version{5, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3",
- UserAgent{
- Browser{BrowserSafari, Version{5, 1, 0}}, OS{PlatformiPad, OSiOS, Version{5, 0, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25",
- UserAgent{
- Browser{BrowserSafari, Version{6, 0, 0}}, OS{PlatformiPad, OSiOS, Version{6, 0, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (iPhone; CPU iPhone OS 7_0 like Mac OS X) AppleWebKit/546.10 (KHTML, like Gecko) Version/6.0 Mobile/7E18WD Safari/8536.25",
- UserAgent{
- Browser{BrowserSafari, Version{6, 0, 0}}, OS{PlatformiPhone, OSiOS, Version{7, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (iPad; CPU OS 7_0 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11A465 Safari/9537.53",
- UserAgent{
- Browser{BrowserSafari, Version{7, 0, 0}}, OS{PlatformiPad, OSiOS, Version{7, 0, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (iPad; CPU OS 7_0_2 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11A501 Safari/9537.53",
- UserAgent{
- Browser{BrowserSafari, Version{7, 0, 0}}, OS{PlatformiPad, OSiOS, Version{7, 0, 2}}, DeviceTablet}},
-
- {"Mozilla/5.0 (iPhone; CPU iPhone OS 10_2_1 like Mac OS X) AppleWebKit/602.4.6 (KHTML, like Gecko) Mobile/14D27 [FBAN/FBIOS;FBAV/86.0.0.48.52;FBBV/53842252;FBDV/iPhone9,1;FBMD/iPhone;FBSN/iOS;FBSV/10.2.1;FBSS/2;FBCR/Verizon;FBID/phone;FBLC/en_US;FBOP/5;FBRV/0]",
- UserAgent{
- Browser{BrowserSafari, Version{10, 2, 1}}, OS{PlatformiPhone, OSiOS, Version{10, 2, 1}}, DevicePhone}},
-
- // TODO handle default browser based on iOS version
- // {"Mozilla/5.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/538.34.9 (KHTML, like Gecko) Mobile/12A4265u",
- // UserAgent{
- // Browser{BrowserSafari, Version{8,0,0}}, OS{PlatformiPhone, OSiOS, Version{8,0,0}}, DevicePhone}},
-
- // TODO extrapolate browser from iOS version
- // {"Mozilla/5.0 (iPad; CPU OS 8_0 like Mac OS X) AppleWebKit/538.34.9 (KHTML, like Gecko) Mobile/12A4265u",
- // UserAgent{
- // Browser{BrowserSafari, Version{8,0,0}}, OS{PlatformiPad, OSiOS, Version{8,0,0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (iPhone; CPU iPhone OS 8_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12A405 Safari/600.1.4",
- UserAgent{
- Browser{BrowserSafari, Version{8, 0, 0}}, OS{PlatformiPhone, OSiOS, Version{8, 0, 2}}, DevicePhone}},
-
- {"Mozilla/5.0 (X11; U; Linux x86_64; en; rv:1.9.0.14) Gecko/20080528 Ubuntu/9.10 (karmic) Epiphany/2.22 Firefox/3.0",
- UserAgent{
- Browser{BrowserFirefox, Version{3, 0, 0}}, OS{PlatformLinux, OSLinux, Version{0, 0, 0}}, DeviceComputer}},
-
- // Can't parse browser due to a limitation of the user agent library
- {"Mozilla/5.0 (X11; U; Linux x86_64; zh-TW; rv:1.9.0.8) Gecko/2009032712 Ubuntu/8.04 (hardy) Firefox/3.0.8 GTB5",
- UserAgent{
- Browser{BrowserFirefox, Version{3, 0, 8}}, OS{PlatformLinux, OSLinux, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (compatible; Konqueror/3.5; Linux; x86_64) KHTML/3.5.5 (like Gecko) (Debian)",
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformLinux, OSLinux, Version{0, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (X11; U; Linux i686; de; rv:1.9.1.5) Gecko/20091112 Iceweasel/3.5.5 (like Firefox/3.5.5; Debian-3.5.5-1)",
- UserAgent{
- Browser{BrowserFirefox, Version{3, 5, 5}}, OS{PlatformLinux, OSLinux, Version{0, 0, 0}}, DeviceComputer}},
-
- // TODO consider bot?
- // {"Miro/2.0.4 (http://www.getmiro.com/; Darwin 10.3.0 i386)",
- // UserAgent{
- // Browser{BrowserUnknown, Version{0,0,0}}, OS{PlatformMac, OSMacOSX, Version{3,0,0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.4; en-US; rv:1.9.1b3pre) Gecko/20090223 SeaMonkey/2.0a3",
- UserAgent{
- Browser{BrowserFirefox, Version{0, 0, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 4, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_5; en-us) AppleWebKit/525.26.2 (KHTML, like Gecko) Version/3.2 Safari/525.26.12",
- UserAgent{
- Browser{BrowserSafari, Version{3, 2, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 5, 5}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en; rv:1.9.0.8pre) Gecko/2009022800 Camino/2.0b3pre",
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 5, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_2; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Chrome/5.0.329.0 Safari/533.1",
- UserAgent{
- Browser{BrowserChrome, Version{5, 0, 329}}, OS{PlatformMac, OSMacOSX, Version{10, 6, 2}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 (.NET CLR 3.5.30729)",
- UserAgent{
- Browser{BrowserFirefox, Version{3, 5, 6}}, OS{PlatformMac, OSMacOSX, Version{10, 6, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.52.7 (KHTML, like Gecko) Version/5.1.2 Safari/534.52.7",
- UserAgent{
- Browser{BrowserSafari, Version{5, 1, 2}}, OS{PlatformMac, OSMacOSX, Version{10, 7, 2}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:9.0) Gecko/20111222 Thunderbird/9.0.1",
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 7, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.75 Safari/535.7",
- UserAgent{
- Browser{BrowserChrome, Version{16, 0, 912}}, OS{PlatformMac, OSMacOSX, Version{10, 7, 2}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8) AppleWebKit/535.18.5 (KHTML, like Gecko) Version/5.2 Safari/535.18.5",
- UserAgent{
- Browser{BrowserSafari, Version{5, 2, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 8, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_8; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
- UserAgent{
- Browser{BrowserChrome, Version{4, 0, 249}}, OS{PlatformMac, OSMacOSX, Version{10, 8, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9) AppleWebKit/537.35.1 (KHTML, like Gecko) Version/6.1 Safari/537.35.1",
- UserAgent{
- Browser{BrowserSafari, Version{6, 1, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 9, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10) AppleWebKit/538.34.48 (KHTML, like Gecko) Version/8.0 Safari/538.35.8",
- UserAgent{
- Browser{BrowserSafari, Version{8, 0, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 10, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10) AppleWebKit/538.32 (KHTML, like Gecko) Version/7.1 Safari/538.4",
- UserAgent{
- Browser{BrowserSafari, Version{7, 1, 0}}, OS{PlatformMac, OSMacOSX, Version{10, 10, 0}}, DeviceComputer}},
-
- {"Opera/9.80 (S60; SymbOS; Opera Mobi/352; U; de) Presto/2.4.15 Version/10.00",
- UserAgent{
- Browser{BrowserOpera, Version{10, 0, 0}}, OS{PlatformUnknown, OSUnknown, Version{0, 0, 0}}, DevicePhone}},
-
- {"Opera/9.80 (S60; SymbOS; Opera Mobi/352; U; de) Presto/2.4.15 Version/10.00",
- UserAgent{
- Browser{BrowserOpera, Version{10, 0, 0}}, OS{PlatformUnknown, OSUnknown, Version{0, 0, 0}}, DevicePhone}},
-
- // TODO: support OneBrowser? https://play.google.com/store/apps/details?id=com.tencent.ibibo.mtt&hl=en_GB
- // {"OneBrowser/3.1 (NokiaN70-1/5.0638.3.0.1)",
- // UserAgent{
- // Browser{BrowserUnknown, Version{0,0,0}}, OS{PlatformUnknown, OSUnknown, Version{0,0,0}}, DevicePhone}},
-
- // WebOS reports itself as Safari :(
- {"Mozilla/5.0 (webOS/1.0; U; en-US) AppleWebKit/525.27.1 (KHTML, like Gecko) Version/1.0 Safari/525.27.1 Pre/1.0",
- UserAgent{
- Browser{BrowserUnknown, Version{1, 0, 0}}, OS{PlatformLinux, OSWebOS, Version{0, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (webOS/1.4.1.1; U; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Version/1.0 Safari/532.2 Pre/1.0",
- UserAgent{
- Browser{BrowserUnknown, Version{1, 0, 0}}, OS{PlatformLinux, OSWebOS, Version{0, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; de-DE) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformLinux, OSWebOS, Version{0, 0, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.2; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.40.1 Safari/534.6 TouchPad/1.0",
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformLinux, OSWebOS, Version{0, 0, 0}}, DeviceTablet}},
-
- {"Opera/9.30 (Nintendo Wii; U; ; 2047-7; fr)",
- UserAgent{
- Browser{BrowserOpera, Version{9, 30, 0}}, OS{PlatformNintendo, OSNintendo, Version{0, 0, 0}}, DeviceConsole}},
-
- {"Mozilla/5.0 (Nintendo WiiU) AppleWebKit/534.52 (KHTML, like Gecko) NX/2.1.0.8.21 NintendoBrowser/1.0.0.7494.US",
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformNintendo, OSNintendo, Version{0, 0, 0}}, DeviceConsole}},
-
- {"Mozilla/5.0 (Nintendo WiiU) AppleWebKit/536.28 (KHTML, like Gecko) NX/3.0.3.12.6 NintendoBrowser/2.0.0.9362.US",
- UserAgent{
- Browser{BrowserUnknown, Version{0, 0, 0}}, OS{PlatformNintendo, OSNintendo, Version{0, 0, 0}}, DeviceConsole}},
-
- // TODO fails to get opera first -- but is this a real UA string or an uncommon spoof?
- // {"Mozilla/4.0 (compatible; MSIE 5.0; Windows 2000) Opera 6.0 [en]",
- // BrowserIE, Version{5,0,0}}, OS{PlatformWindows, OSWindows, Version{4,0,0}}, DeviceComputer}},
-
- {"Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0; SV1; .NET CLR 1.1.4322; .NET CLR 1.0.3705; .NET CLR 2.0.50727)",
- UserAgent{
- Browser{BrowserIE, Version{5, 0, 1}}, OS{PlatformWindows, OSWindows, Version{5, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/4.0; GTB6.4; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.3; OfficeLivePatch.0.0; .NET CLR 1.1.4322)",
- UserAgent{
- Browser{BrowserIE, Version{7, 0, 0}}, OS{PlatformWindows, OSWindows, Version{6, 1, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Windows; U; Windows NT 6.1; sk; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7",
- UserAgent{
- Browser{BrowserFirefox, Version{3, 5, 7}}, OS{PlatformWindows, OSWindows, Version{6, 1, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)",
- UserAgent{
- Browser{BrowserIE, Version{10, 0, 0}}, OS{PlatformWindows, OSWindows, Version{6, 2, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/536.5 (KHTML, like Gecko) YaBrowser/1.0.1084.5402 Chrome/19.0.1084.5402 Safari/536.5",
- UserAgent{
- Browser{BrowserChrome, Version{19, 0, 1084}}, OS{PlatformWindows, OSWindows, Version{6, 2, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.15 (KHTML, like Gecko) Chrome/24.0.1295.0 Safari/537.15",
- UserAgent{
- Browser{BrowserChrome, Version{24, 0, 1295}}, OS{PlatformWindows, OSWindows, Version{6, 2, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko",
- UserAgent{
- Browser{BrowserIE, Version{11, 0, 0}}, OS{PlatformWindows, OSWindows, Version{6, 3, 0}}, DeviceTablet}},
-
- {"Mozilla/5.0 (IE 11.0; Windows NT 6.3; Trident/7.0; .NET4.0E; .NET4.0C; rv:11.0) like Gecko",
- UserAgent{
- Browser{BrowserIE, Version{11, 0, 0}}, OS{PlatformWindows, OSWindows, Version{6, 3, 0}}, DeviceComputer}},
-
- // {"Mozilla/4.0 (compatible; MSIE 4.01; Windows 95)",
- // UserAgent{
- // Browser{BrowserIE, Version{5,0,0}}, OS{PlatformWindows, OSWindows95, Version{5,0,0}}, DeviceComputer}},
-
- // {"Mozilla/4.0 (compatible; MSIE 5.0; Windows 95) Opera 6.02 [en]",
- // UserAgent{
- // Browser{BrowserIE, Version{5,0,0}}, OS{PlatformWindows, OSWindows95, Version{5,0,0}}, DeviceComputer}},
-
- // {"Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98; YComp 5.0.0.0)",
- // UserAgent{
- // Browser{BrowserIE, Version{6,0,0}}, OS{PlatformWindows, OSWindows98, Version{5,0,0}}, DeviceComputer}},
-
- // {"Mozilla/4.0 (compatible; MSIE 4.01; Windows 98)",
- // UserAgent{
- // Browser{BrowserIE, Version{4,0,0}}, OS{PlatformWindows, OSWindows98, Version{5,0,0}}, DeviceComputer}},
-
- // {"Mozilla/5.0 (Windows; U; Windows 98; en-US; rv:1.8.1.8pre) Gecko/20071019 Firefox/2.0.0.8 Navigator/9.0.0.1",
- // UserAgent{
- // Browser{BrowserFirefox, Version{2,0,0}}, OS{PlatformWindows, OSWindows98, Version{5,0,0}}, DeviceComputer}},
-
- // Can't parse due to a limitation of the user agent library
- // {"Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016",
- // UserAgent{
- // Browser{ BrowserUnknown, Version{0,0,0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{0,0,0}}, DevicePhone}},
-
- // {"Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; 176x220)",
- // UserAgent{
- // Browser{BrowserIE, Version{4,0,0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{0,0,0}}, DevicePhone}},
-
- // Can't parse browser due to limitation of user agent library
- // {"Mozilla/4.0 (compatible; MSIE 5.0; Windows ME) Opera 6.0 [de]",
- // UserAgent{
- // Browser{BrowserUnknown, OSWindowsME}, DeviceComputer}},
-
- {"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; InfoPath.2; .NET CLR 3.5.21022; .NET CLR 3.5.30729; MS-RTC LM 8; OfficeLiveConnector.1.4; OfficeLivePatch.1.3; .NET CLR 3.0.30729)",
- UserAgent{
- Browser{BrowserIE, Version{8, 0, 0}}, OS{PlatformWindows, OSWindows, Version{6, 0, 0}}, DeviceComputer}},
-
- {"Mozilla/5.0 (Windows; U; Windows NT 5.1; cs; rv:1.9.1.8) Gecko/20100202 Firefox/3.5.8",
- UserAgent{
- Browser{BrowserFirefox, Version{3, 5, 8}}, OS{PlatformWindows, OSWindows, Version{5, 1, 0}}, DeviceComputer}},
-
- {"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; )",
- UserAgent{
- Browser{BrowserIE, Version{7, 0, 0}}, OS{PlatformWindows, OSWindows, Version{5, 1, 0}}, DeviceComputer}},
-
- // Can't parse due to a limitation of the user agent library
- {"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Windows Phone 6.5.3.5)",
- UserAgent{
- Browser{BrowserIE, Version{6, 0, 0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{6, 5, 3}}, DevicePhone}},
-
- // desktop mode for Windows Phone 7
- {"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; XBLWP7; ZuneWP7)",
- UserAgent{
- Browser{BrowserIE, Version{7, 0, 0}}, OS{PlatformWindows, OSWindows, Version{6, 1, 0}}, DeviceComputer}},
-
- // mobile mode for Windows Phone 7
- {"Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0; HTC; T8788)",
- UserAgent{
- Browser{BrowserIE, Version{7, 0, 0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{7, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)",
- UserAgent{
- Browser{BrowserIE, Version{9, 0, 0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{7, 5, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (compatible; MSIE 10.0; Windows Phone 8.0; Trident/6.0; IEMobile/10.0; ARM; Touch; NOKIA; Lumia 920)",
- UserAgent{
- Browser{BrowserIE, Version{10, 0, 0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{8, 0, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Windows Phone 8.1; ARM; Trident/7.0; Touch IEMobile/11.0; HTC; Windows Phone 8S by HTC) like Gecko",
- UserAgent{
- Browser{BrowserIE, Version{11, 0, 0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{8, 1, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (Windows Phone 8.1; ARM; Trident/7.0; Touch IEMobile/11.0; NOKIA; 909) like Gecko",
- UserAgent{
- Browser{BrowserIE, Version{11, 0, 0}}, OS{PlatformWindowsPhone, OSWindowsPhone, Version{8, 1, 0}}, DevicePhone}},
-
- {"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; Xbox)",
- UserAgent{
- Browser{BrowserIE, Version{9, 0, 0}}, OS{PlatformXbox, OSXbox, Version{6, 1, 0}}, DeviceConsole}},
-}
-
-func TestAgentSurfer(t *testing.T) {
- for _, determined := range testUAVars {
- t.Run("", func(t *testing.T) {
- testFuncs := []func(string) *UserAgent{
- Parse,
- func(ua string) *UserAgent {
- u := new(UserAgent)
- ParseUserAgent(ua, u)
- return u
- },
- }
-
- for _, f := range testFuncs {
- ua := f(determined.UA)
-
- if ua.Browser.Name != determined.Browser.Name {
- t.Errorf("browserName: got %v, wanted %v", ua.Browser.Name, determined.Browser.Name)
- t.Logf("agent: %s", determined.UA)
- }
-
- if ua.Browser.Version != determined.Browser.Version {
- t.Errorf("browser version: got %d, wanted %d", ua.Browser.Version, determined.Browser.Version)
- t.Logf("agent: %s", determined.UA)
- }
-
- if ua.OS.Platform != determined.OS.Platform {
- t.Errorf("platform: got %v, wanted %v", ua.OS.Platform, determined.OS.Platform)
- t.Logf("agent: %s", determined.UA)
- }
-
- if ua.OS.Name != determined.OS.Name {
- t.Errorf("os: got %s, wanted %s", ua.OS.Name, determined.OS.Name)
- t.Logf("agent: %s", determined.UA)
- }
-
- if ua.OS.Version != determined.OS.Version {
- t.Errorf("os version: got %d, wanted %d", ua.OS.Version, determined.OS.Version)
- t.Logf("agent: %s", determined.UA)
- }
-
- if ua.DeviceType != determined.DeviceType {
- t.Errorf("device type: got %v, wanted %v", ua.DeviceType, determined.DeviceType)
- t.Logf("agent: %s", determined.UA)
- }
- }
- })
- }
-}
-
-func BenchmarkAgentSurfer(b *testing.B) {
- num := len(testUAVars)
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- Parse(testUAVars[i%num].UA)
- }
-}
-
-func BenchmarkAgentSurferReuse(b *testing.B) {
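- // Reuse one UserAgent across iterations, resetting it between
- // parses, so the benchmark measures parsing without per-call allocations.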
- dest := new(UserAgent)
- num := len(testUAVars)
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- dest.Reset()
- ParseUserAgent(testUAVars[i%num].UA, dest)
- }
-}
-
-func BenchmarkEvalSystem(b *testing.B) {
- num := len(testUAVars)
- v := UserAgent{}
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.evalOS(testUAVars[i%num].UA)
- }
-}
-
-func BenchmarkEvalBrowserName(b *testing.B) {
- num := len(testUAVars)
- v := UserAgent{}
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.evalBrowserName(testUAVars[i%num].UA)
- }
-}
-
-func BenchmarkEvalBrowserVersion(b *testing.B) {
- num := len(testUAVars)
- v := UserAgent{}
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.Browser.Name = testUAVars[i%num].Browser.Name
- v.evalBrowserVersion(testUAVars[i%num].UA)
- }
-}
-
-func BenchmarkEvalDevice(b *testing.B) {
- num := len(testUAVars)
- v := UserAgent{}
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- v.OS.Name = testUAVars[i%num].OS.Name
- v.OS.Platform = testUAVars[i%num].OS.Platform
- v.Browser.Name = testUAVars[i%num].Browser.Name
- v.evalDevice(testUAVars[i%num].UA)
- }
-}
-
-// Chrome for Mac
-func BenchmarkParseChromeMac(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- Parse("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36")
- }
-}
-
-// Chrome for Windows
-func BenchmarkParseChromeWin(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- Parse("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36")
- }
-}
-
-// Chrome for Android
-func BenchmarkParseChromeAndroid(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- Parse("Mozilla/5.0 (Linux; Android 4.4.2; GT-P5210 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.93 Safari/537.36")
- }
-}
-
-// Safari for Mac
-func BenchmarkParseSafariMac(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- Parse("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/8.0.7 Safari/600.7.12")
- }
-}
-
-// Safari for iPad
-func BenchmarkParseSafariiPad(b *testing.B) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- Parse("Mozilla/5.0 (iPad; CPU OS 8_1_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B440 Safari/600.1.4")
- }
-}
diff --git a/vendor/github.com/cpanato/html2text/.travis.yml b/vendor/github.com/cpanato/html2text/.travis.yml
deleted file mode 100644
index 6c7f48efd..000000000
--- a/vendor/github.com/cpanato/html2text/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-go:
- - tip
- - 1.8
- - 1.7
- - 1.6
- - 1.5
- - 1.4
- - 1.3
- - 1.2
-notifications:
- email:
- on_success: change
- on_failure: always
diff --git a/vendor/github.com/cpanato/html2text/README.md b/vendor/github.com/cpanato/html2text/README.md
deleted file mode 100644
index 57abf3ff3..000000000
--- a/vendor/github.com/cpanato/html2text/README.md
+++ /dev/null
@@ -1,108 +0,0 @@
-# html2text
-
-[![Documentation](https://godoc.org/github.com/cpanato/html2text?status.svg)](https://godoc.org/github.com/cpanato/html2text)
-[![Build Status](https://travis-ci.org/cpanato/html2text.svg?branch=master)](https://travis-ci.org/cpanato/html2text)
-[![Report Card](https://goreportcard.com/badge/github.com/cpanato/html2text)](https://goreportcard.com/report/github.com/cpanato/html2text)
-
-### Initial information
- This project was forked from [github.com/jaytaylor/html2text](https://github.com/jaytaylor/html2text) in order to use a different BOM-stripping library, because the one used upstream has no license.
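-
-As a minimal sketch of what that BOM handling looks like (using the `github.com/dimchansky/utfbom` package this fork imports; the input bytes below are illustrative):
-
-```go
-package main
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
-
- "github.com/dimchansky/utfbom"
-)
-
-func main() {
- // A UTF-8 BOM followed by markup; SkipOnly wraps the reader and
- // strips the BOM bytes before anything else is read.
- input := []byte("\xEF\xBB\xBF<p>hello</p>")
- clean, _ := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(input)))
- fmt.Printf("%q\n", clean) // "<p>hello</p>"
-}
-```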
-
-
-### Converts HTML into text
-
-
-## Introduction
-
-Ensure your emails are readable by all!
-
-Turns HTML into raw text, useful for sending fancy HTML emails with an equivalently well-formatted TXT document as a fallback (e.g. for people who don't allow HTML emails or have other display issues).
-
-html2text is a simple Go package for rendering HTML into plaintext.
-
-There are still lots of improvements to be had, but FWIW this has worked fine for my [basic] HTML-2-text needs.
-
-It requires Go 1.x or newer ;)
-
-
-## Download the package
-
-```bash
-go get github.com/cpanato/html2text
-```
-
-## Example usage
-
-```go
-package main
-
-import (
- "fmt"
-
- "github.com/cpanato/html2text"
-)
-
-func main() {
- inputHtml := `
- <html>
- <head>
- <title>My Mega Service</title>
- <link rel=\"stylesheet\" href=\"main.css\">
- <style type=\"text/css\">body { color: #fff; }</style>
- </head>
-
- <body>
- <div class="logo">
- <a href="http://mymegaservice.com/"><img src="/logo-image.jpg" alt="Mega Service"/></a>
- </div>
-
- <h1>Welcome to your new account on my service!</h1>
-
- <p>
- Here is some more information:
-
- <ul>
- <li>Link 1: <a href="https://example.com">Example.com</a></li>
- <li>Link 2: <a href="https://example2.com">Example2.com</a></li>
- <li>Something else</li>
- </ul>
- </p>
- </body>
- </html>
- `
-
- text, err := html2text.FromString(inputHtml)
- if err != nil {
- panic(err)
- }
- fmt.Println(text)
-}
-```
-
-Output:
-```
-Mega Service ( http://mymegaservice.com/ )
-
-******************************************
-Welcome to your new account on my service!
-******************************************
-
-Here is some more information:
-
-* Link 1: Example.com ( https://example.com )
-* Link 2: Example2.com ( https://example2.com )
-* Something else
-```
-
-
-## Unit-tests
-
-Running the unit-tests is straightforward and standard:
-
-```bash
-go test
-```
-
-
-# License
-
-Permissive MIT license.
diff --git a/vendor/github.com/cpanato/html2text/html2text.go b/vendor/github.com/cpanato/html2text/html2text.go
deleted file mode 100644
index 61774e8a0..000000000
--- a/vendor/github.com/cpanato/html2text/html2text.go
+++ /dev/null
@@ -1,312 +0,0 @@
-package html2text
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "regexp"
- "strings"
- "unicode"
-
- "github.com/dimchansky/utfbom"
-
- "golang.org/x/net/html"
- "golang.org/x/net/html/atom"
-)
-
-var (
- spacingRe = regexp.MustCompile(`[ \r\n\t]+`)
- newlineRe = regexp.MustCompile(`\n\n+`)
-)
-
-type textifyTraverseCtx struct {
- Buf bytes.Buffer
-
- prefix string
- blockquoteLevel int
- lineLength int
- endsWithSpace bool
- endsWithNewline bool
- justClosedDiv bool
-}
-
-func (ctx *textifyTraverseCtx) traverse(node *html.Node) error {
- switch node.Type {
- default:
- return ctx.traverseChildren(node)
-
- case html.TextNode:
- data := strings.Trim(spacingRe.ReplaceAllString(node.Data, " "), " ")
- return ctx.emit(data)
-
- case html.ElementNode:
- return ctx.handleElementNode(node)
- }
-}
-
-func (ctx *textifyTraverseCtx) handleElementNode(node *html.Node) error {
- ctx.justClosedDiv = false
- switch node.DataAtom {
- case atom.Br:
- return ctx.emit("\n")
-
- case atom.H1, atom.H2, atom.H3:
- subCtx := textifyTraverseCtx{}
- if err := subCtx.traverseChildren(node); err != nil {
- return err
- }
-
- str := subCtx.Buf.String()
- dividerLen := 0
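- // Size the divider to the longest line of the rendered heading text.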
- for _, line := range strings.Split(str, "\n") {
- if lineLen := len([]rune(line)); lineLen-1 > dividerLen {
- dividerLen = lineLen - 1
- }
- }
- divider := ""
- if node.DataAtom == atom.H1 {
- divider = strings.Repeat("*", dividerLen)
- } else {
- divider = strings.Repeat("-", dividerLen)
- }
-
- if node.DataAtom == atom.H3 {
- return ctx.emit("\n\n" + str + "\n" + divider + "\n\n")
- }
- return ctx.emit("\n\n" + divider + "\n" + str + "\n" + divider + "\n\n")
-
- case atom.Blockquote:
- ctx.blockquoteLevel++
- ctx.prefix = strings.Repeat(">", ctx.blockquoteLevel) + " "
- if err := ctx.emit("\n"); err != nil {
- return err
- }
- if ctx.blockquoteLevel == 1 {
- if err := ctx.emit("\n"); err != nil {
- return err
- }
- }
- if err := ctx.traverseChildren(node); err != nil {
- return err
- }
- ctx.blockquoteLevel--
- ctx.prefix = strings.Repeat(">", ctx.blockquoteLevel)
- if ctx.blockquoteLevel > 0 {
- ctx.prefix += " "
- }
- return ctx.emit("\n\n")
-
- case atom.Div:
- if ctx.lineLength > 0 {
- if err := ctx.emit("\n"); err != nil {
- return err
- }
- }
- if err := ctx.traverseChildren(node); err != nil {
- return err
- }
- var err error
- if !ctx.justClosedDiv {
- err = ctx.emit("\n")
- }
- ctx.justClosedDiv = true
- return err
-
- case atom.Li:
- if err := ctx.emit("* "); err != nil {
- return err
- }
-
- if err := ctx.traverseChildren(node); err != nil {
- return err
- }
-
- return ctx.emit("\n")
-
- case atom.B, atom.Strong:
- subCtx := textifyTraverseCtx{}
- subCtx.endsWithSpace = true
- if err := subCtx.traverseChildren(node); err != nil {
- return err
- }
- str := subCtx.Buf.String()
- return ctx.emit("*" + str + "*")
-
- case atom.A:
- // If image is the only child, take its alt text as the link text
- if img := node.FirstChild; img != nil && node.LastChild == img && img.DataAtom == atom.Img {
- if altText := getAttrVal(img, "alt"); altText != "" {
- if err := ctx.emit(altText); err != nil {
- return err
- }
- }
- } else if err := ctx.traverseChildren(node); err != nil {
- return err
- }
-
- hrefLink := ""
- if attrVal := getAttrVal(node, "href"); attrVal != "" {
- attrVal = ctx.normalizeHrefLink(attrVal)
- if attrVal != "" {
- hrefLink = "( " + attrVal + " )"
- }
- }
-
- return ctx.emit(hrefLink)
-
- case atom.P, atom.Ul, atom.Table:
- if err := ctx.emit("\n\n"); err != nil {
- return err
- }
-
- if err := ctx.traverseChildren(node); err != nil {
- return err
- }
-
- return ctx.emit("\n\n")
-
- case atom.Tr:
- if err := ctx.traverseChildren(node); err != nil {
- return err
- }
-
- return ctx.emit("\n")
-
- case atom.Style, atom.Script, atom.Head:
- // Ignore the subtree
- return nil
-
- default:
- return ctx.traverseChildren(node)
- }
-}
-
-func (ctx *textifyTraverseCtx) traverseChildren(node *html.Node) error {
- for c := node.FirstChild; c != nil; c = c.NextSibling {
- if err := ctx.traverse(c); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (ctx *textifyTraverseCtx) emit(data string) error {
- if len(data) == 0 {
- return nil
- }
- lines := ctx.breakLongLines(data)
- var err error
- for _, line := range lines {
- runes := []rune(line)
- startsWithSpace := unicode.IsSpace(runes[0])
- if !startsWithSpace && !ctx.endsWithSpace {
- ctx.Buf.WriteByte(' ')
- ctx.lineLength++
- }
- ctx.endsWithSpace = unicode.IsSpace(runes[len(runes)-1])
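- // Copy the line a rune at a time, re-emitting the blockquote
- // prefix after every newline.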
- for _, c := range line {
- _, err = ctx.Buf.WriteString(string(c))
- if err != nil {
- return err
- }
- ctx.lineLength++
- if c == '\n' {
- ctx.lineLength = 0
- if ctx.prefix != "" {
- _, err = ctx.Buf.WriteString(ctx.prefix)
- if err != nil {
- return err
- }
- }
- }
- }
- }
- return nil
-}
-
-func (ctx *textifyTraverseCtx) breakLongLines(data string) []string {
- // only break lines when we are in blockquotes
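- // Wrapping happens at 74 columns, breaking at whitespace when possible.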
- if ctx.blockquoteLevel == 0 {
- return []string{data}
- }
- var ret []string
- runes := []rune(data)
- l := len(runes)
- existing := ctx.lineLength
- if existing >= 74 {
- ret = append(ret, "\n")
- existing = 0
- }
- for l+existing > 74 {
- i := 74 - existing
- for i >= 0 && !unicode.IsSpace(runes[i]) {
- i--
- }
- if i == -1 {
- // no spaces, so go the other way
- i = 74 - existing
- for i < l && !unicode.IsSpace(runes[i]) {
- i++
- }
- }
- ret = append(ret, string(runes[:i])+"\n")
- for i < l && unicode.IsSpace(runes[i]) {
- i++
- }
- runes = runes[i:]
- l = len(runes)
- existing = 0
- }
- if len(runes) > 0 {
- ret = append(ret, string(runes))
- }
- return ret
-}
-
-func (ctx *textifyTraverseCtx) normalizeHrefLink(link string) string {
- link = strings.TrimSpace(link)
- link = strings.TrimPrefix(link, "mailto:")
- return link
-}
-
-func getAttrVal(node *html.Node, attrName string) string {
- for _, attr := range node.Attr {
- if attr.Key == attrName {
- return attr.Val
- }
- }
-
- return ""
-}
-
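-// FromHtmlNode renders the parsed HTML tree rooted at doc as plain text.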
-func FromHtmlNode(doc *html.Node) (string, error) {
- ctx := textifyTraverseCtx{
- Buf: bytes.Buffer{},
- }
- if err := ctx.traverse(doc); err != nil {
- return "", err
- }
-
- text := strings.TrimSpace(newlineRe.ReplaceAllString(
- strings.Replace(ctx.Buf.String(), "\n ", "\n", -1), "\n\n"))
- return text, nil
-}
-
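-// FromReader reads HTML from reader, skips any leading byte order mark,
-// and renders the result as plain text.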
-func FromReader(reader io.Reader) (string, error) {
- bs, err := ioutil.ReadAll(reader)
- if err != nil {
- return "", err
- }
- newReader, _ := utfbom.Skip(bytes.NewReader(bs))
-
- doc, err := html.Parse(newReader)
- if err != nil {
- return "", err
- }
- return FromHtmlNode(doc)
-}
-
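-// FromString renders the given HTML string as plain text.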
-func FromString(input string) (string, error) {
- bs := utfbom.SkipOnly(bytes.NewReader([]byte(input)))
- text, err := FromReader(bs)
- if err != nil {
- return "", err
- }
- return text, nil
-}
diff --git a/vendor/github.com/cpanato/html2text/html2text_test.go b/vendor/github.com/cpanato/html2text/html2text_test.go
deleted file mode 100644
index b30d68ac9..000000000
--- a/vendor/github.com/cpanato/html2text/html2text_test.go
+++ /dev/null
@@ -1,674 +0,0 @@
-package html2text
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "path"
- "regexp"
- "strings"
- "testing"
-)
-
-const (
- destPath = "testdata"
-)
-
-func TestParseUTF8(t *testing.T) {
- htmlFiles := []struct {
- file string
- keywordShouldNotExist string
- keywordShouldExist string
- }{
- {
- "utf8.html",
- "学习之道:美国公认学习第一书title",
- "次世界冠军赛上,我几近疯狂",
- },
- {
- "utf8_with_bom.xhtml",
- "1892年波兰文版序言title",
- "种新的波兰文本已成为必要",
- },
- }
-
- for _, htmlFile := range htmlFiles {
- bs, err := ioutil.ReadFile(path.Join(destPath, htmlFile.file))
- if err != nil {
- t.Fatal(err)
- }
- text, err := FromReader(bytes.NewReader(bs))
- if err != nil {
- t.Fatal(err)
- }
- if !strings.Contains(text, htmlFile.keywordShouldExist) {
- t.Fatalf("keyword %s should exists in file %s", htmlFile.keywordShouldExist, htmlFile.file)
- }
- if strings.Contains(text, htmlFile.keywordShouldNotExist) {
- t.Fatalf("keyword %s should not exists in file %s", htmlFile.keywordShouldNotExist, htmlFile.file)
- }
- }
-}
-
-func TestStrippingWhitespace(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "test text",
- "test text",
- },
- {
- " \ttext\ntext\n",
- "text text",
- },
- {
- " \na \n\t \n \n a \t",
- "a a",
- },
- {
- "test text",
- "test text",
- },
- {
- "test&nbsp;&nbsp;&nbsp; text&nbsp;",
- "test    text",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestParagraphsAndBreaks(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "Test text",
- "Test text",
- },
- {
- "Test text<br>",
- "Test text",
- },
- {
- "Test text<br>Test",
- "Test text\nTest",
- },
- {
- "<p>Test text</p>",
- "Test text",
- },
- {
- "<p>Test text</p><p>Test text</p>",
- "Test text\n\nTest text",
- },
- {
- "\n<p>Test text</p>\n\n\n\t<p>Test text</p>\n",
- "Test text\n\nTest text",
- },
- {
- "\n<p>Test text<br/>Test text</p>\n",
- "Test text\nTest text",
- },
- {
- "\n<p>Test text<br> \tTest text<br></p>\n",
- "Test text\nTest text",
- },
- {
- "Test text<br><BR />Test text",
- "Test text\n\nTest text",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestTables(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "<table><tr><td></td><td></td></tr></table>",
- "",
- },
- {
- "<table><tr><td>cell1</td><td>cell2</td></tr></table>",
- "cell1 cell2",
- },
- {
- "<table><tr><td>row1</td></tr><tr><td>row2</td></tr></table>",
- "row1\nrow2",
- },
- {
- `<table>
- <tr><td>cell1-1</td><td>cell1-2</td></tr>
- <tr><td>cell2-1</td><td>cell2-2</td></tr>
- </table>`,
- "cell1-1 cell1-2\ncell2-1 cell2-2",
- },
- {
- "_<table><tr><td>cell</td></tr></table>_",
- "_\n\ncell\n\n_",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestStrippingLists(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "<ul></ul>",
- "",
- },
- {
- "<ul><li>item</li></ul>_",
- "* item\n\n_",
- },
- {
- "<li class='123'>item 1</li> <li>item 2</li>\n_",
- "* item 1\n* item 2\n_",
- },
- {
- "<li>item 1</li> \t\n <li>item 2</li> <li> item 3</li>\n_",
- "* item 1\n* item 2\n* item 3\n_",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestLinks(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- `<a></a>`,
- ``,
- },
- {
- `<a href=""></a>`,
- ``,
- },
- {
- `<a href="http://example.com/"></a>`,
- `( http://example.com/ )`,
- },
- {
- `<a href="">Link</a>`,
- `Link`,
- },
- {
- `<a href="http://example.com/">Link</a>`,
- `Link ( http://example.com/ )`,
- },
- {
- `<a href="http://example.com/"><span class="a">Link</span></a>`,
- `Link ( http://example.com/ )`,
- },
- {
- "<a href='http://example.com/'>\n\t<span class='a'>Link</span>\n\t</a>",
- `Link ( http://example.com/ )`,
- },
- {
- "<a href='mailto:contact@example.org'>Contact Us</a>",
- `Contact Us ( contact@example.org )`,
- },
- {
- "<a href=\"http://example.com:80/~user?aaa=bb&amp;c=d,e,f#foo\">Link</a>",
- `Link ( http://example.com:80/~user?aaa=bb&c=d,e,f#foo )`,
- },
- {
- "<a title='title' href=\"http://example.com/\">Link</a>",
- `Link ( http://example.com/ )`,
- },
- {
- "<a href=\" http://example.com/ \"> Link </a>",
- `Link ( http://example.com/ )`,
- },
- {
- "<a href=\"http://example.com/a/\">Link A</a> <a href=\"http://example.com/b/\">Link B</a>",
- `Link A ( http://example.com/a/ ) Link B ( http://example.com/b/ )`,
- },
- {
- "<a href=\"%%LINK%%\">Link</a>",
- `Link ( %%LINK%% )`,
- },
- {
- "<a href=\"[LINK]\">Link</a>",
- `Link ( [LINK] )`,
- },
- {
- "<a href=\"{LINK}\">Link</a>",
- `Link ( {LINK} )`,
- },
- {
- "<a href=\"[[!unsubscribe]]\">Link</a>",
- `Link ( [[!unsubscribe]] )`,
- },
- {
- "<p>This is <a href=\"http://www.google.com\" >link1</a> and <a href=\"http://www.google.com\" >link2 </a> is next.</p>",
- `This is link1 ( http://www.google.com ) and link2 ( http://www.google.com ) is next.`,
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestImageAltTags(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- `<img />`,
- ``,
- },
- {
- `<img src="http://example.ru/hello.jpg" />`,
- ``,
- },
- {
- `<img alt="Example"/>`,
- ``,
- },
- {
- `<img src="http://example.ru/hello.jpg" alt="Example"/>`,
- ``,
- },
- // Images do matter if they are in a link
- {
- `<a href="http://example.com/"><img src="http://example.ru/hello.jpg" alt="Example"/></a>`,
- `Example ( http://example.com/ )`,
- },
- {
- `<a href="http://example.com/"><img src="http://example.ru/hello.jpg" alt="Example"></a>`,
- `Example ( http://example.com/ )`,
- },
- {
- `<a href='http://example.com/'><img src='http://example.ru/hello.jpg' alt='Example'/></a>`,
- `Example ( http://example.com/ )`,
- },
- {
- `<a href='http://example.com/'><img src='http://example.ru/hello.jpg' alt='Example'></a>`,
- `Example ( http://example.com/ )`,
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestHeadings(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "<h1>Test</h1>",
- "****\nTest\n****",
- },
- {
- "\t<h1>\nTest</h1> ",
- "****\nTest\n****",
- },
- {
- "\t<h1>\nTest line 1<br>Test 2</h1> ",
- "***********\nTest line 1\nTest 2\n***********",
- },
- {
- "<h1>Test</h1> <h1>Test</h1>",
- "****\nTest\n****\n\n****\nTest\n****",
- },
- {
- "<h2>Test</h2>",
- "----\nTest\n----",
- },
- {
- "<h1><a href='http://example.com/'>Test</a></h1>",
- "****************************\nTest ( http://example.com/ )\n****************************",
- },
- {
- "<h3> <span class='a'>Test </span></h3>",
- "Test\n----",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestBold(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "<b>Test</b>",
- "*Test*",
- },
- {
- "\t<b>Test</b> ",
- "*Test*",
- },
- {
- "\t<b>Test line 1<br>Test 2</b> ",
- "*Test line 1\nTest 2*",
- },
- {
- "<b>Test</b> <b>Test</b>",
- "*Test* *Test*",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestDiv(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "<div>Test</div>",
- "Test",
- },
- {
- "\t<div>Test</div> ",
- "Test",
- },
- {
- "<div>Test line 1<div>Test 2</div></div>",
- "Test line 1\nTest 2",
- },
- {
- "Test 1<div>Test 2</div> <div>Test 3</div>Test 4",
- "Test 1\nTest 2\nTest 3\nTest 4",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestBlockquotes(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "<div>level 0<blockquote>level 1<br><blockquote>level 2</blockquote>level 1</blockquote><div>level 0</div></div>",
- "level 0\n> \n> level 1\n> \n>> level 2\n> \n> level 1\n\nlevel 0",
- },
- {
- "<blockquote>Test</blockquote>Test",
- "> \n> Test\n\nTest",
- },
- {
- "\t<blockquote> \nTest<br></blockquote> ",
- "> \n> Test\n>",
- },
- {
- "\t<blockquote> \nTest line 1<br>Test 2</blockquote> ",
- "> \n> Test line 1\n> Test 2",
- },
- {
- "<blockquote>Test</blockquote> <blockquote>Test</blockquote> Other Test",
- "> \n> Test\n\n> \n> Test\n\nOther Test",
- },
- {
- "<blockquote>Lorem ipsum Commodo id consectetur pariatur ea occaecat minim aliqua ad sit consequat quis ex commodo Duis incididunt eu mollit consectetur fugiat voluptate dolore in pariatur in commodo occaecat Ut occaecat velit esse labore aute quis commodo non sit dolore officia Excepteur cillum amet cupidatat culpa velit labore ullamco dolore mollit elit in aliqua dolor irure do</blockquote>",
- "> \n> Lorem ipsum Commodo id consectetur pariatur ea occaecat minim aliqua ad\n> sit consequat quis ex commodo Duis incididunt eu mollit consectetur fugiat\n> voluptate dolore in pariatur in commodo occaecat Ut occaecat velit esse\n> labore aute quis commodo non sit dolore officia Excepteur cillum amet\n> cupidatat culpa velit labore ullamco dolore mollit elit in aliqua dolor\n> irure do",
- },
- {
- "<blockquote>Lorem<b>ipsum</b><b>Commodo</b><b>id</b><b>consectetur</b><b>pariatur</b><b>ea</b><b>occaecat</b><b>minim</b><b>aliqua</b><b>ad</b><b>sit</b><b>consequat</b><b>quis</b><b>ex</b><b>commodo</b><b>Duis</b><b>incididunt</b><b>eu</b><b>mollit</b><b>consectetur</b><b>fugiat</b><b>voluptate</b><b>dolore</b><b>in</b><b>pariatur</b><b>in</b><b>commodo</b><b>occaecat</b><b>Ut</b><b>occaecat</b><b>velit</b><b>esse</b><b>labore</b><b>aute</b><b>quis</b><b>commodo</b><b>non</b><b>sit</b><b>dolore</b><b>officia</b><b>Excepteur</b><b>cillum</b><b>amet</b><b>cupidatat</b><b>culpa</b><b>velit</b><b>labore</b><b>ullamco</b><b>dolore</b><b>mollit</b><b>elit</b><b>in</b><b>aliqua</b><b>dolor</b><b>irure</b><b>do</b></blockquote>",
- "> \n> Lorem *ipsum* *Commodo* *id* *consectetur* *pariatur* *ea* *occaecat* *minim*\n> *aliqua* *ad* *sit* *consequat* *quis* *ex* *commodo* *Duis* *incididunt* *eu*\n> *mollit* *consectetur* *fugiat* *voluptate* *dolore* *in* *pariatur* *in* *commodo*\n> *occaecat* *Ut* *occaecat* *velit* *esse* *labore* *aute* *quis* *commodo*\n> *non* *sit* *dolore* *officia* *Excepteur* *cillum* *amet* *cupidatat* *culpa*\n> *velit* *labore* *ullamco* *dolore* *mollit* *elit* *in* *aliqua* *dolor* *irure*\n> *do*",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestIgnoreStylesScriptsHead(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "<style>Test</style>",
- "",
- },
- {
- "<style type=\"text/css\">body { color: #fff; }</style>",
- "",
- },
- {
- "<link rel=\"stylesheet\" href=\"main.css\">",
- "",
- },
- {
- "<script>Test</script>",
- "",
- },
- {
- "<script src=\"main.js\"></script>",
- "",
- },
- {
- "<script type=\"text/javascript\" src=\"main.js\"></script>",
- "",
- },
- {
- "<script type=\"text/javascript\">Test</script>",
- "",
- },
- {
- "<script type=\"text/ng-template\" id=\"template.html\"><a href=\"http://google.com\">Google</a></script>",
- "",
- },
- {
- "<script type=\"bla-bla-bla\" id=\"template.html\">Test</script>",
- "",
- },
- {
- `<html><head><title>Title</title></head><body></body></html>`,
- "",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestText(t *testing.T) {
- testCases := []struct {
- input string
- expr string
- }{
- {
- `<li>
- <a href="/new" data-ga-click="Header, create new repository, icon:repo"><span class="octicon octicon-repo"></span> New repository</a>
- </li>`,
- `\* New repository \( /new \)`,
- },
- {
- `hi
-
- <br>
-
- hello <a href="https://google.com">google</a>
- <br><br>
- test<p>List:</p>
-
- <ul>
- <li><a href="foo">Foo</a></li>
- <li><a href="http://www.microshwhat.com/bar/soapy">Barsoap</a></li>
- <li>Baz</li>
- </ul>
-`,
- `hi
-hello google \( https://google.com \)
-
-test
-
-List:
-
-\* Foo \( foo \)
-\* Barsoap \( http://www.microshwhat.com/bar/soapy \)
-\* Baz`,
- },
- // Malformed input html.
- {
- `hi
-
- hello <a href="https://google.com">google</a>
-
- test<p>List:</p>
-
- <ul>
- <li><a href="foo">Foo</a>
- <li><a href="/
- bar/baz">Bar</a>
- <li>Baz</li>
- </ul>
- `,
- `hi hello google \( https://google.com \) test
-
-List:
-
-\* Foo \( foo \)
-\* Bar \( /\n[ \t]+bar/baz \)
-\* Baz`,
- },
- }
-
- for _, testCase := range testCases {
- assertRegexp(t, testCase.input, testCase.expr)
- }
-}
-
-type StringMatcher interface {
- MatchString(string) bool
- String() string
-}
-
-type RegexpStringMatcher string
-
-func (m RegexpStringMatcher) MatchString(str string) bool {
- return regexp.MustCompile(string(m)).MatchString(str)
-}
-
-func (m RegexpStringMatcher) String() string {
- return string(m)
-}
-
-type ExactStringMatcher string
-
-func (m ExactStringMatcher) MatchString(str string) bool {
- return string(m) == str
-}
-
-func (m ExactStringMatcher) String() string {
- return string(m)
-}
-
-func assertRegexp(t *testing.T, input string, outputRE string) {
- assertPlaintext(t, input, RegexpStringMatcher(outputRE))
-}
-
-func assertString(t *testing.T, input string, output string) {
- assertPlaintext(t, input, ExactStringMatcher(output))
-}
-
-func assertPlaintext(t *testing.T, input string, matcher StringMatcher) {
- text, err := FromString(input)
- if err != nil {
- t.Error(err)
- }
- if !matcher.MatchString(text) {
- t.Errorf("Input did not match expression\n"+
- "Input:\n>>>>\n%s\n<<<<\n\n"+
- "Output:\n>>>>\n%s\n<<<<\n\n"+
- "Expected output:\n>>>>\n%s\n<<<<\n\n",
- input, text, matcher.String())
- } else {
- t.Logf("input:\n\n%s\n\n\n\noutput:\n\n%s\n", input, text)
- }
-}
-
-func Example() {
- inputHtml := `
- <html>
- <head>
- <title>My Mega Service</title>
- <link rel=\"stylesheet\" href=\"main.css\">
- <style type=\"text/css\">body { color: #fff; }</style>
- </head>
-
- <body>
- <div class="logo">
- <a href="http://mymegaservice.com/"><img src="/logo-image.jpg" alt="Mega Service"/></a>
- </div>
-
- <h1>Welcome to your new account on my service!</h1>
-
- <p>
- Here is some more information:
-
- <ul>
- <li>Link 1: <a href="https://example.com">Example.com</a></li>
- <li>Link 2: <a href="https://example2.com">Example2.com</a></li>
- <li>Something else</li>
- </ul>
- </p>
- </body>
- </html>
- `
-
- text, err := FromString(inputHtml)
- if err != nil {
- panic(err)
- }
- fmt.Println(text)
-
- // Output:
- // Mega Service ( http://mymegaservice.com/ )
- //
- // ******************************************
- // Welcome to your new account on my service!
- // ******************************************
- //
- // Here is some more information:
- //
- // * Link 1: Example.com ( https://example.com )
- // * Link 2: Example2.com ( https://example2.com )
- // * Something else
-}
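For context on the deleted test file above: the helpers pair FromString with exact and regexp matchers. A minimal sketch of how a test in that package would have used them (hypothetical test; expected strings follow the link and list rendering shown in Example above):

```go
func TestLinkRendering(t *testing.T) {
	// Exact match: anchors render as "text ( url )".
	assertString(t, `<a href="https://example.com">Example</a>`, "Example ( https://example.com )")
	// Regexp match: list items render with a leading "*".
	assertRegexp(t, "<ul><li>Item</li></ul>", `\* Item`)
}
```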
diff --git a/vendor/github.com/cpanato/html2text/testdata/utf8.html b/vendor/github.com/cpanato/html2text/testdata/utf8.html
deleted file mode 100755
index 53d401ce9..000000000
--- a/vendor/github.com/cpanato/html2text/testdata/utf8.html
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version='1.0' encoding='utf-8'?>
-<html xmlns="http://www.w3.org/1999/xhtml">
-
-<head>
- <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <title>学习之道:美国公认学习第一书title</title>
- <link href="stylesheet.css" rel="stylesheet" type="text/css" />
- <link href="page_styles.css" rel="stylesheet" type="text/css" />
-</head>
-
-<body class="calibre">
- <p id="filepos9452" class="calibre_"><span class="calibre6"><span class="bold">写在前面的话</span></span>
- </p>
- <p class="calibre_12">在台湾的那次世界冠军赛上,我几近疯狂,直至两年后的今天,我仍沉浸在这次的经历中。这是我生平第一次如此深入地审视我自己,甚至是第一次尝试审视自己。这个过程令人很是兴奋,同时也有点感觉怪异。我重新认识了自我,看到了自己的另外一面,自己从未发觉的另外一面。为了生存,为了取胜,我成了一名角斗士,彻头彻尾,简单纯粹。我并没有意识到这一角色早已在我的心中生根发芽,呼之欲出。也许,他的出现已是不可避免。</p>
- <p class="calibre_7">而我这全新的一面,与我一直熟识的那个乔希,那个曾经害怕黑暗的孩子,那个象棋手,那个狂热于雨水、反复诵读杰克·克鲁亚克作品的年轻人之间,又有什么样的联系呢?这些都是我正在努力弄清楚的问题。</p>
- <p class="calibre_7">自台湾赛事之后,我急切非常,一心想要回到训练中去,摆脱自己已经达到巅峰的想法。在过去的两年中,我已经重新开始。这是一个新的起点。前方的路还很长,有待进一步的探索。</p>
- <p class="calibre_7">这本书的创作耗费了相当多的时间和精力。在成长的过程中,我在我的小房间里从未想过等待我的会是这样的战斗。在创作中,我的思想逐渐成熟;爱恋从分崩离析,到失而复得,世界冠军头衔从失之交臂,到囊中取物。如果说在我人生的第一个二十九年中,我学到了什么,那就是,我们永远无法预测结局,无论是重要的比赛、冒险,还是轰轰烈烈的爱情。我们唯一可以肯定的只有,出乎意料。不管我们做了多么万全的准备,在生活的真实场景中,我们总是会处于陌生的境地。我们也许会无法冷静,失去理智,感觉似乎整个世界都在针对我们。在这个时候,我们所要做的是要付出加倍的努力,要表现得比预想得更好。我认为,关键在于准备好随机应变,准备好在所能想象的高压下发挥出创造力。</p>
- <p class="calibre_7">读者朋友们,我非常希望你们在读过这本书后,可以得到启发,甚至会得到触动,从而能够根据各自的天赋与特长,去实现自己的梦想。这就是我写作此书的目的。我在字里行间所传达的理念曾经使我受益匪浅,我很希望它们可以为大家提供一个基本的框架和方向。如果我的方法言之有理,那么就请接受它,琢磨它,并加之自己的见解。忘记我的那些数字。真正的掌握需要通过自己发现一些最能够引起共鸣的信息,并将其彻底地融合进来,直至成为一体,这样我们才能随心所欲地驾驭它。</p>
- <div class="mbp_pagebreak" id="calibre_pb_4"></div>
-</body>
-
-</html> \ No newline at end of file
diff --git a/vendor/github.com/cpanato/html2text/testdata/utf8_with_bom.xhtml b/vendor/github.com/cpanato/html2text/testdata/utf8_with_bom.xhtml
deleted file mode 100755
index 68f0ee707..000000000
--- a/vendor/github.com/cpanato/html2text/testdata/utf8_with_bom.xhtml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="utf-8" ?>
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="zh-CN">
-
-<head>
- <meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8" />
- <title>1892年波兰文版序言title</title>
- <link rel="stylesheet" href="css/stylesheet.css" type="text/css" />
-</head>
-
-<body>
- <div id="page30" />
- <h2 id="CHP2-6">1892年波兰文版序言<a id="wzyy_18_30" href="#wz_18_30"><sup>[18]</sup></a></h2>
- <p>出版共产主义宣言的一种新的波兰文本已成为必要,这一事实,引起了许多感想。</p>
- <p>首先值得注意的是,近来宣言在一定程度上已成为欧洲大陆大工业发展的一种尺度。一个国家的大工业越发展,该国工人中想认清自己作为工人阶级在有产阶级面前所处地位的要求就越增加,他们中间的社会主义运动也越扩大,因而对宣言的需求也越增长。这样,根据宣言用某国文字销行的份数,不仅能够相当确切地断定该国工人运动的状况,而且还能够相当确切地断定该国大工业发展的程度。</p>
- <p>因此,波兰文的新版本标志着波兰工业的决定性进步。从十年前发表的上一个版本以来确实有了这种进步,对此丝毫不容置疑。俄国的波兰,会议的波兰<a id="wzyy_19_30" href="#wz_19_30"><sup>[19]</sup></a>,成了俄罗斯帝国巨大的工业区。俄国大工业是零星分散的,一部分在芬兰湾沿岸,一部分在中央区(莫斯科和弗拉基米尔),第三部分在黑海和亚速海沿岸,还有另一些散布在别处;而波兰工业则紧缩于相对狭小的地区,享受到由这种积聚引起的长处与短处。这种长处是竞争着的俄罗斯工厂主所承认的,他们要求实行保护关税以对付波兰,尽管他们渴望使波兰人俄罗斯化。这种短处,对波兰工厂主与俄罗斯政府来说,表现在社会主义思想在波兰工人中间的迅速传播和对宣言需求的增长。</p>
- <p>但是,波兰工业的迅速发展——它超过了俄国工业——本身<a id="page31" />是波兰人民的坚强生命力的一个新证明,是波兰人民临近的民族复兴的一个新保证。而一个独立强盛的波兰的复兴,不只是一件同波兰人有关、而且是同我们大家有关的事情。只有当每个民族在自己内部完全自主时,欧洲各民族间真诚的国际合作才是可能的。1848年革命在无产阶级旗帜下,使无产阶级的战士最终只作了资产阶级的工作,这次革命通过自己遗嘱的执行者路易·波拿巴和俾斯麦也实现了意大利、德国和匈牙利的独立。然而波兰,它从1792年以来为革命做的比所有这三个国家总共做的还要多,而当它1863年失败于强大十倍的俄军的时候,人们却把它抛弃不顾了。贵族既未能保持住、也未能重新争得波兰的独立;今天波兰的独立对资产阶级至少是无所谓的。然而波兰的独立对于欧洲各民族和谐的合作是必需的。这种独立只有年轻的波兰无产阶级才能争得,而且在它的手中会很好地保持住。因为欧洲所有其余的工人都象波兰工人自己一样也需要波兰的独立。</p>
- <p>弗·恩格斯</p>
- <p>1892年2月10日于伦敦</p>
- <div id="page74" />
- <div><a id="wz_18_30" href="#wzyy_18_30">[18]</a> 恩格斯用德文为《宣言》新的波兰文本写了这篇序言。1892年由波兰社会主义者在伦敦办的《黎明》杂志社出版。序言寄出后,恩格斯写信给门德尔森(1892年2月11日),信中说,他很愿意学会波兰文,并且深入研究波兰工人运动的发展,以便能够为《宣言》的下一版写一篇更详细的序言。——第20页</div>
- <div><a id="wz_19_30" href="#wzyy_19_30">[19]</a> 指维也纳会议的波兰,即根据1814—1815年维也纳会议的决定,以波兰王国的正式名义割给俄国的那部分波兰土地。——第20页</div>
-</body>
-
-</html> \ No newline at end of file
diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml
index 984e0736e..34a55c287 100644
--- a/vendor/github.com/davecgh/go-spew/.travis.yml
+++ b/vendor/github.com/davecgh/go-spew/.travis.yml
@@ -1,14 +1,27 @@
language: go
+go_import_path: github.com/davecgh/go-spew
go:
- - 1.5.4
- - 1.6.3
- - 1.7
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - tip
+sudo: false
install:
- - go get -v golang.org/x/tools/cmd/cover
+ - go get -v github.com/alecthomas/gometalinter
+ - gometalinter --install
script:
- - go test -v -tags=safe ./spew
- - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
+ - export PATH=$PATH:$HOME/gopath/bin
+ - export GORACE="halt_on_error=1"
+ - test -z "$(gometalinter --disable-all
+ --enable=gofmt
+ --enable=golint
+ --enable=vet
+ --enable=gosimple
+ --enable=unconvert
+ --deadline=4m ./spew | tee /dev/stderr)"
+ - go test -v -race -tags safe ./spew
+ - go test -v -race -tags testcgo ./spew -covermode=atomic -coverprofile=profile.cov
after_success:
- go get -v github.com/mattn/goveralls
- - export PATH=$PATH:$HOME/gopath/bin
- goveralls -coverprofile=profile.cov -service=travis-ci
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
index bb6733231..bc52e96f2 100644
--- a/vendor/github.com/davecgh/go-spew/LICENSE
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -1,8 +1,8 @@
ISC License
-Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
-Permission to use, copy, modify, and distribute this software for any
+Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md
index 556170ae6..f6ed02c3b 100644
--- a/vendor/github.com/davecgh/go-spew/README.md
+++ b/vendor/github.com/davecgh/go-spew/README.md
@@ -1,10 +1,9 @@
go-spew
=======
-[![Build Status](https://travis-ci.org/davecgh/go-spew.png?branch=master)]
-(https://travis-ci.org/davecgh/go-spew) [![Coverage Status]
-(https://coveralls.io/repos/davecgh/go-spew/badge.png?branch=master)]
-(https://coveralls.io/r/davecgh/go-spew?branch=master)
+[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)](https://travis-ci.org/davecgh/go-spew)
+[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
+[![Coverage Status](https://img.shields.io/coveralls/davecgh/go-spew.svg)](https://coveralls.io/r/davecgh/go-spew?branch=master)
Go-spew implements a deep pretty printer for Go data structures to aid in
debugging. A comprehensive suite of tests with 100% test coverage is provided
@@ -19,8 +18,7 @@ post about it
## Documentation
-[![GoDoc](https://godoc.org/github.com/davecgh/go-spew/spew?status.png)]
-(http://godoc.org/github.com/davecgh/go-spew/spew)
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/davecgh/go-spew/spew)
Full `go doc` style documentation for the project can be viewed online without
installing this package by using the excellent GoDoc site here:
@@ -160,6 +158,15 @@ options. See the ConfigState documentation for more details.
App Engine or with the "safe" build tag specified.
Pointer method invocation is enabled by default.
+* DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+* DisableCapacities
+ DisableCapacities specifies whether to disable the printing of capacities
+ for arrays, slices, maps and channels. This is useful when diffing data
+ structures in tests.
+
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
@@ -191,4 +198,4 @@ using the unsafe package.
## License
-Go-spew is licensed under the liberal ISC License.
+Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.
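A minimal sketch of the two options the README now documents, using the ConfigState fields added in config.go below:

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	cfg := spew.ConfigState{
		Indent:                  " ",
		DisablePointerAddresses: true, // stable output when diffing structures in tests
		DisableCapacities:       true, // drop cap= for arrays, slices, maps and channels
	}
	v := struct{ Names []string }{Names: make([]string, 1, 8)}
	cfg.Dump(&v)
}
```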
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
index d42a0bc4a..7f166c3a3 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2015 Dave Collins <dave@davec.name>
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -41,9 +41,9 @@ var (
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
- offsetPtr = uintptr(ptrSize)
+ offsetPtr = ptrSize
offsetScalar = uintptr(0)
- offsetFlag = uintptr(ptrSize * 2)
+ offsetFlag = ptrSize * 2
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
@@ -58,7 +58,7 @@ var (
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
- flagKindShift = uintptr(flagKindWidth - 1)
+ flagKindShift = flagKindWidth - 1
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
index e47a4e795..1fe3cf3d5 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2015 Dave Collins <dave@davec.name>
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
index 14f02dc15..1be8ce945 100644
--- a/vendor/github.com/davecgh/go-spew/spew/common.go
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
w.Write(closeParenBytes)
}
-// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go
index 39b7525b3..0f5ce47dc 100644
--- a/vendor/github.com/davecgh/go-spew/spew/common_test.go
+++ b/vendor/github.com/davecgh/go-spew/spew/common_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
index 555282723..2e3d22f31 100644
--- a/vendor/github.com/davecgh/go-spew/spew/config.go
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -67,6 +67,15 @@ type ConfigState struct {
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
index 5be0c4060..aacaac6f1 100644
--- a/vendor/github.com/davecgh/go-spew/spew/doc.go
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -91,6 +91,15 @@ The following configuration options are available:
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
index a0ff95e27..f78d89fc1 100644
--- a/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -35,16 +35,16 @@ var (
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
- cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
- cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
- cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
@@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
d.w.Write(closeParenBytes)
// Display pointer information.
- if len(pointerChain) > 0 {
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
- case nilFound == true:
+ case nilFound:
d.w.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
d.w.Write(circularBytes)
default:
@@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) {
case reflect.Map, reflect.String:
valueLen = v.Len()
}
- if valueLen != 0 || valueCap != 0 {
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
- if valueCap != 0 {
+ if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
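The two guards above are where the new options take effect during a dump; the expected outputs added to spew_test.go further down make the behavior concrete. A short sketch:

```go
// With DisableCapacities, a non-empty slice keeps len= but loses cap=.
scsNoCap := &spew.ConfigState{DisableCapacities: true}
fmt.Print(scsNoCap.Sdump(make([]string, 1, 10)))
// ([]string) (len=1) {
// (string) ""
// }

// With DisablePointerAddresses, the "(0x...)" pointer chain is suppressed.
scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}
fmt.Print(scsNoPtrAddr.Sdump(&struct{ s *struct{} }{s: &struct{}{}}))
```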
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go
index 2b320401d..5aad9c7af 100644
--- a/vendor/github.com/davecgh/go-spew/spew/dump_test.go
+++ b/vendor/github.com/davecgh/go-spew/spew/dump_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -70,7 +70,7 @@ import (
"github.com/davecgh/go-spew/spew"
)
-// dumpTest is used to describe a test to be perfomed against the Dump method.
+// dumpTest is used to describe a test to be performed against the Dump method.
type dumpTest struct {
in interface{}
wants []string
diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
index ed3e3c31a..108baa55f 100644
--- a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
+++ b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2013 Dave Collins <dave@davec.name>
+// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -82,18 +82,20 @@ func addCgoDumpTests() {
v5Len := fmt.Sprintf("%d", v5l)
v5Cap := fmt.Sprintf("%d", v5c)
v5t := "[6]testdata._Ctype_uint8_t"
+ v5t2 := "[6]testdata._Ctype_uchar"
v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
"{\n 00000000 74 65 73 74 35 00 " +
" |test5.|\n}"
- addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+ addDumpTest(v5, "("+v5t+") "+v5s+"\n", "("+v5t2+") "+v5s+"\n")
// C typedefed unsigned char array.
v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
v6Len := fmt.Sprintf("%d", v6l)
v6Cap := fmt.Sprintf("%d", v6c)
v6t := "[6]testdata._Ctype_custom_uchar_t"
+ v6t2 := "[6]testdata._Ctype_uchar"
v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
"{\n 00000000 74 65 73 74 36 00 " +
" |test6.|\n}"
- addDumpTest(v6, "("+v6t+") "+v6s+"\n")
+ addDumpTest(v6, "("+v6t+") "+v6s+"\n", "("+v6t2+") "+v6s+"\n")
}
diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go
index de6c4e309..c6ec8c6d5 100644
--- a/vendor/github.com/davecgh/go-spew/spew/example_test.go
+++ b/vendor/github.com/davecgh/go-spew/spew/example_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
index ecf3b80e2..b04edb7d7 100644
--- a/vendor/github.com/davecgh/go-spew/spew/format.go
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
// Display dereferenced value.
switch {
- case nilFound == true:
+ case nilFound:
f.fs.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
f.fs.Write(circularShortBytes)
default:
diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go
index b664b3f13..0719eb916 100644
--- a/vendor/github.com/davecgh/go-spew/spew/format_test.go
+++ b/vendor/github.com/davecgh/go-spew/spew/format_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -75,7 +75,7 @@ import (
"github.com/davecgh/go-spew/spew"
)
-// formatterTest is used to describe a test to be perfomed against NewFormatter.
+// formatterTest is used to describe a test to be performed against NewFormatter.
type formatterTest struct {
format string
in interface{}
@@ -1536,14 +1536,14 @@ func TestPrintSortedKeys(t *testing.T) {
t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected)
}
- s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2})
+ s = cfg.Sprint(map[testStruct]int{{1}: 1, {3}: 3, {2}: 2})
expected = "map[ts.1:1 ts.2:2 ts.3:3]"
if s != expected {
t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected)
}
if !spew.UnsafeDisabled {
- s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2})
+ s = cfg.Sprint(map[testStructP]int{{1}: 1, {3}: 3, {2}: 2})
expected = "map[ts.1:1 ts.2:2 ts.3:3]"
if s != expected {
t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected)
diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go
index 1069ee21c..e312b4fad 100644
--- a/vendor/github.com/davecgh/go-spew/spew/internal_test.go
+++ b/vendor/github.com/davecgh/go-spew/spew/internal_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -36,10 +36,7 @@ type dummyFmtState struct {
}
func (dfs *dummyFmtState) Flag(f int) bool {
- if f == int('+') {
- return true
- }
- return false
+ return f == int('+')
}
func (dfs *dummyFmtState) Precision() (int, bool) {
diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
index 863b62cf5..a0c612ec3 100644
--- a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
+++ b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2013-2015 Dave Collins <dave@davec.name>
+// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
index d8233f542..32c0e3388 100644
--- a/vendor/github.com/davecgh/go-spew/spew/spew.go
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/davecgh/go-spew/spew/spew_test.go
index dbbc08567..b70466c69 100644
--- a/vendor/github.com/davecgh/go-spew/spew/spew_test.go
+++ b/vendor/github.com/davecgh/go-spew/spew/spew_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -130,12 +130,19 @@ func initSpewTests() {
scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
+ scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}
+ scsNoCap := &spew.ConfigState{DisableCapacities: true}
// Variables for tests on types which implement Stringer interface with and
// without a pointer receiver.
ts := stringer("test")
tps := pstringer("test")
+ type ptrTester struct {
+ s *struct{}
+ }
+ tptr := &ptrTester{s: &struct{}{}}
+
// depthTester is used to test max depth handling for structs, array, slices
// and maps.
type depthTester struct {
@@ -192,6 +199,10 @@ func initSpewTests() {
{scsContinue, fCSFprint, "", te, "(error: 10) 10"},
{scsContinue, fCSFdump, "", te, "(spew_test.customError) " +
"(error: 10) 10\n"},
+ {scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"},
+ {scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"},
+ {scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"},
+ {scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"},
}
}
diff --git a/vendor/github.com/disintegration/imaging/helpers.go b/vendor/github.com/disintegration/imaging/helpers.go
index 5fb2b5aac..d4e73f120 100644
--- a/vendor/github.com/disintegration/imaging/helpers.go
+++ b/vendor/github.com/disintegration/imaging/helpers.go
@@ -76,8 +76,32 @@ func Open(filename string) (image.Image, error) {
return img, err
}
+type encodeConfig struct {
+ jpegQuality int
+}
+
+var defaultEncodeConfig = encodeConfig{
+ jpegQuality: 95,
+}
+
+// EncodeOption sets an optional parameter for the Encode and Save functions.
+type EncodeOption func(*encodeConfig)
+
+// JPEGQuality returns an EncodeOption that sets the output JPEG quality.
+// Quality ranges from 1 to 100 inclusive, higher is better. Default is 95.
+func JPEGQuality(quality int) EncodeOption {
+ return func(c *encodeConfig) {
+ c.jpegQuality = quality
+ }
+}
+
// Encode writes the image img to w in the specified format (JPEG, PNG, GIF, TIFF or BMP).
-func Encode(w io.Writer, img image.Image, format Format) error {
+func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) error {
+ cfg := defaultEncodeConfig
+ for _, option := range opts {
+ option(&cfg)
+ }
+
var err error
switch format {
case JPEG:
@@ -92,9 +116,9 @@ func Encode(w io.Writer, img image.Image, format Format) error {
}
}
if rgba != nil {
- err = jpeg.Encode(w, rgba, &jpeg.Options{Quality: 95})
+ err = jpeg.Encode(w, rgba, &jpeg.Options{Quality: cfg.jpegQuality})
} else {
- err = jpeg.Encode(w, img, &jpeg.Options{Quality: 95})
+ err = jpeg.Encode(w, img, &jpeg.Options{Quality: cfg.jpegQuality})
}
case PNG:
@@ -113,7 +137,16 @@ func Encode(w io.Writer, img image.Image, format Format) error {
// Save saves the image to file with the specified filename.
// The format is determined from the filename extension: "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
-func Save(img image.Image, filename string) (err error) {
+//
+// Examples:
+//
+// // Save the image as PNG.
+// err := imaging.Save(img, "out.png")
+//
+// // Save the image as JPEG with optional quality parameter set to 80.
+// err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80))
+//
+func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
formats := map[string]Format{
".jpg": JPEG,
".jpeg": JPEG,
@@ -136,7 +169,7 @@ func Save(img image.Image, filename string) (err error) {
}
defer file.Close()
- return Encode(file, img, f)
+ return Encode(file, img, f, opts...)
}
// New creates a new image with the specified width and height, and fills it with the specified color.
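A short usage sketch of the new variadic option (not part of the diff; assumes imaging.New takes a width, height, and fill color as documented above). Encode and Save both accept the option:

```go
package main

import (
	"bytes"
	"image/color"
	"log"

	"github.com/disintegration/imaging"
)

func main() {
	img := imaging.New(64, 64, color.NRGBA{R: 255, A: 255})
	var buf bytes.Buffer
	// Quality defaults to 95 via defaultEncodeConfig when no option is passed.
	if err := imaging.Encode(&buf, img, imaging.JPEG, imaging.JPEGQuality(80)); err != nil {
		log.Fatal(err)
	}
	log.Printf("encoded %d bytes", buf.Len())
}
```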
diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml
index 3a5c933bc..981d1bb81 100644
--- a/vendor/github.com/fsnotify/fsnotify/.travis.yml
+++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml
@@ -2,12 +2,14 @@ sudo: false
language: go
go:
- - 1.6.3
+ - 1.8.x
+ - 1.9.x
- tip
matrix:
allow_failures:
- go: tip
+ fast_finish: true
before_script:
- go get -u github.com/golang/lint/golint
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
index 0a5bf8f61..5ab5d41c5 100644
--- a/vendor/github.com/fsnotify/fsnotify/AUTHORS
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -8,8 +8,10 @@
# Please keep the list sorted.
+Aaron L <aaron@bettercoder.net>
Adrien Bustany <adrien@bustany.org>
Amit Krishnan <amit.krishnan@oracle.com>
+Anmol Sethi <me@anmol.io>
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
@@ -26,6 +28,7 @@ Kelvin Fo <vmirage@gmail.com>
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
Matt Layher <mdlayher@gmail.com>
Nathan Youngman <git@nathany.com>
+Nickolai Zeldovich <nickolai@csail.mit.edu>
Patrick <patrick@dropbox.com>
Paul Hammond <paul@paulhammond.org>
Pawel Knap <pawelknap88@gmail.com>
@@ -33,12 +36,15 @@ Pieter Droogendijk <pieter@binky.org.uk>
Pursuit92 <JoshChase@techpursuit.net>
Riku Voipio <riku.voipio@linaro.org>
Rob Figueiredo <robfig@gmail.com>
+Rodrigo Chiossi <rodrigochiossi@gmail.com>
Slawek Ligus <root@ooz.ie>
Soge Zhang <zhssoge@gmail.com>
Tiffany Jernigan <tiffany.jernigan@intel.com>
Tilak Sharma <tilaks@google.com>
+Tom Payne <twpayne@gmail.com>
Travis Cline <travis.cline@gmail.com>
Tudor Golubenco <tudor.g@gmail.com>
+Vahe Khachikyan <vahe@live.ca>
Yukang <moorekang@gmail.com>
bronze1man <bronze1man@gmail.com>
debrando <denis.brandolini@gmail.com>
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
index 40d7660d5..be4d7ea2c 100644
--- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -1,5 +1,15 @@
# Changelog
+## v1.4.7 / 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
## v1.4.2 / 2016-10-10
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
@@ -79,7 +89,7 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
## v1.0.2 / 2014-08-17
-* [Fix] Missing create events on OS X. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
## v1.0.0 / 2014-08-15
@@ -142,7 +152,7 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
## v0.9.2 / 2014-08-17
-* [Backport] Fix missing create events on OS X. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
## v0.9.1 / 2014-06-12
@@ -161,7 +171,7 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
## v0.8.11 / 2013-11-02
* [Doc] Add Changelog [#72][] (thanks @nathany)
-* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
## v0.8.10 / 2013-10-19
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
index 6a81ba489..828a60b24 100644
--- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -17,7 +17,7 @@ Please indicate that you have signed the CLA in your pull request.
### How fsnotify is Developed
* Development is done on feature branches.
-* Tests are run on BSD, Linux, OS X and Windows.
+* Tests are run on BSD, Linux, macOS and Windows.
* Pull requests are reviewed and [applied to master][am] using [hub][].
* Maintainers may modify or squash commits rather than asking contributors to.
* To issue a new release, the maintainers will:
@@ -44,7 +44,7 @@ This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/
### Testing
-fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows.
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
@@ -58,7 +58,7 @@ To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
-Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
### Maintainers
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
index 3c891e349..399320741 100644
--- a/vendor/github.com/fsnotify/fsnotify/README.md
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -8,14 +8,14 @@ fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather
go get -u golang.org/x/sys/...
```
-Cross platform: Windows, Linux, BSD and OS X.
+Cross platform: Windows, Linux, BSD and macOS.
|Adapter |OS |Status |
|----------|----------|----------|
|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
-|kqueue |BSD, OS X, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
-|FSEvents |OS X |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|fanotify |Linux 2.6.37+ | |
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
@@ -23,7 +23,7 @@ Cross platform: Windows, Linux, BSD and OS X.
\* Android and iOS are untested.
-Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) for usage. Consult the [Wiki](https://github.com/fsnotify/fsnotify/wiki) for the FAQ and further information.
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
## API stability
@@ -41,6 +41,35 @@ Please refer to [CONTRIBUTING][] before opening an issue or pull request.
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]).
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
+* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
## Related Projects
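The Linux limit mentioned in the new FAQ can be checked programmatically; a small sketch (assumes Linux and a readable procfs):

```go
package main

import (
	"fmt"
	"io/ioutil"
)

func main() {
	data, err := ioutil.ReadFile("/proc/sys/fs/inotify/max_user_watches")
	if err != nil {
		fmt.Println("could not read limit:", err)
		return
	}
	fmt.Printf("max_user_watches: %s", data)
}
```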
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
index e7f55fee7..190bf0de5 100644
--- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -9,6 +9,7 @@ package fsnotify
import (
"bytes"
+ "errors"
"fmt"
)
@@ -60,3 +61,6 @@ func (op Op) String() string {
func (e Event) String() string {
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
}
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")
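A sketch of how a consumer might handle the newly exported error (only ErrEventOverflow itself comes from the diff; the recovery strategy is a common convention, not prescribed by the library):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	for err := range w.Errors {
		if err == fsnotify.ErrEventOverflow {
			// The kernel queue overflowed and events were dropped; rescanning
			// the watched paths is the usual recovery.
			log.Println("inotify queue overflow, rescan needed")
			continue
		}
		log.Println("watch error:", err)
	}
}
```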
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go
index 9d6d72afc..f9771d9df 100644
--- a/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify_test.go
@@ -6,7 +6,11 @@
package fsnotify
-import "testing"
+import (
+ "os"
+ "testing"
+ "time"
+)
func TestEventStringWithValue(t *testing.T) {
for opMask, expectedString := range map[Op]string{
@@ -38,3 +42,29 @@ func TestEventOpStringWithNoValue(t *testing.T) {
t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
}
}
+
+// TestWatcherClose tests that the goroutine started by creating the watcher can be
+// signalled to return at any time, even if there is no goroutine listening on the events
+// or errors channels.
+func TestWatcherClose(t *testing.T) {
+ t.Parallel()
+
+ name := tempMkFile(t, "")
+ w := newWatcher(t)
+ err := w.Add(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = os.Remove(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Allow the watcher to receive the event.
+ time.Sleep(time.Millisecond * 100)
+
+ err = w.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
index f3b74c51f..d9fd1b88a 100644
--- a/vendor/github.com/fsnotify/fsnotify/inotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -24,7 +24,6 @@ type Watcher struct {
Events chan Event
Errors chan error
mu sync.Mutex // Map access
- cv *sync.Cond // sync removing on rm_watch with IN_IGNORE
fd int
poller *fdPoller
watches map[string]*watch // Map of inotify watches (key: path)
@@ -56,7 +55,6 @@ func NewWatcher() (*Watcher, error) {
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
- w.cv = sync.NewCond(&w.mu)
go w.readEvents()
return w, nil
@@ -103,21 +101,23 @@ func (w *Watcher) Add(name string) error {
var flags uint32 = agnosticEvents
w.mu.Lock()
- watchEntry, found := w.watches[name]
- w.mu.Unlock()
- if found {
- watchEntry.flags |= flags
- flags |= unix.IN_MASK_ADD
+ defer w.mu.Unlock()
+ watchEntry := w.watches[name]
+ if watchEntry != nil {
+ flags |= watchEntry.flags | unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
- w.mu.Lock()
- w.watches[name] = &watch{wd: uint32(wd), flags: flags}
- w.paths[wd] = name
- w.mu.Unlock()
+ if watchEntry == nil {
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ } else {
+ watchEntry.wd = uint32(wd)
+ watchEntry.flags = flags
+ }
return nil
}
@@ -135,6 +135,13 @@ func (w *Watcher) Remove(name string) error {
if !ok {
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
}
+
+	// If InotifyRmWatch doesn't return an error, we have successfully removed
+	// the watch, and we need to clean up our internal state to ensure it
+	// matches inotify's kernel state.
+ delete(w.paths, int(watch.wd))
+ delete(w.watches, name)
+
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
@@ -152,13 +159,6 @@ func (w *Watcher) Remove(name string) error {
return errno
}
- // wait until ignoreLinux() deleting maps
- exists := true
- for exists {
- w.cv.Wait()
- _, exists = w.watches[name]
- }
-
return nil
}
@@ -245,13 +245,31 @@ func (w *Watcher) readEvents() {
mask := uint32(raw.Mask)
nameLen := uint32(raw.Len)
+
+ if mask&unix.IN_Q_OVERFLOW != 0 {
+ select {
+ case w.Errors <- ErrEventOverflow:
+ case <-w.done:
+ return
+ }
+ }
+
// If the event happened to the watched directory or the watched file, the kernel
	// doesn't append the filename to the event, but we would like to always fill
	// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
- name := w.paths[int(raw.Wd)]
+ name, ok := w.paths[int(raw.Wd)]
+ // IN_DELETE_SELF occurs when the file/directory being watched is removed.
+ // This is a sign to clean up the maps, otherwise we are no longer in sync
+ // with the inotify kernel state which has already deleted the watch
+ // automatically.
+ if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ delete(w.paths, int(raw.Wd))
+ delete(w.watches, name)
+ }
w.mu.Unlock()
+
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
@@ -262,7 +280,7 @@ func (w *Watcher) readEvents() {
event := newEvent(name, mask)
// Send the events that are not ignored on the events channel
- if !event.ignoreLinux(w, raw.Wd, mask) {
+ if !event.ignoreLinux(mask) {
select {
case w.Events <- event:
case <-w.done:
@@ -279,15 +297,9 @@ func (w *Watcher) readEvents() {
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
-func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
+func (e *Event) ignoreLinux(mask uint32) bool {
// Ignore anything the inotify API says to ignore
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
- w.mu.Lock()
- defer w.mu.Unlock()
- name := w.paths[int(wd)]
- delete(w.paths, int(wd))
- delete(w.watches, name)
- w.cv.Broadcast()
return true
}
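With the sync.Cond wait gone, Remove cleans the internal maps up front, so a repeated Remove fails fast instead of blocking on IN_IGNORED. A sketch of the observable behavior (mirrors TestInotifyRemoveTwice below):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	if err := w.Remove("/tmp"); err != nil { // cleans w.watches and w.paths immediately
		log.Fatal(err)
	}
	if err := w.Remove("/tmp"); err != nil { // no longer blocks; returns an error instead
		log.Println(err) // can't remove non-existent inotify watch for: /tmp
	}
}
```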
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_test.go b/vendor/github.com/fsnotify/fsnotify/inotify_test.go
index a4bb202d1..54f3f00eb 100644
--- a/vendor/github.com/fsnotify/fsnotify/inotify_test.go
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_test.go
@@ -293,25 +293,23 @@ func TestInotifyRemoveTwice(t *testing.T) {
t.Fatalf("Failed to add testFile: %v", err)
}
- err = os.Remove(testFile)
+ err = w.Remove(testFile)
if err != nil {
- t.Fatalf("Failed to remove testFile: %v", err)
+ t.Fatalf("wanted successful remove but got: %v", err)
}
err = w.Remove(testFile)
if err == nil {
t.Fatalf("no error on removing invalid file")
}
- s1 := fmt.Sprintf("%s", err)
- err = w.Remove(testFile)
- if err == nil {
- t.Fatalf("no error on removing invalid file")
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if len(w.watches) != 0 {
+ t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
}
- s2 := fmt.Sprintf("%s", err)
-
- if s1 != s2 {
- t.Fatalf("receive different error - %s / %s", s1, s2)
+ if len(w.paths) != 0 {
+ t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
}
}
@@ -358,3 +356,94 @@ func TestInotifyInnerMapLength(t *testing.T) {
t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
}
}
+
+func TestInotifyOverflow(t *testing.T) {
+ // We need to generate many more events than the
+ // fs.inotify.max_queued_events sysctl setting.
+ // We use multiple goroutines (one per directory)
+ // to speed up file creation.
+ numDirs := 128
+ numFiles := 1024
+
+ testDir := tempMkdir(t)
+ defer os.RemoveAll(testDir)
+
+ w, err := NewWatcher()
+ if err != nil {
+ t.Fatalf("Failed to create watcher: %v", err)
+ }
+ defer w.Close()
+
+ for dn := 0; dn < numDirs; dn++ {
+ testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
+
+ err := os.Mkdir(testSubdir, 0777)
+ if err != nil {
+ t.Fatalf("Cannot create subdir: %v", err)
+ }
+
+ err = w.Add(testSubdir)
+ if err != nil {
+ t.Fatalf("Failed to add subdir: %v", err)
+ }
+ }
+
+ errChan := make(chan error, numDirs*numFiles)
+
+ for dn := 0; dn < numDirs; dn++ {
+ testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
+
+ go func() {
+ for fn := 0; fn < numFiles; fn++ {
+ testFile := fmt.Sprintf("%s/%d", testSubdir, fn)
+
+ handle, err := os.Create(testFile)
+ if err != nil {
+ errChan <- fmt.Errorf("Create failed: %v", err)
+ continue
+ }
+
+ err = handle.Close()
+ if err != nil {
+ errChan <- fmt.Errorf("Close failed: %v", err)
+ continue
+ }
+ }
+ }()
+ }
+
+ creates := 0
+ overflows := 0
+
+ after := time.After(10 * time.Second)
+ for overflows == 0 && creates < numDirs*numFiles {
+ select {
+ case <-after:
+ t.Fatalf("Not done")
+ case err := <-errChan:
+ t.Fatalf("Got an error from file creator goroutine: %v", err)
+ case err := <-w.Errors:
+ if err == ErrEventOverflow {
+ overflows++
+ } else {
+ t.Fatalf("Got an error from watcher: %v", err)
+ }
+ case evt := <-w.Events:
+ if !strings.HasPrefix(evt.Name, testDir) {
+ t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+ }
+ if evt.Op == Create {
+ creates++
+ }
+ }
+ }
+
+ if creates == numDirs*numFiles {
+ t.Fatalf("Could not trigger overflow")
+ }
+
+ if overflows == 0 {
+ t.Fatalf("No overflow and not enough creates (expected %d, got %d)",
+ numDirs*numFiles, creates)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go b/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go
index 5564554f7..cd6adc273 100644
--- a/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go
+++ b/vendor/github.com/fsnotify/fsnotify/integration_darwin_test.go
@@ -13,9 +13,9 @@ import (
"golang.org/x/sys/unix"
)
-// testExchangedataForWatcher tests the watcher with the exchangedata operation on OS X.
+// testExchangedataForWatcher tests the watcher with the exchangedata operation on macOS.
//
-// This is widely used for atomic saves on OS X, e.g. TextMate and in Apple's NSDocument.
+// This is widely used for atomic saves on macOS, e.g. TextMate and in Apple's NSDocument.
//
// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html
// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
index c2b4acb18..86e76a3d6 100644
--- a/vendor/github.com/fsnotify/fsnotify/kqueue.go
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -22,7 +22,7 @@ import (
type Watcher struct {
Events chan Event
Errors chan error
- done chan bool // Channel for sending a "quit message" to the reader goroutine
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
kq int // File descriptor (as returned by the kqueue() syscall).
@@ -56,7 +56,7 @@ func NewWatcher() (*Watcher, error) {
externalWatches: make(map[string]bool),
Events: make(chan Event),
Errors: make(chan error),
- done: make(chan bool),
+ done: make(chan struct{}),
}
go w.readEvents()
@@ -71,10 +71,8 @@ func (w *Watcher) Close() error {
return nil
}
w.isClosed = true
- w.mu.Unlock()
// copy paths to remove while locked
- w.mu.Lock()
var pathsToRemove = make([]string, 0, len(w.watches))
for name := range w.watches {
pathsToRemove = append(pathsToRemove, name)
@@ -82,15 +80,12 @@ func (w *Watcher) Close() error {
w.mu.Unlock()
// unlock before calling Remove, which also locks
- var err error
for _, name := range pathsToRemove {
- if e := w.Remove(name); e != nil && err == nil {
- err = e
- }
+ w.Remove(name)
}
- // Send "quit" message to the reader goroutine:
- w.done <- true
+ // send a "quit" message to the reader goroutine
+ close(w.done)
return nil
}
@@ -266,17 +261,12 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
func (w *Watcher) readEvents() {
eventBuffer := make([]unix.Kevent_t, 10)
+loop:
for {
// See if there is a message on the "done" channel
select {
case <-w.done:
- err := unix.Close(w.kq)
- if err != nil {
- w.Errors <- err
- }
- close(w.Events)
- close(w.Errors)
- return
+ break loop
default:
}
@@ -284,7 +274,11 @@ func (w *Watcher) readEvents() {
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
- w.Errors <- err
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ break loop
+ }
continue
}
@@ -319,8 +313,12 @@ func (w *Watcher) readEvents() {
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
w.sendDirectoryChangeEvents(event.Name)
} else {
- // Send the event on the Events channel
- w.Events <- event
+ // Send the event on the Events channel.
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ break loop
+ }
}
if event.Op&Remove == Remove {
@@ -352,6 +350,18 @@ func (w *Watcher) readEvents() {
kevents = kevents[1:]
}
}
+
+ // cleanup
+ err := unix.Close(w.kq)
+ if err != nil {
+		// The only way the previous loop can break is if w.done was closed, so we need a non-blocking send to w.Errors.
+ select {
+ case w.Errors <- err:
+ default:
+ }
+ }
+ close(w.Events)
+ close(w.Errors)
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
@@ -407,7 +417,11 @@ func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
- w.Errors <- err
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
}
// Search for new files
@@ -428,7 +442,11 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
w.mu.Unlock()
if !doesExist {
// Send create event
- w.Events <- newCreateEvent(filePath)
+ select {
+ case w.Events <- newCreateEvent(filePath):
+ case <-w.done:
+ return
+ }
}
// like watchDirectoryFiles (but without doing another ReadDir)
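The recurring edit in kqueue.go replaces blocking channel sends with a select against the done channel, which is now closed rather than written to. The pattern in isolation, as an illustrative sketch (names mine):

```go
// close(done) unblocks every pending send at once, letting the reader
// goroutine exit via "break loop" and run its cleanup exactly once.
func trySend(ch chan<- string, done <-chan struct{}, v string) bool {
	select {
	case ch <- v: // delivered
		return true
	case <-done: // watcher is closing
		return false
	}
}
```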
diff --git a/vendor/github.com/go-ldap/ldap/.travis.yml b/vendor/github.com/go-ldap/ldap/.travis.yml
index e32a2aa75..9782c9bac 100644
--- a/vendor/github.com/go-ldap/ldap/.travis.yml
+++ b/vendor/github.com/go-ldap/ldap/.travis.yml
@@ -1,8 +1,8 @@
language: go
env:
global:
- - VET_VERSIONS="1.6 1.7 tip"
- - LINT_VERSIONS="1.6 1.7 tip"
+ - VET_VERSIONS="1.6 1.7 1.8 1.9 tip"
+ - LINT_VERSIONS="1.6 1.7 1.8 1.9 tip"
go:
- 1.2
- 1.3
@@ -10,6 +10,8 @@ go:
- 1.5
- 1.6
- 1.7
+ - 1.8
+ - 1.9
- tip
matrix:
fast_finish: true
diff --git a/vendor/github.com/go-ldap/ldap/Makefile b/vendor/github.com/go-ldap/ldap/Makefile
index c1fc96657..a9d351c76 100644
--- a/vendor/github.com/go-ldap/ldap/Makefile
+++ b/vendor/github.com/go-ldap/ldap/Makefile
@@ -1,5 +1,15 @@
.PHONY: default install build test quicktest fmt vet lint
+GO_VERSION := $(shell go version | cut -d' ' -f3 | cut -d. -f2)
+
+# Only use the `-race` flag on newer versions of Go
+IS_OLD_GO := $(shell test $(GO_VERSION) -le 2 && echo true)
+ifeq ($(IS_OLD_GO),true)
+ RACE_FLAG :=
+else
+ RACE_FLAG := -race -cpu 1,2,4
+endif
+
default: fmt vet lint build quicktest
install:
@@ -9,7 +19,7 @@ build:
go build -v ./...
test:
- go test -v -cover ./...
+ go test -v $(RACE_FLAG) -cover ./...
quicktest:
go test ./...
diff --git a/vendor/github.com/go-ldap/ldap/atomic_value.go b/vendor/github.com/go-ldap/ldap/atomic_value.go
new file mode 100644
index 000000000..bccf7573e
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/atomic_value.go
@@ -0,0 +1,13 @@
+// +build go1.4
+
+package ldap
+
+import (
+ "sync/atomic"
+)
+
+// For compilers that support it, we just use the underlying sync/atomic.Value
+// type.
+type atomicValue struct {
+ atomic.Value
+}
diff --git a/vendor/github.com/go-ldap/ldap/atomic_value_go13.go b/vendor/github.com/go-ldap/ldap/atomic_value_go13.go
new file mode 100644
index 000000000..04920bb26
--- /dev/null
+++ b/vendor/github.com/go-ldap/ldap/atomic_value_go13.go
@@ -0,0 +1,28 @@
+// +build !go1.4
+
+package ldap
+
+import (
+ "sync"
+)
+
+// This is a helper type that emulates the use of the "sync/atomic.Value"
+// struct that's available in Go 1.4 and up.
+type atomicValue struct {
+ value interface{}
+ lock sync.RWMutex
+}
+
+func (av *atomicValue) Store(val interface{}) {
+ av.lock.Lock()
+ av.value = val
+ av.lock.Unlock()
+}
+
+func (av *atomicValue) Load() interface{} {
+ av.lock.RLock()
+ ret := av.value
+ av.lock.RUnlock()
+
+ return ret
+}
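Both variants expose the same Store/Load pair, so code such as conn.go can use closeErr without caring which file was compiled in. A usage sketch (function names mine; this only works inside the ldap package, since atomicValue is unexported):

```go
var closeErr atomicValue // build tags pick the sync/atomic or RWMutex variant

func recordCloseError(err error) {
	closeErr.Store(err)
}

func lastCloseError() error {
	if v := closeErr.Load(); v != nil {
		return v.(error)
	}
	return nil
}
```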
diff --git a/vendor/github.com/go-ldap/ldap/conn.go b/vendor/github.com/go-ldap/ldap/conn.go
index b5bd99adb..eb28eb472 100644
--- a/vendor/github.com/go-ldap/ldap/conn.go
+++ b/vendor/github.com/go-ldap/ldap/conn.go
@@ -11,6 +11,7 @@ import (
"log"
"net"
"sync"
+ "sync/atomic"
"time"
"gopkg.in/asn1-ber.v1"
@@ -82,20 +83,18 @@ const (
type Conn struct {
conn net.Conn
isTLS bool
- isClosing bool
- closeErr error
+ closing uint32
+ closeErr atomicValue
isStartingTLS bool
Debug debugging
- chanConfirm chan bool
+ chanConfirm chan struct{}
messageContexts map[int64]*messageContext
chanMessage chan *messagePacket
chanMessageID chan int64
- wgSender sync.WaitGroup
wgClose sync.WaitGroup
- once sync.Once
outstandingRequests uint
messageMutex sync.Mutex
- requestTimeout time.Duration
+ requestTimeout int64
}
var _ Client = &Conn{}
@@ -142,7 +141,7 @@ func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
func NewConn(conn net.Conn, isTLS bool) *Conn {
return &Conn{
conn: conn,
- chanConfirm: make(chan bool),
+ chanConfirm: make(chan struct{}),
chanMessageID: make(chan int64),
chanMessage: make(chan *messagePacket, 10),
messageContexts: map[int64]*messageContext{},
@@ -158,12 +157,22 @@ func (l *Conn) Start() {
l.wgClose.Add(1)
}
+// isClosing returns whether or not we're currently closing.
+func (l *Conn) isClosing() bool {
+ return atomic.LoadUint32(&l.closing) == 1
+}
+
+// setClosing sets the closing value to true
+func (l *Conn) setClosing() bool {
+ return atomic.CompareAndSwapUint32(&l.closing, 0, 1)
+}
+
// Close closes the connection.
func (l *Conn) Close() {
- l.once.Do(func() {
- l.isClosing = true
- l.wgSender.Wait()
+ l.messageMutex.Lock()
+ defer l.messageMutex.Unlock()
+ if l.setClosing() {
l.Debug.Printf("Sending quit message and waiting for confirmation")
l.chanMessage <- &messagePacket{Op: MessageQuit}
<-l.chanConfirm
@@ -171,27 +180,25 @@ func (l *Conn) Close() {
l.Debug.Printf("Closing network connection")
if err := l.conn.Close(); err != nil {
- log.Print(err)
+ log.Println(err)
}
l.wgClose.Done()
- })
+ }
l.wgClose.Wait()
}
// SetTimeout sets the time after a request is sent that a MessageTimeout triggers
func (l *Conn) SetTimeout(timeout time.Duration) {
if timeout > 0 {
- l.requestTimeout = timeout
+ atomic.StoreInt64(&l.requestTimeout, int64(timeout))
}
}
// Returns the next available messageID
func (l *Conn) nextMessageID() int64 {
- if l.chanMessageID != nil {
- if messageID, ok := <-l.chanMessageID; ok {
- return messageID
- }
+ if messageID, ok := <-l.chanMessageID; ok {
+ return messageID
}
return 0
}
@@ -258,7 +265,7 @@ func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
}
func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
- if l.isClosing {
+ if l.isClosing() {
return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
}
l.messageMutex.Lock()
@@ -297,7 +304,7 @@ func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags)
func (l *Conn) finishMessage(msgCtx *messageContext) {
close(msgCtx.done)
- if l.isClosing {
+ if l.isClosing() {
return
}
@@ -316,12 +323,12 @@ func (l *Conn) finishMessage(msgCtx *messageContext) {
}
func (l *Conn) sendProcessMessage(message *messagePacket) bool {
- if l.isClosing {
+ l.messageMutex.Lock()
+ defer l.messageMutex.Unlock()
+ if l.isClosing() {
return false
}
- l.wgSender.Add(1)
l.chanMessage <- message
- l.wgSender.Done()
return true
}
@@ -333,15 +340,14 @@ func (l *Conn) processMessages() {
for messageID, msgCtx := range l.messageContexts {
// If we are closing due to an error, inform anyone who
// is waiting about the error.
- if l.isClosing && l.closeErr != nil {
- msgCtx.sendResponse(&PacketResponse{Error: l.closeErr})
+ if l.isClosing() && l.closeErr.Load() != nil {
+ msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)})
}
l.Debug.Printf("Closing channel for MessageID %d", messageID)
close(msgCtx.responses)
delete(l.messageContexts, messageID)
}
close(l.chanMessageID)
- l.chanConfirm <- true
close(l.chanConfirm)
}()
@@ -350,11 +356,7 @@ func (l *Conn) processMessages() {
select {
case l.chanMessageID <- messageID:
messageID++
- case message, ok := <-l.chanMessage:
- if !ok {
- l.Debug.Printf("Shutting down - message channel is closed")
- return
- }
+ case message := <-l.chanMessage:
switch message.Op {
case MessageQuit:
l.Debug.Printf("Shutting down - quit message received")
@@ -377,14 +379,15 @@ func (l *Conn) processMessages() {
l.messageContexts[message.MessageID] = message.Context
// Add timeout if defined
- if l.requestTimeout > 0 {
+ requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout))
+ if requestTimeout > 0 {
go func() {
defer func() {
if err := recover(); err != nil {
log.Printf("ldap: recovered panic in RequestTimeout: %v", err)
}
}()
- time.Sleep(l.requestTimeout)
+ time.Sleep(requestTimeout)
timeoutMessage := &messagePacket{
Op: MessageTimeout,
MessageID: message.MessageID,
@@ -397,7 +400,7 @@ func (l *Conn) processMessages() {
if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
msgCtx.sendResponse(&PacketResponse{message.Packet, nil})
} else {
- log.Printf("Received unexpected message %d, %v", message.MessageID, l.isClosing)
+ log.Printf("Received unexpected message %d, %v", message.MessageID, l.isClosing())
ber.PrintPacket(message.Packet)
}
case MessageTimeout:
@@ -439,8 +442,8 @@ func (l *Conn) reader() {
packet, err := ber.ReadPacket(l.conn)
if err != nil {
// A read error is expected here if we are closing the connection...
- if !l.isClosing {
- l.closeErr = fmt.Errorf("unable to read LDAP response packet: %s", err)
+ if !l.isClosing() {
+ l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err))
l.Debug.Printf("reader error: %s", err.Error())
}
return
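The core of the conn.go rewrite is replacing `sync.Once` plus a plain bool with an atomic `uint32`: `CompareAndSwapUint32(0, 1)` succeeds for exactly one caller of `Close`, and `LoadUint32` lets every other goroutine poll the flag without taking a lock. A standalone sketch of that pattern (names shortened; not the vendored code itself):

```go
// Standalone sketch of the idempotent-close pattern adopted above:
// a uint32 flag flipped with compare-and-swap so only the first
// closer performs shutdown, while readers stay lock-free.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type conn struct {
	closing uint32
}

func (c *conn) isClosing() bool {
	return atomic.LoadUint32(&c.closing) == 1
}

// setClosing reports whether this caller won the race to close.
func (c *conn) setClosing() bool {
	return atomic.CompareAndSwapUint32(&c.closing, 0, 1)
}

func main() {
	c := &conn{}
	var wg sync.WaitGroup
	var winners uint32
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if c.setClosing() {
				atomic.AddUint32(&winners, 1) // exactly one goroutine wins
			}
		}()
	}
	wg.Wait()
	fmt.Println("winners:", winners, "isClosing:", c.isClosing()) // winners: 1 isClosing: true
}
```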
diff --git a/vendor/github.com/go-ldap/ldap/conn_test.go b/vendor/github.com/go-ldap/ldap/conn_test.go
index 10766bbd4..488754d16 100644
--- a/vendor/github.com/go-ldap/ldap/conn_test.go
+++ b/vendor/github.com/go-ldap/ldap/conn_test.go
@@ -60,7 +60,7 @@ func TestUnresponsiveConnection(t *testing.T) {
// TestFinishMessage tests that we do not enter deadlock when a goroutine makes
// a request but does not handle all responses from the server.
-func TestConn(t *testing.T) {
+func TestFinishMessage(t *testing.T) {
ptc := newPacketTranslatorConn()
defer ptc.Close()
@@ -174,16 +174,12 @@ func testSendUnhandledResponsesAndFinish(t *testing.T, ptc *packetTranslatorConn
}
func runWithTimeout(t *testing.T, timeout time.Duration, f func()) {
- runtime.Gosched()
-
done := make(chan struct{})
go func() {
f()
close(done)
}()
- runtime.Gosched()
-
select {
case <-done: // Success!
case <-time.After(timeout):
@@ -192,7 +188,7 @@ func runWithTimeout(t *testing.T, timeout time.Duration, f func()) {
}
}
-// packetTranslatorConn is a helful type which can be used with various tests
+// packetTranslatorConn is a helpful type which can be used with various tests
// in this package. It implements the net.Conn interface to be used as an
// underlying connection for a *ldap.Conn. Most methods are no-ops but the
// Read() and Write() methods are able to translate ber-encoded packets for
@@ -245,7 +241,7 @@ func (c *packetTranslatorConn) Read(b []byte) (n int, err error) {
}
// SendResponse writes the given response packet to the response buffer for
-// this conection, signalling any goroutine waiting to read a response.
+// this connection, signalling any goroutine waiting to read a response.
func (c *packetTranslatorConn) SendResponse(packet *ber.Packet) error {
c.lock.Lock()
defer c.lock.Unlock()
diff --git a/vendor/github.com/go-ldap/ldap/debug.go b/vendor/github.com/go-ldap/ldap/debug.go
index b8a7ecbff..7279fc251 100644
--- a/vendor/github.com/go-ldap/ldap/debug.go
+++ b/vendor/github.com/go-ldap/ldap/debug.go
@@ -6,7 +6,7 @@ import (
"gopkg.in/asn1-ber.v1"
)
-// debbuging type
+// debugging type
// - has a Printf method to write the debug output
type debugging bool
diff --git a/vendor/github.com/go-ldap/ldap/dn.go b/vendor/github.com/go-ldap/ldap/dn.go
index a8ece3142..34e9023af 100644
--- a/vendor/github.com/go-ldap/ldap/dn.go
+++ b/vendor/github.com/go-ldap/ldap/dn.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
-// File contains DN parsing functionallity
+// File contains DN parsing functionality
//
// https://tools.ietf.org/html/rfc4514
//
@@ -52,7 +52,7 @@ import (
"fmt"
"strings"
- ber "gopkg.in/asn1-ber.v1"
+ "gopkg.in/asn1-ber.v1"
)
// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514
@@ -143,6 +143,9 @@ func ParseDN(str string) (*DN, error) {
}
} else if char == ',' || char == '+' {
// We're done with this RDN or value, push it
+ if len(attribute.Type) == 0 {
+ return nil, errors.New("incomplete type, value pair")
+ }
attribute.Value = stringFromBuffer()
rdn.Attributes = append(rdn.Attributes, attribute)
attribute = new(AttributeTypeAndValue)
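The guard added above rejects an RDN whose type is still empty when a `,` or `+` is reached, closing a hole where DNs like `=test,DC=example,DC=com` parsed without error. A short usage sketch; the import path is assumed from the vendored tree:

```go
// Usage sketch for the new empty-type guard in ParseDN. The import
// path is an assumption based on the vendored tree above.
package main

import (
	"fmt"

	"github.com/go-ldap/ldap"
)

func main() {
	for _, dn := range []string{
		"cn=Jim,dc=example,dc=com", // valid
		"=test,DC=example,DC=com",  // empty type: now rejected
	} {
		if _, err := ldap.ParseDN(dn); err != nil {
			fmt.Printf("%-26s -> %v\n", dn, err)
			continue
		}
		fmt.Printf("%-26s -> ok\n", dn)
	}
}
```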
diff --git a/vendor/github.com/go-ldap/ldap/dn_test.go b/vendor/github.com/go-ldap/ldap/dn_test.go
index 5055cc15b..af5fc1468 100644
--- a/vendor/github.com/go-ldap/ldap/dn_test.go
+++ b/vendor/github.com/go-ldap/ldap/dn_test.go
@@ -75,11 +75,13 @@ func TestSuccessfulDNParsing(t *testing.T) {
func TestErrorDNParsing(t *testing.T) {
testcases := map[string]string{
- "*": "DN ended with incomplete type, value pair",
- "cn=Jim\\0Test": "Failed to decode escaped character: encoding/hex: invalid byte: U+0054 'T'",
- "cn=Jim\\0": "Got corrupted escaped character",
- "DC=example,=net": "DN ended with incomplete type, value pair",
- "1=#0402486": "Failed to decode BER encoding: encoding/hex: odd length hex string",
+ "*": "DN ended with incomplete type, value pair",
+ "cn=Jim\\0Test": "Failed to decode escaped character: encoding/hex: invalid byte: U+0054 'T'",
+ "cn=Jim\\0": "Got corrupted escaped character",
+ "DC=example,=net": "DN ended with incomplete type, value pair",
+ "1=#0402486": "Failed to decode BER encoding: encoding/hex: odd length hex string",
+ "test,DC=example,DC=com": "incomplete type, value pair",
+ "=test,DC=example,DC=com": "incomplete type, value pair",
}
for test, answer := range testcases {
diff --git a/vendor/github.com/go-ldap/ldap/error.go b/vendor/github.com/go-ldap/ldap/error.go
index ff697873d..4cccb537f 100644
--- a/vendor/github.com/go-ldap/ldap/error.go
+++ b/vendor/github.com/go-ldap/ldap/error.go
@@ -97,6 +97,13 @@ var LDAPResultCodeMap = map[uint8]string{
LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited",
LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs",
LDAPResultOther: "Other",
+
+ ErrorNetwork: "Network Error",
+ ErrorFilterCompile: "Filter Compile Error",
+ ErrorFilterDecompile: "Filter Decompile Error",
+ ErrorDebugging: "Debugging Error",
+ ErrorUnexpectedMessage: "Unexpected Message",
+ ErrorUnexpectedResponse: "Unexpected Response",
}
func getLDAPResultCode(packet *ber.Packet) (code uint8, description string) {
diff --git a/vendor/github.com/go-ldap/ldap/error_test.go b/vendor/github.com/go-ldap/ldap/error_test.go
index c010ebe3e..e456431bd 100644
--- a/vendor/github.com/go-ldap/ldap/error_test.go
+++ b/vendor/github.com/go-ldap/ldap/error_test.go
@@ -49,7 +49,7 @@ func TestConnReadErr(t *testing.T) {
// Send the signal after a short amount of time.
time.AfterFunc(10*time.Millisecond, func() { conn.signals <- expectedError })
- // This should block until the underlyiny conn gets the error signal
+ // This should block until the underlying conn gets the error signal
// which should bubble up through the reader() goroutine, close the
// connection, and
_, err := ldapConn.Search(searchReq)
@@ -58,7 +58,7 @@ func TestConnReadErr(t *testing.T) {
}
}
-// signalErrConn is a helful type used with TestConnReadErr. It implements the
+// signalErrConn is a helpful type used with TestConnReadErr. It implements the
// net.Conn interface to be used as a connection for the test. Most methods are
// no-ops but the Read() method blocks until it receives a signal which it
// returns as an error.
diff --git a/vendor/github.com/go-ldap/ldap/example_test.go b/vendor/github.com/go-ldap/ldap/example_test.go
index b018a9664..650af0a43 100644
--- a/vendor/github.com/go-ldap/ldap/example_test.go
+++ b/vendor/github.com/go-ldap/ldap/example_test.go
@@ -9,7 +9,7 @@ import (
)
// ExampleConn_Bind demonstrates how to bind a connection to an ldap user
-// allowing access to restricted attrabutes that user has access to
+// allowing access to restricted attributes that user has access to
func ExampleConn_Bind() {
l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
if err != nil {
@@ -63,10 +63,10 @@ func ExampleConn_StartTLS() {
log.Fatal(err)
}
- // Opertations via l are now encrypted
+ // Operations via l are now encrypted
}
-// ExampleConn_Compare demonstrates how to comapre an attribute with a value
+// ExampleConn_Compare demonstrates how to compare an attribute with a value
func ExampleConn_Compare() {
l, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", "ldap.example.com", 389))
if err != nil {
@@ -193,7 +193,7 @@ func Example_userAuthentication() {
searchRequest := ldap.NewSearchRequest(
"dc=example,dc=com",
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
- fmt.Sprintf("(&(objectClass=organizationalPerson)&(uid=%s))", username),
+ fmt.Sprintf("(&(objectClass=organizationalPerson)(uid=%s))", username),
[]string{"dn"},
nil,
)
@@ -215,7 +215,7 @@ func Example_userAuthentication() {
log.Fatal(err)
}
- // Rebind as the read only user for any futher queries
+ // Rebind as the read only user for any further queries
err = l.Bind(bindusername, bindpassword)
if err != nil {
log.Fatal(err)
@@ -240,7 +240,7 @@ func Example_beherappolicy() {
if ppolicyControl != nil {
ppolicy = ppolicyControl.(*ldap.ControlBeheraPasswordPolicy)
} else {
- log.Printf("ppolicyControl response not avaliable.\n")
+ log.Printf("ppolicyControl response not available.\n")
}
if err != nil {
errStr := "ERROR: Cannot bind: " + err.Error()
diff --git a/vendor/github.com/go-ldap/ldap/filter.go b/vendor/github.com/go-ldap/ldap/filter.go
index 7eae310f1..3858a2865 100644
--- a/vendor/github.com/go-ldap/ldap/filter.go
+++ b/vendor/github.com/go-ldap/ldap/filter.go
@@ -82,7 +82,10 @@ func CompileFilter(filter string) (*ber.Packet, error) {
if err != nil {
return nil, err
}
- if pos != len(filter) {
+ switch {
+ case pos > len(filter):
+ return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
+ case pos < len(filter):
return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:])))
}
return packet, nil
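Splitting the old `pos != len(filter)` test into two cases distinguishes a parser that overran the input (`pos > len`, now "unexpected end of filter") from trailing garbage after a complete filter (`pos < len`). A usage sketch of the overrun case exercised by the new test below; the import path is assumed from the vendored tree:

```go
// Usage sketch for the new bounds check in CompileFilter: an
// unbalanced opening parenthesis now reports "unexpected end of
// filter" instead of slicing past the end of the input.
package main

import (
	"fmt"

	"github.com/go-ldap/ldap"
)

func main() {
	for _, f := range []string{
		"(&(objectClass=person)(uid=jdoe))", // valid
		"((cn=)",                            // parser overruns: error
	} {
		if _, err := ldap.CompileFilter(f); err != nil {
			fmt.Printf("%-36s -> %v\n", f, err)
			continue
		}
		fmt.Printf("%-36s -> ok\n", f)
	}
}
```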
diff --git a/vendor/github.com/go-ldap/ldap/filter_test.go b/vendor/github.com/go-ldap/ldap/filter_test.go
index ae1b79b0c..2b019ac5d 100644
--- a/vendor/github.com/go-ldap/ldap/filter_test.go
+++ b/vendor/github.com/go-ldap/ldap/filter_test.go
@@ -132,6 +132,12 @@ var testFilters = []compileTest{
expectedErr: "unexpected end of filter",
},
compileTest{
+ filterStr: `((cn=)`,
+ expectedFilter: ``,
+ expectedType: 0,
+ expectedErr: "unexpected end of filter",
+ },
+ compileTest{
filterStr: `(&(objectclass=inetorgperson)(cn=中文))`,
expectedFilter: `(&(objectclass=inetorgperson)(cn=\e4\b8\ad\e6\96\87))`,
expectedType: 0,
diff --git a/vendor/github.com/go-ldap/ldap/ldap.go b/vendor/github.com/go-ldap/ldap/ldap.go
index d27e639d0..496924756 100644
--- a/vendor/github.com/go-ldap/ldap/ldap.go
+++ b/vendor/github.com/go-ldap/ldap/ldap.go
@@ -9,7 +9,7 @@ import (
"io/ioutil"
"os"
- ber "gopkg.in/asn1-ber.v1"
+ "gopkg.in/asn1-ber.v1"
)
// LDAP Application Codes
diff --git a/vendor/github.com/go-ldap/ldap/passwdmodify.go b/vendor/github.com/go-ldap/ldap/passwdmodify.go
index 26110ccf4..7d8246fd1 100644
--- a/vendor/github.com/go-ldap/ldap/passwdmodify.go
+++ b/vendor/github.com/go-ldap/ldap/passwdmodify.go
@@ -135,10 +135,10 @@ func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*Pa
extendedResponse := packet.Children[1]
for _, child := range extendedResponse.Children {
if child.Tag == 11 {
- passwordModifyReponseValue := ber.DecodePacket(child.Data.Bytes())
- if len(passwordModifyReponseValue.Children) == 1 {
- if passwordModifyReponseValue.Children[0].Tag == 0 {
- result.GeneratedPassword = ber.DecodeString(passwordModifyReponseValue.Children[0].Data.Bytes())
+ passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes())
+ if len(passwordModifyResponseValue.Children) == 1 {
+ if passwordModifyResponseValue.Children[0].Tag == 0 {
+ result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes())
}
}
}
diff --git a/vendor/github.com/go-ldap/ldap/search_test.go b/vendor/github.com/go-ldap/ldap/search_test.go
index efb8147d1..5f77b22e9 100644
--- a/vendor/github.com/go-ldap/ldap/search_test.go
+++ b/vendor/github.com/go-ldap/ldap/search_test.go
@@ -15,7 +15,7 @@ func TestNewEntry(t *testing.T) {
"delta": {"value"},
"epsilon": {"value"},
}
- exectedEntry := NewEntry(dn, attributes)
+ expectedEntry := NewEntry(dn, attributes)
iteration := 0
for {
@@ -23,8 +23,8 @@ func TestNewEntry(t *testing.T) {
break
}
testEntry := NewEntry(dn, attributes)
- if !reflect.DeepEqual(exectedEntry, testEntry) {
- t.Fatalf("consequent calls to NewEntry did not yield the same result:\n\texpected:\n\t%s\n\tgot:\n\t%s\n", exectedEntry, testEntry)
+ if !reflect.DeepEqual(expectedEntry, testEntry) {
+ t.Fatalf("subsequent calls to NewEntry did not yield the same result:\n\texpected:\n\t%s\n\tgot:\n\t%s\n", expectedEntry, testEntry)
}
iteration = iteration + 1
}
diff --git a/vendor/github.com/go-redis/redis/.travis.yml b/vendor/github.com/go-redis/redis/.travis.yml
index f49927ee8..c95b3e6c6 100644
--- a/vendor/github.com/go-redis/redis/.travis.yml
+++ b/vendor/github.com/go-redis/redis/.travis.yml
@@ -5,7 +5,6 @@ services:
- redis-server
go:
- - 1.4.x
- 1.7.x
- 1.8.x
- 1.9.x
@@ -13,7 +12,6 @@ go:
matrix:
allow_failures:
- - go: 1.4.x
- go: tip
install:
diff --git a/vendor/github.com/go-redis/redis/README.md b/vendor/github.com/go-redis/redis/README.md
index 0a2a67124..9f349764a 100644
--- a/vendor/github.com/go-redis/redis/README.md
+++ b/vendor/github.com/go-redis/redis/README.md
@@ -2,6 +2,7 @@
[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
[![GoDoc](https://godoc.org/github.com/go-redis/redis?status.svg)](https://godoc.org/github.com/go-redis/redis)
+[![Airbrake](https://img.shields.io/badge/kudos-airbrake.io-orange.svg)](https://airbrake.io)
Supports:
@@ -66,14 +67,14 @@ func ExampleClient() {
val2, err := client.Get("key2").Result()
if err == redis.Nil {
- fmt.Println("key2 does not exists")
+ fmt.Println("key2 does not exist")
} else if err != nil {
panic(err)
} else {
fmt.Println("key2", val2)
}
// Output: key value
- // key2 does not exists
+ // key2 does not exist
}
```
diff --git a/vendor/github.com/go-redis/redis/cluster.go b/vendor/github.com/go-redis/redis/cluster.go
index c81fc1d57..accdb3d27 100644
--- a/vendor/github.com/go-redis/redis/cluster.go
+++ b/vendor/github.com/go-redis/redis/cluster.go
@@ -226,7 +226,7 @@ func (c *clusterNodes) NextGeneration() uint32 {
}
// GC removes unused nodes.
-func (c *clusterNodes) GC(generation uint32) error {
+func (c *clusterNodes) GC(generation uint32) {
var collected []*clusterNode
c.mu.Lock()
for i := 0; i < len(c.addrs); {
@@ -243,14 +243,11 @@ func (c *clusterNodes) GC(generation uint32) error {
}
c.mu.Unlock()
- var firstErr error
- for _, node := range collected {
- if err := node.Client.Close(); err != nil && firstErr == nil {
- firstErr = err
+ time.AfterFunc(time.Minute, func() {
+ for _, node := range collected {
+ _ = node.Client.Close()
}
- }
-
- return firstErr
+ })
}
func (c *clusterNodes) All() ([]*clusterNode, error) {
@@ -533,16 +530,22 @@ func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
return info
}
+func cmdSlot(cmd Cmder, pos int) int {
+ if pos == 0 {
+ return hashtag.RandomSlot()
+ }
+ firstKey := cmd.stringArg(pos)
+ return hashtag.Slot(firstKey)
+}
+
func (c *ClusterClient) cmdSlot(cmd Cmder) int {
cmdInfo := c.cmdInfo(cmd.Name())
- firstKey := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
- return hashtag.Slot(firstKey)
+ return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
}
func (c *ClusterClient) cmdSlotAndNode(state *clusterState, cmd Cmder) (int, *clusterNode, error) {
cmdInfo := c.cmdInfo(cmd.Name())
- firstKey := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
- slot := hashtag.Slot(firstKey)
+ slot := cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
if cmdInfo != nil && cmdInfo.ReadOnly && c.opt.ReadOnly {
if c.opt.RouteByLatency {
@@ -590,6 +593,10 @@ func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
break
}
+ if internal.IsRetryableError(err, true) {
+ continue
+ }
+
moved, ask, addr := internal.IsMovedError(err)
if moved || ask {
c.lazyReloadState()
@@ -600,6 +607,13 @@ func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
continue
}
+ if err == pool.ErrClosed {
+ node, err = state.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+ }
+
return err
}
@@ -635,10 +649,10 @@ func (c *ClusterClient) Process(cmd Cmder) error {
if ask {
pipe := node.Client.Pipeline()
- pipe.Process(NewCmd("ASKING"))
- pipe.Process(cmd)
+ _ = pipe.Process(NewCmd("ASKING"))
+ _ = pipe.Process(cmd)
_, err = pipe.Exec()
- pipe.Close()
+ _ = pipe.Close()
ask = false
} else {
err = node.Client.Process(cmd)
@@ -679,6 +693,14 @@ func (c *ClusterClient) Process(cmd Cmder) error {
continue
}
+ if err == pool.ErrClosed {
+ _, node, err = c.cmdSlotAndNode(state, cmd)
+ if err != nil {
+ cmd.setErr(err)
+ return err
+ }
+ }
+
break
}
@@ -915,7 +937,11 @@ func (c *ClusterClient) pipelineExec(cmds []Cmder) error {
for node, cmds := range cmdsMap {
cn, _, err := node.Client.getConn()
if err != nil {
- setCmdsErr(cmds, err)
+ if err == pool.ErrClosed {
+ c.remapCmds(cmds, failedCmds)
+ } else {
+ setCmdsErr(cmds, err)
+ }
continue
}
@@ -955,6 +981,18 @@ func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, e
return cmdsMap, nil
}
+func (c *ClusterClient) remapCmds(cmds []Cmder, failedCmds map[*clusterNode][]Cmder) {
+ remappedCmds, err := c.mapCmdsByNode(cmds)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return
+ }
+
+ for node, cmds := range remappedCmds {
+ failedCmds[node] = cmds
+ }
+}
+
func (c *ClusterClient) pipelineProcessCmds(
node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder,
) error {
@@ -1061,7 +1099,11 @@ func (c *ClusterClient) txPipelineExec(cmds []Cmder) error {
for node, cmds := range cmdsMap {
cn, _, err := node.Client.getConn()
if err != nil {
- setCmdsErr(cmds, err)
+ if err == pool.ErrClosed {
+ c.remapCmds(cmds, failedCmds)
+ } else {
+ setCmdsErr(cmds, err)
+ }
continue
}
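The `GC` rewrite above no longer closes collected nodes inline (and no longer returns an error); it schedules the closes a minute later with `time.AfterFunc`, so requests still in flight on a stale node get time to drain. A minimal sketch of that deferral pattern; the resource type and the shortened 50ms delay are inventions of this sketch:

```go
// Sketch of the deferred-teardown pattern cluster.go adopts in GC:
// collect stale resources under the lock, release the lock, and close
// them later via time.AfterFunc so in-flight users can drain. The
// driver uses its clusterNode clients and a one-minute delay.
package main

import (
	"fmt"
	"sync"
	"time"
)

type resource struct{ name string }

func (r *resource) Close() error {
	fmt.Println("closed", r.name)
	return nil
}

type registry struct {
	mu    sync.Mutex
	items []*resource
}

func (reg *registry) gc() {
	reg.mu.Lock()
	collected := reg.items
	reg.items = nil
	reg.mu.Unlock()

	time.AfterFunc(50*time.Millisecond, func() {
		for _, r := range collected {
			_ = r.Close() // errors are deliberately dropped, as in the driver
		}
	})
}

func main() {
	reg := &registry{items: []*resource{{"node-a"}, {"node-b"}}}
	reg.gc()
	fmt.Println("gc returned immediately")
	time.Sleep(100 * time.Millisecond) // wait to observe the deferred closes
}
```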
diff --git a/vendor/github.com/go-redis/redis/cluster_test.go b/vendor/github.com/go-redis/redis/cluster_test.go
index 6f3677b93..43f3261bc 100644
--- a/vendor/github.com/go-redis/redis/cluster_test.go
+++ b/vendor/github.com/go-redis/redis/cluster_test.go
@@ -536,6 +536,32 @@ var _ = Describe("ClusterClient", func() {
Expect(nodesList).Should(HaveLen(1))
})
+ It("should RANDOMKEY", func() {
+ const nkeys = 100
+
+ for i := 0; i < nkeys; i++ {
+ err := client.Set(fmt.Sprintf("key%d", i), "value", 0).Err()
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ var keys []string
+ addKey := func(key string) {
+ for _, k := range keys {
+ if k == key {
+ return
+ }
+ }
+ keys = append(keys, key)
+ }
+
+ for i := 0; i < nkeys*10; i++ {
+ key := client.RandomKey().Val()
+ addKey(key)
+ }
+
+ Expect(len(keys)).To(BeNumerically("~", nkeys, nkeys/10))
+ })
+
assertClusterClient()
})
diff --git a/vendor/github.com/go-redis/redis/command.go b/vendor/github.com/go-redis/redis/command.go
index d2688082a..598ed9800 100644
--- a/vendor/github.com/go-redis/redis/command.go
+++ b/vendor/github.com/go-redis/redis/command.go
@@ -82,13 +82,13 @@ func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
if cmd.stringArg(2) != "0" {
return 3
} else {
- return -1
+ return 0
}
case "publish":
return 1
}
if info == nil {
- return -1
+ return 0
}
return int(info.FirstKeyPos)
}
@@ -675,6 +675,44 @@ func (cmd *StringIntMapCmd) readReply(cn *pool.Conn) error {
//------------------------------------------------------------------------------
+type StringStructMapCmd struct {
+ baseCmd
+
+ val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd {
+ return &StringStructMapCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+ return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(cn *pool.Conn) error {
+ var v interface{}
+ v, cmd.err = cn.Rd.ReadArrayReply(stringStructMapParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.(map[string]struct{})
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
type ZSliceCmd struct {
baseCmd
diff --git a/vendor/github.com/go-redis/redis/commands.go b/vendor/github.com/go-redis/redis/commands.go
index c04b3c49b..569342cfa 100644
--- a/vendor/github.com/go-redis/redis/commands.go
+++ b/vendor/github.com/go-redis/redis/commands.go
@@ -143,6 +143,7 @@ type Cmdable interface {
SInterStore(destination string, keys ...string) *IntCmd
SIsMember(key string, member interface{}) *BoolCmd
SMembers(key string) *StringSliceCmd
+ SMembersMap(key string) *StringStructMapCmd
SMove(source, destination string, member interface{}) *BoolCmd
SPop(key string) *StringCmd
SPopN(key string, count int64) *StringSliceCmd
@@ -676,6 +677,7 @@ func (c *cmdable) DecrBy(key string, decrement int64) *IntCmd {
return cmd
}
+// Redis `GET key` command. It returns redis.Nil error when key does not exist.
func (c *cmdable) Get(key string) *StringCmd {
cmd := NewStringCmd("get", key)
c.process(cmd)
@@ -1163,12 +1165,20 @@ func (c *cmdable) SIsMember(key string, member interface{}) *BoolCmd {
return cmd
}
+// Redis `SMEMBERS key` command output as a slice
func (c *cmdable) SMembers(key string) *StringSliceCmd {
cmd := NewStringSliceCmd("smembers", key)
c.process(cmd)
return cmd
}
+// Redis `SMEMBERS key` command output as a map
+func (c *cmdable) SMembersMap(key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd("smembers", key)
+ c.process(cmd)
+ return cmd
+}
+
func (c *cmdable) SMove(source, destination string, member interface{}) *BoolCmd {
cmd := NewBoolCmd("smove", source, destination, member)
c.process(cmd)
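`SMembersMap` runs the same `SMEMBERS` command as `SMembers` but parses the reply into a `map[string]struct{}`, trading the slice for O(1) membership checks. A usage sketch, assuming a Redis server on localhost:6379 and an illustrative key name:

```go
// Usage sketch for the new SMembersMap command. The address and key
// name are placeholders.
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	client.SAdd("langs", "go", "redis")

	members, err := client.SMembersMap("langs").Result()
	if err != nil {
		panic(err)
	}
	// map[string]struct{} gives O(1) membership checks, unlike the
	// []string that SMembers returns.
	if _, ok := members["go"]; ok {
		fmt.Println("go is a member")
	}
}
```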
diff --git a/vendor/github.com/go-redis/redis/commands_test.go b/vendor/github.com/go-redis/redis/commands_test.go
index 6b81f23cf..715379556 100644
--- a/vendor/github.com/go-redis/redis/commands_test.go
+++ b/vendor/github.com/go-redis/redis/commands_test.go
@@ -1848,6 +1848,17 @@ var _ = Describe("Commands", func() {
Expect(sMembers.Val()).To(ConsistOf([]string{"Hello", "World"}))
})
+ It("should SMembersMap", func() {
+ sAdd := client.SAdd("set", "Hello")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+ sAdd = client.SAdd("set", "World")
+ Expect(sAdd.Err()).NotTo(HaveOccurred())
+
+ sMembersMap := client.SMembersMap("set")
+ Expect(sMembersMap.Err()).NotTo(HaveOccurred())
+ Expect(sMembersMap.Val()).To(Equal(map[string]struct{}{"Hello": struct{}{}, "World": struct{}{}}))
+ })
+
It("should SMove", func() {
sAdd := client.SAdd("set1", "one")
Expect(sAdd.Err()).NotTo(HaveOccurred())
diff --git a/vendor/github.com/go-redis/redis/example_test.go b/vendor/github.com/go-redis/redis/example_test.go
index 7e04cd487..4d18ddb94 100644
--- a/vendor/github.com/go-redis/redis/example_test.go
+++ b/vendor/github.com/go-redis/redis/example_test.go
@@ -96,14 +96,14 @@ func ExampleClient() {
val2, err := client.Get("key2").Result()
if err == redis.Nil {
- fmt.Println("key2 does not exists")
+ fmt.Println("key2 does not exist")
} else if err != nil {
panic(err)
} else {
fmt.Println("key2", val2)
}
// Output: key value
- // key2 does not exists
+ // key2 does not exist
}
func ExampleClient_Set() {
diff --git a/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go
index 2866488e5..8c7ebbfa6 100644
--- a/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go
+++ b/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go
@@ -55,13 +55,17 @@ func Key(key string) string {
return key
}
+func RandomSlot() int {
+ return rand.Intn(SlotNumber)
+}
+
// hashSlot returns a consistent slot number between 0 and 16383
// for any given string key.
func Slot(key string) int {
- key = Key(key)
if key == "" {
- return rand.Intn(SlotNumber)
+ return RandomSlot()
}
+ key = Key(key)
return int(crc16sum(key)) % SlotNumber
}
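Two things changed in `Slot` above: the empty-key check now runs before hashtag extraction, and it delegates to the new `RandomSlot` helper that `cmdSlot` in cluster.go also uses for keyless commands. A self-contained sketch of the routing rules; the checksum here is a stand-in, not the real cluster CRC16:

```go
// Sketch of the hashtag slot-routing rules: only the substring inside
// the first {...} is hashed, and an empty key short-circuits to a
// random slot. crc16 below is a stand-in checksum, NOT the CRC16 from
// the Redis cluster spec that the driver really uses.
package main

import (
	"fmt"
	"math/rand"
)

const slotNumber = 16384

func key(k string) string {
	for i := 0; i < len(k); i++ {
		if k[i] == '{' {
			for j := i + 1; j < len(k); j++ {
				if k[j] == '}' {
					return k[i+1 : j] // hash only the tag contents
				}
			}
			return k
		}
	}
	return k
}

func slot(k string) int {
	if k == "" {
		return rand.Intn(slotNumber) // RandomSlot()
	}
	return int(crc16([]byte(key(k)))) % slotNumber
}

func crc16(b []byte) uint16 { // stand-in checksum for illustration
	var s uint16
	for _, c := range b {
		s = s*31 + uint16(c)
	}
	return s
}

func main() {
	// Keys sharing a hashtag land in the same slot.
	fmt.Println(slot("{user1}.following"), slot("{user1}.followers"))
	fmt.Println(slot("")) // random slot
}
```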
diff --git a/vendor/github.com/go-redis/redis/internal/proto/scan.go b/vendor/github.com/go-redis/redis/internal/proto/scan.go
index 03c8b59aa..0329ffd99 100644
--- a/vendor/github.com/go-redis/redis/internal/proto/scan.go
+++ b/vendor/github.com/go-redis/redis/internal/proto/scan.go
@@ -123,8 +123,9 @@ func ScanSlice(data []string, slice interface{}) error {
next := internal.MakeSliceNextElemFunc(v)
for i, s := range data {
elem := next()
- if err := Scan(internal.StringToBytes(s), elem.Addr().Interface()); err != nil {
- return fmt.Errorf("redis: ScanSlice(index=%d value=%q) failed: %s", i, s, err)
+ if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
+ err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
+ return err
}
}
diff --git a/vendor/github.com/go-redis/redis/internal/safe.go b/vendor/github.com/go-redis/redis/internal/safe.go
index 870fe541f..dc5f4cc8a 100644
--- a/vendor/github.com/go-redis/redis/internal/safe.go
+++ b/vendor/github.com/go-redis/redis/internal/safe.go
@@ -5,7 +5,3 @@ package internal
func BytesToString(b []byte) string {
return string(b)
}
-
-func StringToBytes(s string) []byte {
- return []byte(s)
-}
diff --git a/vendor/github.com/go-redis/redis/internal/unsafe.go b/vendor/github.com/go-redis/redis/internal/unsafe.go
index c18b25c17..3ae48c14b 100644
--- a/vendor/github.com/go-redis/redis/internal/unsafe.go
+++ b/vendor/github.com/go-redis/redis/internal/unsafe.go
@@ -3,25 +3,10 @@
package internal
import (
- "reflect"
"unsafe"
)
+// BytesToString converts byte slice to string.
func BytesToString(b []byte) string {
- bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- strHeader := reflect.StringHeader{
- Data: bytesHeader.Data,
- Len: bytesHeader.Len,
- }
- return *(*string)(unsafe.Pointer(&strHeader))
-}
-
-func StringToBytes(s string) []byte {
- sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
- bh := reflect.SliceHeader{
- Data: sh.Data,
- Len: sh.Len,
- Cap: sh.Len,
- }
- return *(*[]byte)(unsafe.Pointer(&bh))
+ return *(*string)(unsafe.Pointer(&b))
}
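The simplified `BytesToString` above leans on the fact that a Go string header (pointer, length) is a prefix of a slice header (pointer, length, capacity), so reinterpreting `&b` directly is enough; the `reflect` header juggling and the `StringToBytes` inverse are gone, with the inverse's one caller in `proto.ScanSlice` now taking an explicit copy. A small illustration of the conversion and the aliasing it implies — it relies on the runtime's current header layout, which is exactly why the safe variant exists behind a build tag:

```go
// Illustration of the zero-copy conversion: the returned string
// aliases the byte slice's memory, so mutating the slice afterwards
// is visible through the string. Relies on the current runtime
// representation of strings and slices.
package main

import (
	"fmt"
	"unsafe"
)

func bytesToString(b []byte) string {
	// A string header (ptr, len) is a prefix of a slice header
	// (ptr, len, cap), so this reinterpretation lines up.
	return *(*string)(unsafe.Pointer(&b))
}

func main() {
	b := []byte("hello")
	s := bytesToString(b)
	fmt.Println(s) // hello

	b[0] = 'j'     // mutation is visible through the aliased string
	fmt.Println(s) // jello
}
```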
diff --git a/vendor/github.com/go-redis/redis/options_test.go b/vendor/github.com/go-redis/redis/options_test.go
index 6a4af7169..211f6b195 100644
--- a/vendor/github.com/go-redis/redis/options_test.go
+++ b/vendor/github.com/go-redis/redis/options_test.go
@@ -71,7 +71,7 @@ func TestParseURL(t *testing.T) {
t.Run(c.u, func(t *testing.T) {
o, err := ParseURL(c.u)
if c.err == nil && err != nil {
- t.Fatalf("unexpected error: '%q'", err)
+ t.Fatalf("unexpected error: %q", err)
return
}
if c.err != nil && err != nil {
diff --git a/vendor/github.com/go-redis/redis/parser.go b/vendor/github.com/go-redis/redis/parser.go
index 1d7ec630e..b378abc4e 100644
--- a/vendor/github.com/go-redis/redis/parser.go
+++ b/vendor/github.com/go-redis/redis/parser.go
@@ -98,6 +98,20 @@ func stringIntMapParser(rd *proto.Reader, n int64) (interface{}, error) {
}
// Implements proto.MultiBulkParse
+func stringStructMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]struct{}, n)
+ for i := int64(0); i < n; i++ {
+ key, err := rd.ReadStringReply()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = struct{}{}
+ }
+ return m, nil
+}
+
+// Implements proto.MultiBulkParse
func zSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
zz := make([]Z, n/2)
for i := int64(0); i < n; i += 2 {
diff --git a/vendor/github.com/go-redis/redis/redis.go b/vendor/github.com/go-redis/redis/redis.go
index 230091b3e..37ffafd97 100644
--- a/vendor/github.com/go-redis/redis/redis.go
+++ b/vendor/github.com/go-redis/redis/redis.go
@@ -11,7 +11,7 @@ import (
"github.com/go-redis/redis/internal/proto"
)
-// Redis nil reply, .e.g. when key does not exist.
+// Redis nil reply returned when key does not exist.
const Nil = internal.Nil
func init() {
diff --git a/vendor/github.com/go-redis/redis/ring.go b/vendor/github.com/go-redis/redis/ring.go
index a30c32102..c11ef6bc2 100644
--- a/vendor/github.com/go-redis/redis/ring.go
+++ b/vendor/github.com/go-redis/redis/ring.go
@@ -298,6 +298,9 @@ func (c *Ring) cmdInfo(name string) *CommandInfo {
if err != nil {
return nil
}
+ if c.cmdsInfo == nil {
+ return nil
+ }
info := c.cmdsInfo[name]
if info == nil {
internal.Logf("info for cmd=%s not found", name)
@@ -343,7 +346,11 @@ func (c *Ring) shardByName(name string) (*ringShard, error) {
func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
cmdInfo := c.cmdInfo(cmd.Name())
- firstKey := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
+ pos := cmdFirstKeyPos(cmd, cmdInfo)
+ if pos == 0 {
+ return c.randomShard()
+ }
+ firstKey := cmd.stringArg(pos)
return c.shardByKey(firstKey)
}
diff --git a/vendor/github.com/go-redis/redis/universal.go b/vendor/github.com/go-redis/redis/universal.go
index 29eb12b18..ea42f6984 100644
--- a/vendor/github.com/go-redis/redis/universal.go
+++ b/vendor/github.com/go-redis/redis/universal.go
@@ -114,6 +114,8 @@ func (o *UniversalOptions) simple() *Options {
type UniversalClient interface {
Cmdable
Process(cmd Cmder) error
+ Subscribe(channels ...string) *PubSub
+ PSubscribe(channels ...string) *PubSub
Close() error
}
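Adding `Subscribe` and `PSubscribe` to `UniversalClient` means pub/sub code can be written once against the interface, whichever concrete client (single-node, ring, cluster) backs it. A usage sketch; the address and channel name are placeholders:

```go
// Sketch of what the interface addition above enables: pub/sub
// through a UniversalClient without knowing the concrete client type.
// Assumes a Redis server on localhost:6379.
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	var client redis.UniversalClient = redis.NewUniversalClient(
		&redis.UniversalOptions{Addrs: []string{"localhost:6379"}},
	)
	defer client.Close()

	pubsub := client.Subscribe("events")
	defer pubsub.Close()
	if _, err := pubsub.Receive(); err != nil { // wait for subscription ack
		panic(err)
	}

	if err := client.Publish("events", "hello").Err(); err != nil {
		panic(err)
	}

	msg, err := pubsub.ReceiveMessage()
	if err != nil {
		panic(err)
	}
	fmt.Println(msg.Channel, msg.Payload) // events hello
}
```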
diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore
index ba8e0cb3a..2de28da16 100644
--- a/vendor/github.com/go-sql-driver/mysql/.gitignore
+++ b/vendor/github.com/go-sql-driver/mysql/.gitignore
@@ -6,3 +6,4 @@
Icon?
ehthumbs.db
Thumbs.db
+.idea
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml
index c1cc10aaf..e922f9187 100644
--- a/vendor/github.com/go-sql-driver/mysql/.travis.yml
+++ b/vendor/github.com/go-sql-driver/mysql/.travis.yml
@@ -1,13 +1,92 @@
sudo: false
language: go
go:
- - 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - 1.7
- - tip
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - master
+
+before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
before_script:
+ - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf
+ - sudo service mysql restart
+ - .travis/wait_mysql.sh
- mysql -e 'create database gotest;'
+
+matrix:
+ include:
+ - env: DB=MYSQL57
+ sudo: required
+ dist: trusty
+ go: 1.9.x
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mysql:5.7
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB
+ - sleep 30
+ - cp .travis/docker.cnf ~/.my.cnf
+ - mysql --print-defaults
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+ - env: DB=MARIA55
+ sudo: required
+ dist: trusty
+ go: 1.9.x
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mariadb:5.5
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB
+ - sleep 30
+ - cp .travis/docker.cnf ~/.my.cnf
+ - mysql --print-defaults
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+ - env: DB=MARIA10_1
+ sudo: required
+ dist: trusty
+ go: 1.9.x
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mariadb:10.1
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB
+ - sleep 30
+ - cp .travis/docker.cnf ~/.my.cnf
+ - mysql --print-defaults
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+script:
+ - go test -v -covermode=count -coverprofile=coverage.out
+ - go vet ./...
+ - test -z "$(gofmt -d -s . | tee /dev/stderr)"
+after_script:
+ - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf b/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
new file mode 100644
index 000000000..e57754e5a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
@@ -0,0 +1,5 @@
+[client]
+user = gotest
+password = secret
+host = 127.0.0.1
+port = 3307
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh b/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
new file mode 100755
index 000000000..abcf5f0ae
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+while :
+do
+ sleep 3
+ if mysql -e 'select version()'; then
+ break
+ fi
+done
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
index 692c186fd..4702c83ab 100644
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -12,35 +12,58 @@
# Individual Persons
Aaron Hopkins <go-sql-driver at die.net>
+Achille Roussel <achille.roussel at gmail.com>
Arne Hormann <arnehormann at gmail.com>
+Asta Xie <xiemengjun at gmail.com>
+Bulat Gaifullin <gaifullinbf at gmail.com>
Carlos Nieto <jose.carlos at menteslibres.net>
Chris Moos <chris at tech9computers.com>
+Daniel Montoya <dsmontoyam at gmail.com>
Daniel Nichter <nil at codenode.com>
Daniël van Eeden <git at myname.nl>
+Dave Protasowski <dprotaso at gmail.com>
DisposaBoy <disposaboy at dby.me>
+Egor Smolyakov <egorsmkv at gmail.com>
+Evan Shaw <evan at vendhq.com>
Frederick Mayle <frederickmayle at gmail.com>
Gustavo Kristic <gkristic at gmail.com>
Hanno Braun <mail at hannobraun.com>
Henri Yandell <flamefew at gmail.com>
Hirotaka Yamamoto <ymmt2005 at gmail.com>
+ICHINOSE Shogo <shogo82148 at gmail.com>
INADA Naoki <songofacandy at gmail.com>
+Jacek Szwec <szwec.jacek at gmail.com>
James Harr <james.harr at gmail.com>
+Jeff Hodges <jeff at somethingsimilar.com>
+Jeffrey Charles <jeffreycharles at gmail.com>
Jian Zhen <zhenjl at gmail.com>
Joshua Prunier <joshua.prunier at gmail.com>
Julien Lefevre <julien.lefevr at gmail.com>
Julien Schmidt <go-sql-driver at julienschmidt.com>
+Justin Li <jli at j-li.net>
+Justin Nuß <nuss.justin at gmail.com>
Kamil Dziedzic <kamil at klecza.pl>
Kevin Malachowski <kevin at chowski.com>
+Kieron Woodhouse <kieron.woodhouse at infosum.com>
Lennart Rudolph <lrudolph at hmc.edu>
Leonardo YongUk Kim <dalinaum at gmail.com>
+Linh Tran Tuan <linhduonggnu at gmail.com>
+Lion Yang <lion at aosc.xyz>
Luca Looz <luca.looz92 at gmail.com>
Lucas Liu <extrafliu at gmail.com>
Luke Scott <luke at webconnex.com>
+Maciej Zimnoch <maciej.zimnoch@codilime.com>
Michael Woolnough <michael.woolnough at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
Olivier Mengué <dolmen at cpan.org>
+oscarzhao <oscarzhaosl at gmail.com>
Paul Bonser <misterpib at gmail.com>
+Peter Schultz <peter.schultz at classmarkets.com>
+Rebecca Chin <rchin at pivotal.io>
+Reed Allman <rdallman10 at gmail.com>
Runrioter Wung <runrioter at gmail.com>
+Robert Russell <robert at rrbrussell.com>
+Shuode Li <elemount at qq.com>
Soroush Pour <me at soroushjp.com>
Stan Putrya <root.vagner at gmail.com>
Stanley Gunawan <gunawan.stanley at gmail.com>
@@ -52,5 +75,9 @@ Zhenye Xie <xiezhenye at gmail.com>
# Organizations
Barracuda Networks, Inc.
+Counting Ltd.
Google Inc.
+InfoSum Ltd.
+Keybase Inc.
+Pivotal Inc.
Stripe Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
index a16012f81..299198d53 100644
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -1,6 +1,6 @@
# Go-MySQL-Driver
-A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
+A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
@@ -15,6 +15,9 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
* [Address](#address)
* [Parameters](#parameters)
* [Examples](#examples)
+ * [Connection pool and timeouts](#connection-pool-and-timeouts)
+ * [context.Context Support](#contextcontext-support)
+ * [ColumnType Support](#columntype-support)
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
* [time.Time support](#timetime-support)
* [Unicode support](#unicode-support)
@@ -26,31 +29,31 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
## Features
* Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
* Native Go implementation. No C-bindings, just pure Go
- * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
* Automatic handling of broken connections
* Automatic Connection Pooling *(by database/sql package)*
* Supports queries larger than 16MB
- * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support.
+ * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
* Intelligent `LONG DATA` handling in prepared statements
* Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
* Optional `time.Time` parsing
* Optional placeholder interpolation
## Requirements
- * Go 1.2 or higher
+ * Go 1.7 or higher. We aim to support the 3 latest versions of Go.
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
---------------------------------------
## Installation
-Simple install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell:
+Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```bash
-$ go get github.com/go-sql-driver/mysql
+$ go get -u github.com/go-sql-driver/mysql
```
-Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
## Usage
-_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then.
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](https://golang.org/pkg/database/sql/) API.
Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
```go
@@ -95,13 +98,14 @@ Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mys
Passwords can consist of any character. Escaping is **not** necessary.
#### Protocol
-See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available.
+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information about which networks are available.
In general you should use an Unix domain socket if available and TCP otherwise for best performance.
#### Address
-For TCP and UDP networks, addresses have the form `host:port`.
+For TCP and UDP networks, addresses have the form `host[:port]`.
+If `port` is omitted, the default port will be used.
If `host` is a literal IPv6 address, it must be enclosed in square brackets.
-The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
@@ -136,9 +140,9 @@ Default: false
```
Type: bool
Valid Values: true, false
-Default: false
+Default: true
```
-`allowNativePasswords=true` allows the usage of the mysql native password method.
+`allowNativePasswords=false` disallows the usage of the MySQL native password method.
##### `allowOldPasswords`
@@ -220,19 +224,19 @@ Valid Values: <escaped name>
Default: UTC
```
-Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
-Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
##### `maxAllowedPacket`
```
Type: decimal number
-Default: 0
+Default: 4194304
```
-Max packet size allowed in bytes. Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from server.
+Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from the server *on every connection*.
##### `multiStatements`
@@ -260,13 +264,13 @@ Default: false
##### `readTimeout`
```
-Type: decimal number
+Type: duration
Default: 0
```
-I/O read timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-##### `strict`
+##### `rejectReadOnly`
```
Type: bool
@@ -274,20 +278,37 @@ Valid Values: true, false
Default: false
```
-`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations.
-A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable.
+`rejectReadOnly=true` causes the driver to reject read-only connections. This
+is for a possible race condition during an automatic failover, where the mysql
+client gets connected to a read-only replica after the failover.
+
+Note that this should be a fairly rare case, as an automatic failover normally
+happens when the primary is down, and the race condition shouldn't happen
+unless it comes back up online as soon as the failover is kicked off. On the
+other hand, when this happens, a MySQL application can get stuck on a
+read-only connection until restarted. It is however fairly easy to reproduce,
+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
+
+If you are not relying on read-only transactions to reject writes that aren't
+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
+is safer for failovers.
+
+Note that ERROR 1290 can be returned for a `read-only` server and this option will
+cause a retry for that error. However the same error number is used for some
+other cases. You should ensure your application will never cause an ERROR 1290
+except for `read-only` mode when enabling this option.
-By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes.
##### `timeout`
```
-Type: decimal number
+Type: duration
Default: OS default
```
-*Driver* side connection timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
##### `tls`
@@ -297,16 +318,17 @@ Valid Values: true, false, skip-verify, <name>
Default: false
```
-`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
+`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
+
##### `writeTimeout`
```
-Type: decimal number
+Type: duration
Default: 0
```
-I/O write timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### System Variables
@@ -317,9 +339,9 @@ Any other parameters are interpreted as system variables:
* `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
Rules:
-* The values for string variables must be quoted with '
+* The values for string variables must be quoted with `'`.
* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
- (which implies values of string variables must be wrapped with `%27`)
+ (which implies values of string variables must be wrapped with `%27`).
Examples:
* `autocommit=1`: `SET autocommit=1`
@@ -380,6 +402,18 @@ No Database preselected:
user:password@/
```
+
+### Connection pool and timeouts
+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
+
+### `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
+
+### `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
+
+
### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
```go
@@ -390,17 +424,17 @@ Files must be whitelisted by registering them with `mysql.RegisterLocalFile(file
To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::<name>` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
-See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
### `time.Time` support
-The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your programm.
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
-However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
-Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
+Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
### Unicode support
@@ -412,7 +446,6 @@ Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAM
See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
-
## Testing / Development
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
@@ -431,13 +464,13 @@ Mozilla summarizes the license scope as follows:
That means:
- * You can **use** the **unchanged** source code both in private and commercially
- * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
- * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
+ * You can **use** the **unchanged** source code both in private and commercially.
+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
-Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
-You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
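Tying the README changes together: `timeout`, `readTimeout`, and `writeTimeout` are now documented as duration-typed DSN parameters, while pool sizing lives in `database/sql` itself. A hedged end-to-end sketch; the credentials, address, and database name are placeholders:

```go
// Sketch combining the duration-typed DSN timeouts with database/sql
// pool tuning, per the "Connection pool and timeouts" section above.
package main

import (
	"database/sql"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	dsn := "user:password@tcp(127.0.0.1:3306)/gotest" +
		"?timeout=30s&readTimeout=1m30s&writeTimeout=30s" +
		"&parseTime=true&loc=US%2FPacific"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Pool sizing is configured on *sql.DB, not in the DSN.
	db.SetMaxOpenConns(16)
	db.SetMaxIdleConns(8)
	db.SetConnMaxLifetime(5 * time.Minute)

	if err := db.Ping(); err != nil {
		panic(err)
	}
}
```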
diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go
index 565614eef..be41f2ee6 100644
--- a/vendor/github.com/go-sql-driver/mysql/appengine.go
+++ b/vendor/github.com/go-sql-driver/mysql/appengine.go
@@ -11,7 +11,7 @@
package mysql
import (
- "appengine/cloudsql"
+ "google.golang.org/appengine/cloudsql"
)
func init() {
diff --git a/vendor/github.com/go-sql-driver/mysql/benchmark_go18_test.go b/vendor/github.com/go-sql-driver/mysql/benchmark_go18_test.go
new file mode 100644
index 000000000..d6a7e9d6e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/benchmark_go18_test.go
@@ -0,0 +1,93 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "runtime"
+ "testing"
+)
+
+func benchmarkQueryContext(b *testing.B, db *sql.DB, p int) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
+
+ tb := (*TB)(b)
+ stmt := tb.checkStmt(db.PrepareContext(ctx, "SELECT val FROM foo WHERE id=?"))
+ defer stmt.Close()
+
+ b.SetParallelism(p)
+ b.ReportAllocs()
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ var got string
+ for pb.Next() {
+ tb.check(stmt.QueryRow(1).Scan(&got))
+ if got != "one" {
+ b.Fatalf("query = %q; want one", got)
+ }
+ }
+ })
+}
+
+func BenchmarkQueryContext(b *testing.B) {
+ db := initDB(b,
+ "DROP TABLE IF EXISTS foo",
+ "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
+ `INSERT INTO foo VALUES (1, "one")`,
+ `INSERT INTO foo VALUES (2, "two")`,
+ )
+ defer db.Close()
+ for _, p := range []int{1, 2, 3, 4} {
+ b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
+ benchmarkQueryContext(b, db, p)
+ })
+ }
+}
+
+func benchmarkExecContext(b *testing.B, db *sql.DB, p int) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
+
+ tb := (*TB)(b)
+ stmt := tb.checkStmt(db.PrepareContext(ctx, "DO 1"))
+ defer stmt.Close()
+
+ b.SetParallelism(p)
+ b.ReportAllocs()
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if _, err := stmt.ExecContext(ctx); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+
+func BenchmarkExecContext(b *testing.B) {
+ db := initDB(b,
+ "DROP TABLE IF EXISTS foo",
+ "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
+ `INSERT INTO foo VALUES (1, "one")`,
+ `INSERT INTO foo VALUES (2, "two")`,
+ )
+ defer db.Close()
+ for _, p := range []int{1, 2, 3, 4} {
+ b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
+			benchmarkExecContext(b, db, p)
+ })
+ }
+}
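
The benchmarks above exercise the context-aware API added in Go 1.8. A plain usage sketch of the same API, with the DSN and the `foo` table as placeholders:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Queries are abandoned (and the connection torn down) when the
	// context deadline expires or the context is canceled.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var val string
	if err := db.QueryRowContext(ctx, "SELECT val FROM foo WHERE id = ?", 1).Scan(&val); err != nil {
		log.Fatal(err)
	}
	fmt.Println(val)
}
```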
diff --git a/vendor/github.com/go-sql-driver/mysql/benchmark_test.go b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
index 7da833a2a..c1de8672b 100644
--- a/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
+++ b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
@@ -48,11 +48,7 @@ func initDB(b *testing.B, queries ...string) *sql.DB {
db := tb.checkDB(sql.Open("mysql", dsn))
for _, query := range queries {
if _, err := db.Exec(query); err != nil {
- if w, ok := err.(MySQLWarnings); ok {
- b.Logf("warning on %q: %v", query, w)
- } else {
- b.Fatalf("error on %q: %v", query, err)
- }
+ b.Fatalf("error on %q: %v", query, err)
}
}
return db
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
index 82079cfb9..136c9e4d1 100644
--- a/vendor/github.com/go-sql-driver/mysql/collations.go
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -9,6 +9,7 @@
package mysql
const defaultCollation = "utf8_general_ci"
+const binaryCollation = "binary"
// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
index d82c728f3..e57061412 100644
--- a/vendor/github.com/go-sql-driver/mysql/connection.go
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -10,12 +10,23 @@ package mysql
import (
"database/sql/driver"
+ "io"
"net"
"strconv"
"strings"
"time"
)
+// a copy of context.Context for Go 1.7 and earlier
+type mysqlContext interface {
+ Done() <-chan struct{}
+ Err() error
+
+ // defined in context.Context, but not used in this driver:
+ // Deadline() (deadline time.Time, ok bool)
+ // Value(key interface{}) interface{}
+}
+
type mysqlConn struct {
buf buffer
netConn net.Conn
@@ -29,7 +40,14 @@ type mysqlConn struct {
status statusFlag
sequence uint8
parseTime bool
- strict bool
+
+ // for context support (Go 1.8+)
+ watching bool
+ watcher chan<- mysqlContext
+ closech chan struct{}
+ finished chan<- struct{}
+ canceled atomicError // set non-nil if conn is canceled
+ closed atomicBool // set when conn is closed, before closech is closed
}
// Handles parameters set in DSN after the connection is established
@@ -62,22 +80,41 @@ func (mc *mysqlConn) handleParams() (err error) {
return
}
+func (mc *mysqlConn) markBadConn(err error) error {
+ if mc == nil {
+ return err
+ }
+ if err != errBadConnNoWrite {
+ return err
+ }
+ return driver.ErrBadConn
+}
+
func (mc *mysqlConn) Begin() (driver.Tx, error) {
- if mc.netConn == nil {
+ return mc.begin(false)
+}
+
+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
- err := mc.exec("START TRANSACTION")
+ var q string
+ if readOnly {
+ q = "START TRANSACTION READ ONLY"
+ } else {
+ q = "START TRANSACTION"
+ }
+ err := mc.exec(q)
if err == nil {
return &mysqlTx{mc}, err
}
-
- return nil, err
+ return nil, mc.markBadConn(err)
}
func (mc *mysqlConn) Close() (err error) {
// Makes Close idempotent
- if mc.netConn != nil {
+ if !mc.closed.IsSet() {
err = mc.writeCommandPacket(comQuit)
}
@@ -91,26 +128,39 @@ func (mc *mysqlConn) Close() (err error) {
// is called before auth or on auth failure because MySQL will have already
// closed the network connection.
func (mc *mysqlConn) cleanup() {
+ if !mc.closed.TrySet(true) {
+ return
+ }
+
// Makes cleanup idempotent
- if mc.netConn != nil {
- if err := mc.netConn.Close(); err != nil {
- errLog.Print(err)
+ close(mc.closech)
+ if mc.netConn == nil {
+ return
+ }
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+}
+
+func (mc *mysqlConn) error() error {
+ if mc.closed.IsSet() {
+ if err := mc.canceled.Value(); err != nil {
+ return err
}
- mc.netConn = nil
+ return ErrInvalidConn
}
- mc.cfg = nil
- mc.buf.nc = nil
+ return nil
}
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
- if mc.netConn == nil {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := mc.writeCommandPacketStr(comStmtPrepare, query)
if err != nil {
- return nil, err
+ return nil, mc.markBadConn(err)
}
stmt := &mysqlStmt{
@@ -144,7 +194,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
if buf == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return "", driver.ErrBadConn
+ return "", ErrInvalidConn
}
buf = buf[:0]
argPos := 0
@@ -257,7 +307,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
}
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
- if mc.netConn == nil {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
@@ -271,7 +321,6 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
return nil, err
}
query = prepared
- args = nil
}
mc.affectedRows = 0
mc.insertId = 0
@@ -283,32 +332,43 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
insertId: int64(mc.insertId),
}, err
}
- return nil, err
+ return nil, mc.markBadConn(err)
}
// Internal function to execute commands
func (mc *mysqlConn) exec(query string) error {
// Send command
- err := mc.writeCommandPacketStr(comQuery, query)
- if err != nil {
- return err
+ if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
+ return mc.markBadConn(err)
}
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
- if err == nil && resLen > 0 {
- if err = mc.readUntilEOF(); err != nil {
+ if err != nil {
+ return err
+ }
+
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
return err
}
- err = mc.readUntilEOF()
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
}
- return err
+ return mc.discardResults()
}
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
- if mc.netConn == nil {
+ return mc.query(query, args)
+}
+
+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
@@ -322,7 +382,6 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
return nil, err
}
query = prepared
- args = nil
}
// Send command
err := mc.writeCommandPacketStr(comQuery, query)
@@ -335,15 +394,22 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
rows.mc = mc
if resLen == 0 {
- // no columns, no more data
- return emptyRows{}, nil
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
}
+
// Columns
- rows.columns, err = mc.readColumns(resLen)
+ rows.rs.columns, err = mc.readColumns(resLen)
return rows, err
}
}
- return nil, err
+ return nil, mc.markBadConn(err)
}
// Gets the value of the given MySQL System Variable
@@ -359,7 +425,7 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
if err == nil {
rows := new(textRows)
rows.mc = mc
- rows.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+ rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
if resLen > 0 {
// Columns
@@ -375,3 +441,21 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
}
return nil, err
}
+
+// cancel is called when the query is canceled.
+func (mc *mysqlConn) cancel(err error) {
+ mc.canceled.Set(err)
+ mc.cleanup()
+}
+
+// finish is called when the query has succeeded.
+func (mc *mysqlConn) finish() {
+ if !mc.watching || mc.finished == nil {
+ return
+ }
+ select {
+ case mc.finished <- struct{}{}:
+ mc.watching = false
+ case <-mc.closech:
+ }
+}
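
The new `closed` and `canceled` fields are driven through `IsSet`, `TrySet`, `Set`, and `Value` helpers defined elsewhere in the package and not shown in this hunk. A plausible minimal sketch of such wrappers, assuming only `sync` and `sync/atomic`:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

// atomicBool is a boolean that is safe for concurrent use.
type atomicBool struct{ value uint32 }

func (ab *atomicBool) IsSet() bool { return atomic.LoadUint32(&ab.value) > 0 }

// TrySet sets the flag and reports whether the call changed it,
// so exactly one caller wins a close/cleanup race.
func (ab *atomicBool) TrySet(v bool) bool {
	if v {
		return atomic.SwapUint32(&ab.value, 1) == 0
	}
	return atomic.SwapUint32(&ab.value, 0) > 0
}

// atomicError stores an error that may be set and read concurrently.
type atomicError struct {
	mu  sync.Mutex
	err error
}

func (ae *atomicError) Set(err error) {
	ae.mu.Lock()
	ae.err = err
	ae.mu.Unlock()
}

func (ae *atomicError) Value() error {
	ae.mu.Lock()
	defer ae.mu.Unlock()
	return ae.err
}

func main() {
	var closed atomicBool
	fmt.Println(closed.TrySet(true)) // true: the first closer wins
	fmt.Println(closed.TrySet(true)) // false: already closed

	var canceled atomicError
	canceled.Set(errors.New("context canceled"))
	fmt.Println(canceled.Value())
}
```

Replacing the old `netConn == nil` checks with an atomic flag lets `cleanup` run safely from a watcher goroutine while a query is still in flight on the connection.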
diff --git a/vendor/github.com/go-sql-driver/mysql/connection_go18.go b/vendor/github.com/go-sql-driver/mysql/connection_go18.go
new file mode 100644
index 000000000..1306b70b7
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection_go18.go
@@ -0,0 +1,202 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+)
+
+// Ping implements driver.Pinger interface
+func (mc *mysqlConn) Ping(ctx context.Context) error {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return err
+ }
+ defer mc.finish()
+
+ if err := mc.writeCommandPacket(comPing); err != nil {
+ return err
+ }
+ if _, err := mc.readResultOK(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// BeginTx implements driver.ConnBeginTx interface
+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
+ level, err := mapIsolationLevel(opts.Isolation)
+ if err != nil {
+ return nil, err
+ }
+ err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return mc.begin(opts.ReadOnly)
+}
+
+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := mc.query(query, dargs)
+ if err != nil {
+ mc.finish()
+ return nil, err
+ }
+ rows.finish = mc.finish
+ return rows, err
+}
+
+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ return mc.Exec(query, dargs)
+}
+
+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ stmt, err := mc.Prepare(query)
+ mc.finish()
+ if err != nil {
+ return nil, err
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ stmt.Close()
+ return nil, ctx.Err()
+ }
+ return stmt, nil
+}
+
+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := stmt.query(dargs)
+ if err != nil {
+ stmt.mc.finish()
+ return nil, err
+ }
+ rows.finish = stmt.mc.finish
+ return rows, err
+}
+
+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer stmt.mc.finish()
+
+ return stmt.Exec(dargs)
+}
+
+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
+ if mc.watching {
+		// Reaching here means a previous watch was canceled,
+		// so the connection is already invalid.
+ mc.cleanup()
+ return nil
+ }
+ if ctx.Done() == nil {
+ return nil
+ }
+
+ mc.watching = true
+ select {
+ default:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ if mc.watcher == nil {
+ return nil
+ }
+
+ mc.watcher <- ctx
+
+ return nil
+}
+
+func (mc *mysqlConn) startWatcher() {
+ watcher := make(chan mysqlContext, 1)
+ mc.watcher = watcher
+ finished := make(chan struct{})
+ mc.finished = finished
+ go func() {
+ for {
+ var ctx mysqlContext
+ select {
+ case ctx = <-watcher:
+ case <-mc.closech:
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ mc.cancel(ctx.Err())
+ case <-finished:
+ case <-mc.closech:
+ return
+ }
+ }
+ }()
+}
+
+func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
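
Taken together, `watchCancel`, `startWatcher`, and `finish` implement a single long-lived goroutine per connection that multiplexes context cancellation across all queries on that connection. A stripped-down, standalone sketch of the same pattern, with all names hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// conn mimics the driver's per-connection watcher wiring.
type conn struct {
	watcher  chan<- context.Context // hands the active ctx to the goroutine
	finished chan<- struct{}        // signals normal query completion
	closech  chan struct{}          // closed when the connection dies
}

func newConn() *conn {
	watcher := make(chan context.Context, 1)
	finished := make(chan struct{})
	c := &conn{watcher: watcher, finished: finished, closech: make(chan struct{})}
	go func() {
		for {
			var ctx context.Context
			select {
			case ctx = <-watcher: // a query started
			case <-c.closech:
				return
			}
			select {
			case <-ctx.Done(): // canceled: the connection must die
				fmt.Println("canceled:", ctx.Err())
				close(c.closech)
				return
			case <-finished: // the query finished normally
			case <-c.closech:
				return
			}
		}
	}()
	return c
}

// query sketches the watchCancel/finish bracketing around real work.
// The real driver also tracks a "watching" flag so finish is only sent
// when a watch is actually active.
func (c *conn) query(ctx context.Context) {
	c.watcher <- ctx                  // start watching
	time.Sleep(10 * time.Millisecond) // do the actual query work
	c.finished <- struct{}{}          // stop watching
}

func main() {
	c := newConn()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	c.query(ctx)
	fmt.Println("query completed without cancellation")
}
```

One goroutine per connection, rather than one per query, keeps the steady-state cost of cancellation support at a single channel send and receive per query.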
diff --git a/vendor/github.com/go-sql-driver/mysql/connection_go18_test.go b/vendor/github.com/go-sql-driver/mysql/connection_go18_test.go
new file mode 100644
index 000000000..2719ab3b7
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection_go18_test.go
@@ -0,0 +1,30 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "testing"
+)
+
+func TestCheckNamedValue(t *testing.T) {
+ value := driver.NamedValue{Value: ^uint64(0)}
+ x := &mysqlConn{}
+ err := x.CheckNamedValue(&value)
+
+ if err != nil {
+ t.Fatal("uint64 high-bit not convertible", err)
+ }
+
+ if value.Value != "18446744073709551615" {
+ t.Fatalf("uint64 high-bit not converted, got %#v %T", value.Value, value.Value)
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
index 88cfff3fd..4a19ca523 100644
--- a/vendor/github.com/go-sql-driver/mysql/const.go
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -9,7 +9,8 @@
package mysql
const (
- minProtocolVersion byte = 10
+ defaultMaxAllowedPacket = 4 << 20 // 4 MiB
+ minProtocolVersion = 10
maxPacketSize = 1<<24 - 1
timeFormat = "2006-01-02 15:04:05.999999"
)
@@ -87,8 +88,10 @@ const (
)
// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+type fieldType byte
+
const (
- fieldTypeDecimal byte = iota
+ fieldTypeDecimal fieldType = iota
fieldTypeTiny
fieldTypeShort
fieldTypeLong
@@ -107,7 +110,7 @@ const (
fieldTypeBit
)
const (
- fieldTypeJSON byte = iota + 0xf5
+ fieldTypeJSON fieldType = iota + 0xf5
fieldTypeNewDecimal
fieldTypeEnum
fieldTypeSet
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
index 0022d1f1e..d42ce7a3d 100644
--- a/vendor/github.com/go-sql-driver/mysql/driver.go
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
-// Package mysql provides a MySQL driver for Go's database/sql package
+// Package mysql provides a MySQL driver for Go's database/sql package.
//
// The driver should be used via the database/sql package:
//
@@ -22,6 +22,11 @@ import (
"net"
)
+// watcher interface is used for context support (From Go 1.8)
+type watcher interface {
+ startWatcher()
+}
+
// MySQLDriver is exported to make the driver directly accessible.
// In general the driver is used via the database/sql package.
type MySQLDriver struct{}
@@ -52,13 +57,13 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
mc := &mysqlConn{
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
+ closech: make(chan struct{}),
}
mc.cfg, err = ParseDSN(dsn)
if err != nil {
return nil, err
}
mc.parseTime = mc.cfg.ParseTime
- mc.strict = mc.cfg.Strict
// Connect to Server
if dial, ok := dials[mc.cfg.Net]; ok {
@@ -81,6 +86,11 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
}
}
+ // Call startWatcher for context support (From Go 1.8)
+ if s, ok := interface{}(mc).(watcher); ok {
+ s.startWatcher()
+ }
+
mc.buf = newBuffer(mc.netConn)
// Set I/O timeouts
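
The `watcher` interface plus the type assertion above is the optional-capability pattern: the Go 1.8-only file attaches `startWatcher` to `mysqlConn` behind a build tag, and older toolchains simply compile a `mysqlConn` that fails the assertion. A standalone sketch of the pattern, with invented types:

```go
package main

import "fmt"

// watcher is an optional capability: a concrete type may or may not
// implement it, and callers probe with a type assertion at runtime.
type watcher interface {
	startWatcher()
}

type connWithContext struct{} // as if built with the go1.8 file

func (connWithContext) startWatcher() { fmt.Println("watcher started") }

type connLegacy struct{} // as if built without it

func start(c interface{}) {
	if w, ok := c.(watcher); ok {
		w.startWatcher()
		return
	}
	fmt.Println("no watcher available")
}

func main() {
	start(connWithContext{})
	start(connLegacy{})
}
```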
diff --git a/vendor/github.com/go-sql-driver/mysql/driver_go18_test.go b/vendor/github.com/go-sql-driver/mysql/driver_go18_test.go
new file mode 100644
index 000000000..e461455dd
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/driver_go18_test.go
@@ -0,0 +1,798 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+ "fmt"
+ "math"
+ "reflect"
+ "testing"
+ "time"
+)
+
+// static interface implementation checks of mysqlConn
+var (
+ _ driver.ConnBeginTx = &mysqlConn{}
+ _ driver.ConnPrepareContext = &mysqlConn{}
+ _ driver.ExecerContext = &mysqlConn{}
+ _ driver.Pinger = &mysqlConn{}
+ _ driver.QueryerContext = &mysqlConn{}
+)
+
+// static interface implementation checks of mysqlStmt
+var (
+ _ driver.StmtExecContext = &mysqlStmt{}
+ _ driver.StmtQueryContext = &mysqlStmt{}
+)
+
+// Ensure that all the driver interfaces are implemented
+var (
+ // _ driver.RowsColumnTypeLength = &binaryRows{}
+ // _ driver.RowsColumnTypeLength = &textRows{}
+ _ driver.RowsColumnTypeDatabaseTypeName = &binaryRows{}
+ _ driver.RowsColumnTypeDatabaseTypeName = &textRows{}
+ _ driver.RowsColumnTypeNullable = &binaryRows{}
+ _ driver.RowsColumnTypeNullable = &textRows{}
+ _ driver.RowsColumnTypePrecisionScale = &binaryRows{}
+ _ driver.RowsColumnTypePrecisionScale = &textRows{}
+ _ driver.RowsColumnTypeScanType = &binaryRows{}
+ _ driver.RowsColumnTypeScanType = &textRows{}
+ _ driver.RowsNextResultSet = &binaryRows{}
+ _ driver.RowsNextResultSet = &textRows{}
+)
+
+func TestMultiResultSet(t *testing.T) {
+ type result struct {
+ values [][]int
+ columns []string
+ }
+
+ // checkRows is a helper test function to validate rows containing 3 result
+ // sets with specific values and columns. The basic query would look like this:
+ //
+ // SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
+ // SELECT 0 UNION SELECT 1;
+ // SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
+ //
+	// To distinguish test cases, the first string argument is put in front
+	// of every error or fatal message.
+ checkRows := func(desc string, rows *sql.Rows, dbt *DBTest) {
+ expected := []result{
+ {
+ values: [][]int{{1, 2}, {3, 4}},
+ columns: []string{"col1", "col2"},
+ },
+ {
+ values: [][]int{{1, 2, 3}, {4, 5, 6}},
+ columns: []string{"col1", "col2", "col3"},
+ },
+ }
+
+ var res1 result
+ for rows.Next() {
+ var res [2]int
+ if err := rows.Scan(&res[0], &res[1]); err != nil {
+ dbt.Fatal(err)
+ }
+ res1.values = append(res1.values, res[:])
+ }
+
+ cols, err := rows.Columns()
+ if err != nil {
+ dbt.Fatal(desc, err)
+ }
+ res1.columns = cols
+
+ if !reflect.DeepEqual(expected[0], res1) {
+ dbt.Error(desc, "want =", expected[0], "got =", res1)
+ }
+
+ if !rows.NextResultSet() {
+ dbt.Fatal(desc, "expected next result set")
+ }
+
+ // ignoring one result set
+
+ if !rows.NextResultSet() {
+ dbt.Fatal(desc, "expected next result set")
+ }
+
+ var res2 result
+ cols, err = rows.Columns()
+ if err != nil {
+ dbt.Fatal(desc, err)
+ }
+ res2.columns = cols
+
+ for rows.Next() {
+ var res [3]int
+ if err := rows.Scan(&res[0], &res[1], &res[2]); err != nil {
+ dbt.Fatal(desc, err)
+ }
+ res2.values = append(res2.values, res[:])
+ }
+
+ if !reflect.DeepEqual(expected[1], res2) {
+ dbt.Error(desc, "want =", expected[1], "got =", res2)
+ }
+
+ if rows.NextResultSet() {
+ dbt.Error(desc, "unexpected next result set")
+ }
+
+ if err := rows.Err(); err != nil {
+ dbt.Error(desc, err)
+ }
+ }
+
+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+ rows := dbt.mustQuery(`DO 1;
+ SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
+ DO 1;
+ SELECT 0 UNION SELECT 1;
+ SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;`)
+ defer rows.Close()
+ checkRows("query: ", rows, dbt)
+ })
+
+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+ queries := []string{
+ `
+ DROP PROCEDURE IF EXISTS test_mrss;
+ CREATE PROCEDURE test_mrss()
+ BEGIN
+ DO 1;
+ SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
+ DO 1;
+ SELECT 0 UNION SELECT 1;
+ SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
+ END
+ `,
+ `
+ DROP PROCEDURE IF EXISTS test_mrss;
+ CREATE PROCEDURE test_mrss()
+ BEGIN
+ SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
+ SELECT 0 UNION SELECT 1;
+ SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
+ END
+ `,
+ }
+
+ defer dbt.mustExec("DROP PROCEDURE IF EXISTS test_mrss")
+
+ for i, query := range queries {
+ dbt.mustExec(query)
+
+ stmt, err := dbt.db.Prepare("CALL test_mrss()")
+ if err != nil {
+ dbt.Fatalf("%v (i=%d)", err, i)
+ }
+ defer stmt.Close()
+
+ for j := 0; j < 2; j++ {
+ rows, err := stmt.Query()
+ if err != nil {
+ dbt.Fatalf("%v (i=%d) (j=%d)", err, i, j)
+ }
+ checkRows(fmt.Sprintf("prepared stmt query (i=%d) (j=%d): ", i, j), rows, dbt)
+ }
+ }
+ })
+}
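
Multi-statement queries like those above require opting in via the DSN; a usage sketch from the `database/sql` side, with the DSN as a placeholder:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname?multiStatements=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT 1; SELECT 2, 3;")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for { // walk every result set in order
		for rows.Next() {
			// scan the columns of the current result set here
		}
		if !rows.NextResultSet() {
			break
		}
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("done")
}
```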
+
+func TestMultiResultSetNoSelect(t *testing.T) {
+ runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
+ rows := dbt.mustQuery("DO 1; DO 2;")
+ defer rows.Close()
+
+ if rows.Next() {
+ dbt.Error("unexpected row")
+ }
+
+ if rows.NextResultSet() {
+ dbt.Error("unexpected next result set")
+ }
+
+ if err := rows.Err(); err != nil {
+ dbt.Error("expected nil; got ", err)
+ }
+ })
+}
+
+// Tests that rows are left in a proper state when some results are ignored
+// before calling rows.NextResultSet.
+func TestSkipResults(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ rows := dbt.mustQuery("SELECT 1, 2")
+ defer rows.Close()
+
+ if !rows.Next() {
+ dbt.Error("expected row")
+ }
+
+ if rows.NextResultSet() {
+ dbt.Error("unexpected next result set")
+ }
+
+ if err := rows.Err(); err != nil {
+ dbt.Error("expected nil; got ", err)
+ }
+ })
+}
+
+func TestPingContext(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ if err := dbt.db.PingContext(ctx); err != context.Canceled {
+ dbt.Errorf("expected context.Canceled, got %v", err)
+ }
+ })
+}
+
+func TestContextCancelExec(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ ctx, cancel := context.WithCancel(context.Background())
+
+ // Delay execution for just a bit until db.ExecContext has begun.
+ defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
+
+ // This query will be canceled.
+ startTime := time.Now()
+ if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+ dbt.Errorf("expected context.Canceled, got %v", err)
+ }
+ if d := time.Since(startTime); d > 500*time.Millisecond {
+ dbt.Errorf("too long execution time: %s", d)
+ }
+
+		// Wait until the INSERT query is done.
+ time.Sleep(time.Second)
+
+ // Check how many times the query is executed.
+ var v int
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+ if v != 1 { // TODO: need to kill the query, and v should be 0.
+ dbt.Errorf("expected val to be 1, got %d", v)
+ }
+
+ // Context is already canceled, so error should come before execution.
+ if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (1)"); err == nil {
+ dbt.Error("expected error")
+ } else if err.Error() != "context canceled" {
+ dbt.Fatalf("unexpected error: %s", err)
+ }
+
+ // The second insert query will fail, so the table has no changes.
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+ if v != 1 {
+ dbt.Errorf("expected val to be 1, got %d", v)
+ }
+ })
+}
+
+func TestContextCancelQuery(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ ctx, cancel := context.WithCancel(context.Background())
+
+ // Delay execution for just a bit until db.ExecContext has begun.
+ defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
+
+ // This query will be canceled.
+ startTime := time.Now()
+ if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+ dbt.Errorf("expected context.Canceled, got %v", err)
+ }
+ if d := time.Since(startTime); d > 500*time.Millisecond {
+ dbt.Errorf("too long execution time: %s", d)
+ }
+
+		// Wait until the INSERT query is done.
+ time.Sleep(time.Second)
+
+ // Check how many times the query is executed.
+ var v int
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+ if v != 1 { // TODO: need to kill the query, and v should be 0.
+ dbt.Errorf("expected val to be 1, got %d", v)
+ }
+
+ // Context is already canceled, so error should come before execution.
+ if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (1)"); err != context.Canceled {
+ dbt.Errorf("expected context.Canceled, got %v", err)
+ }
+
+ // The second insert query will fail, so the table has no changes.
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+ if v != 1 {
+ dbt.Errorf("expected val to be 1, got %d", v)
+ }
+ })
+}
+
+func TestContextCancelQueryRow(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ dbt.mustExec("INSERT INTO test VALUES (1), (2), (3)")
+ ctx, cancel := context.WithCancel(context.Background())
+
+ rows, err := dbt.db.QueryContext(ctx, "SELECT v FROM test")
+ if err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+
+		// the first row will succeed.
+ var v int
+ if !rows.Next() {
+ dbt.Fatalf("unexpected end")
+ }
+ if err := rows.Scan(&v); err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+
+ cancel()
+		// make sure the driver receives the cancel request.
+ time.Sleep(100 * time.Millisecond)
+
+ if rows.Next() {
+			dbt.Errorf("expected end of rows; got another row")
+ }
+ if err := rows.Err(); err != context.Canceled {
+ dbt.Errorf("expected context.Canceled, got %v", err)
+ }
+ })
+}
+
+func TestContextCancelPrepare(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ if _, err := dbt.db.PrepareContext(ctx, "SELECT 1"); err != context.Canceled {
+ dbt.Errorf("expected context.Canceled, got %v", err)
+ }
+ })
+}
+
+func TestContextCancelStmtExec(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ ctx, cancel := context.WithCancel(context.Background())
+ stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
+ if err != nil {
+ dbt.Fatalf("unexpected error: %v", err)
+ }
+
+ // Delay execution for just a bit until db.ExecContext has begun.
+ defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
+
+ // This query will be canceled.
+ startTime := time.Now()
+ if _, err := stmt.ExecContext(ctx); err != context.Canceled {
+ dbt.Errorf("expected context.Canceled, got %v", err)
+ }
+ if d := time.Since(startTime); d > 500*time.Millisecond {
+ dbt.Errorf("too long execution time: %s", d)
+ }
+
+		// Wait until the INSERT query is done.
+ time.Sleep(time.Second)
+
+ // Check how many times the query is executed.
+ var v int
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+ if v != 1 { // TODO: need to kill the query, and v should be 0.
+ dbt.Errorf("expected val to be 1, got %d", v)
+ }
+ })
+}
+
+func TestContextCancelStmtQuery(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ ctx, cancel := context.WithCancel(context.Background())
+ stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
+ if err != nil {
+ dbt.Fatalf("unexpected error: %v", err)
+ }
+
+ // Delay execution for just a bit until db.ExecContext has begun.
+ defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
+
+ // This query will be canceled.
+ startTime := time.Now()
+ if _, err := stmt.QueryContext(ctx); err != context.Canceled {
+ dbt.Errorf("expected context.Canceled, got %v", err)
+ }
+ if d := time.Since(startTime); d > 500*time.Millisecond {
+ dbt.Errorf("too long execution time: %s", d)
+ }
+
+		// Wait until the INSERT query is done.
+ time.Sleep(time.Second)
+
+ // Check how many times the query is executed.
+ var v int
+ if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+ if v != 1 { // TODO: need to kill the query, and v should be 0.
+ dbt.Errorf("expected val to be 1, got %d", v)
+ }
+ })
+}
+
+func TestContextCancelBegin(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ ctx, cancel := context.WithCancel(context.Background())
+ tx, err := dbt.db.BeginTx(ctx, nil)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ // Delay execution for just a bit until db.ExecContext has begun.
+ defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
+
+ // This query will be canceled.
+ startTime := time.Now()
+ if _, err := tx.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
+ dbt.Errorf("expected context.Canceled, got %v", err)
+ }
+ if d := time.Since(startTime); d > 500*time.Millisecond {
+ dbt.Errorf("too long execution time: %s", d)
+ }
+
+ // Transaction is canceled, so expect an error.
+ switch err := tx.Commit(); err {
+ case sql.ErrTxDone:
+			// because the transaction has already been rolled back:
+			// the database/sql package watches ctx
+			// and rolls back when ctx is canceled.
+ case context.Canceled:
+			// the database/sql package rolls back on another goroutine,
+			// so the transaction may not have been rolled back yet, depending on goroutine scheduling.
+ default:
+ dbt.Errorf("expected sql.ErrTxDone or context.Canceled, got %v", err)
+ }
+
+ // Context is canceled, so cannot begin a transaction.
+ if _, err := dbt.db.BeginTx(ctx, nil); err != context.Canceled {
+ dbt.Errorf("expected context.Canceled, got %v", err)
+ }
+ })
+}
+
+func TestContextBeginIsolationLevel(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ tx1, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
+ Isolation: sql.LevelRepeatableRead,
+ })
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ tx2, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
+ Isolation: sql.LevelReadCommitted,
+ })
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ _, err = tx1.ExecContext(ctx, "INSERT INTO test VALUES (1)")
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ var v int
+ row := tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+ if err := row.Scan(&v); err != nil {
+ dbt.Fatal(err)
+ }
+		// Because the writer transaction hasn't committed yet, its row must not be visible to tx2
+ if v != 0 {
+ dbt.Errorf("expected val to be 0, got %d", v)
+ }
+
+ err = tx1.Commit()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+
+ row = tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+ if err := row.Scan(&v); err != nil {
+ dbt.Fatal(err)
+ }
+		// Data written by the writer transaction is committed now, so it should be selectable
+ if v != 1 {
+ dbt.Errorf("expected val to be 1, got %d", v)
+ }
+ tx2.Commit()
+ })
+}
+
+func TestContextBeginReadOnly(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ tx, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
+ ReadOnly: true,
+ })
+ if _, ok := err.(*MySQLError); ok {
+ dbt.Skip("It seems that your MySQL does not support READ ONLY transactions")
+ return
+ } else if err != nil {
+ dbt.Fatal(err)
+ }
+
+ // INSERT queries fail in a READ ONLY transaction.
+ _, err = tx.ExecContext(ctx, "INSERT INTO test VALUES (1)")
+ if _, ok := err.(*MySQLError); !ok {
+ dbt.Errorf("expected MySQLError, got %v", err)
+ }
+
+ // SELECT queries can be executed.
+ var v int
+ row := tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
+ if err := row.Scan(&v); err != nil {
+ dbt.Fatal(err)
+ }
+ if v != 0 {
+ dbt.Errorf("expected val to be 0, got %d", v)
+ }
+
+ if err := tx.Commit(); err != nil {
+ dbt.Fatal(err)
+ }
+ })
+}
+
+func TestRowsColumnTypes(t *testing.T) {
+ niNULL := sql.NullInt64{Int64: 0, Valid: false}
+ ni0 := sql.NullInt64{Int64: 0, Valid: true}
+ ni1 := sql.NullInt64{Int64: 1, Valid: true}
+ ni42 := sql.NullInt64{Int64: 42, Valid: true}
+ nfNULL := sql.NullFloat64{Float64: 0.0, Valid: false}
+ nf0 := sql.NullFloat64{Float64: 0.0, Valid: true}
+ nf1337 := sql.NullFloat64{Float64: 13.37, Valid: true}
+ nt0 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC), Valid: true}
+ nt1 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 100000000, time.UTC), Valid: true}
+ nt2 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 110000000, time.UTC), Valid: true}
+ nt6 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 111111000, time.UTC), Valid: true}
+ nd1 := NullTime{Time: time.Date(2006, 01, 02, 0, 0, 0, 0, time.UTC), Valid: true}
+ nd2 := NullTime{Time: time.Date(2006, 03, 04, 0, 0, 0, 0, time.UTC), Valid: true}
+ ndNULL := NullTime{Time: time.Time{}, Valid: false}
+ rbNULL := sql.RawBytes(nil)
+ rb0 := sql.RawBytes("0")
+ rb42 := sql.RawBytes("42")
+ rbTest := sql.RawBytes("Test")
+ rb0pad4 := sql.RawBytes("0\x00\x00\x00") // BINARY right-pads values with 0x00
+ rbx0 := sql.RawBytes("\x00")
+ rbx42 := sql.RawBytes("\x42")
+
+ var columns = []struct {
+ name string
+ fieldType string // type used when creating table schema
+ databaseTypeName string // actual type used by MySQL
+ scanType reflect.Type
+ nullable bool
+ precision int64 // 0 if not ok
+ scale int64
+ valuesIn [3]string
+ valuesOut [3]interface{}
+ }{
+ {"bit8null", "BIT(8)", "BIT", scanTypeRawBytes, true, 0, 0, [3]string{"0x0", "NULL", "0x42"}, [3]interface{}{rbx0, rbNULL, rbx42}},
+ {"boolnull", "BOOL", "TINYINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "true", "0"}, [3]interface{}{niNULL, ni1, ni0}},
+ {"bool", "BOOL NOT NULL", "TINYINT", scanTypeInt8, false, 0, 0, [3]string{"1", "0", "FALSE"}, [3]interface{}{int8(1), int8(0), int8(0)}},
+ {"intnull", "INTEGER", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
+ {"smallint", "SMALLINT NOT NULL", "SMALLINT", scanTypeInt16, false, 0, 0, [3]string{"0", "-32768", "32767"}, [3]interface{}{int16(0), int16(-32768), int16(32767)}},
+ {"smallintnull", "SMALLINT", "SMALLINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
+ {"int3null", "INT(3)", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
+ {"int7", "INT(7) NOT NULL", "INT", scanTypeInt32, false, 0, 0, [3]string{"0", "-1337", "42"}, [3]interface{}{int32(0), int32(-1337), int32(42)}},
+ {"mediumintnull", "MEDIUMINT", "MEDIUMINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "42", "NULL"}, [3]interface{}{ni0, ni42, niNULL}},
+ {"bigint", "BIGINT NOT NULL", "BIGINT", scanTypeInt64, false, 0, 0, [3]string{"0", "65535", "-42"}, [3]interface{}{int64(0), int64(65535), int64(-42)}},
+ {"bigintnull", "BIGINT", "BIGINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "1", "42"}, [3]interface{}{niNULL, ni1, ni42}},
+ {"tinyuint", "TINYINT UNSIGNED NOT NULL", "TINYINT", scanTypeUint8, false, 0, 0, [3]string{"0", "255", "42"}, [3]interface{}{uint8(0), uint8(255), uint8(42)}},
+ {"smalluint", "SMALLINT UNSIGNED NOT NULL", "SMALLINT", scanTypeUint16, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint16(0), uint16(65535), uint16(42)}},
+ {"biguint", "BIGINT UNSIGNED NOT NULL", "BIGINT", scanTypeUint64, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint64(0), uint64(65535), uint64(42)}},
+ {"uint13", "INT(13) UNSIGNED NOT NULL", "INT", scanTypeUint32, false, 0, 0, [3]string{"0", "1337", "42"}, [3]interface{}{uint32(0), uint32(1337), uint32(42)}},
+ {"float", "FLOAT NOT NULL", "FLOAT", scanTypeFloat32, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float32(0), float32(42), float32(13.37)}},
+ {"floatnull", "FLOAT", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
+ {"float74null", "FLOAT(7,4)", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, 4, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
+ {"double", "DOUBLE NOT NULL", "DOUBLE", scanTypeFloat64, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float64(0), float64(42), float64(13.37)}},
+ {"doublenull", "DOUBLE", "DOUBLE", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
+ {"decimal1", "DECIMAL(10,6) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 10, 6, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), sql.RawBytes("13.370000"), sql.RawBytes("1234.123456")}},
+ {"decimal1null", "DECIMAL(10,6)", "DECIMAL", scanTypeRawBytes, true, 10, 6, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), rbNULL, sql.RawBytes("1234.123456")}},
+ {"decimal2", "DECIMAL(8,4) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 8, 4, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), sql.RawBytes("13.3700"), sql.RawBytes("1234.1235")}},
+ {"decimal2null", "DECIMAL(8,4)", "DECIMAL", scanTypeRawBytes, true, 8, 4, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), rbNULL, sql.RawBytes("1234.1235")}},
+ {"decimal3", "DECIMAL(5,0) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 5, 0, [3]string{"0", "13.37", "-12345.123456"}, [3]interface{}{rb0, sql.RawBytes("13"), sql.RawBytes("-12345")}},
+ {"decimal3null", "DECIMAL(5,0)", "DECIMAL", scanTypeRawBytes, true, 5, 0, [3]string{"0", "NULL", "-12345.123456"}, [3]interface{}{rb0, rbNULL, sql.RawBytes("-12345")}},
+ {"char25null", "CHAR(25)", "CHAR", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
+ {"varchar42", "VARCHAR(42) NOT NULL", "VARCHAR", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+ {"binary4null", "BINARY(4)", "BINARY", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0pad4, rbNULL, rbTest}},
+ {"varbinary42", "VARBINARY(42) NOT NULL", "VARBINARY", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+ {"tinyblobnull", "TINYBLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
+ {"tinytextnull", "TINYTEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
+ {"blobnull", "BLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
+ {"textnull", "TEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
+ {"mediumblob", "MEDIUMBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+ {"mediumtext", "MEDIUMTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+ {"longblob", "LONGBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+ {"longtext", "LONGTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
+ {"datetime", "DATETIME", "DATETIME", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt0, nt0}},
+ {"datetime2", "DATETIME(2)", "DATETIME", scanTypeNullTime, true, 2, 2, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt2}},
+ {"datetime6", "DATETIME(6)", "DATETIME", scanTypeNullTime, true, 6, 6, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt6}},
+ {"date", "DATE", "DATE", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02'", "NULL", "'2006-03-04'"}, [3]interface{}{nd1, ndNULL, nd2}},
+ {"year", "YEAR NOT NULL", "YEAR", scanTypeUint16, false, 0, 0, [3]string{"2006", "2000", "1994"}, [3]interface{}{uint16(2006), uint16(2000), uint16(1994)}},
+ }
+
+ schema := ""
+ values1 := ""
+ values2 := ""
+ values3 := ""
+ for _, column := range columns {
+ schema += fmt.Sprintf("`%s` %s, ", column.name, column.fieldType)
+ values1 += column.valuesIn[0] + ", "
+ values2 += column.valuesIn[1] + ", "
+ values3 += column.valuesIn[2] + ", "
+ }
+ schema = schema[:len(schema)-2]
+ values1 = values1[:len(values1)-2]
+ values2 = values2[:len(values2)-2]
+ values3 = values3[:len(values3)-2]
+
+ dsns := []string{
+ dsn + "&parseTime=true",
+ dsn + "&parseTime=false",
+ }
+ for _, testdsn := range dsns {
+ runTests(t, testdsn, func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (" + schema + ")")
+ dbt.mustExec("INSERT INTO test VALUES (" + values1 + "), (" + values2 + "), (" + values3 + ")")
+
+ rows, err := dbt.db.Query("SELECT * FROM test")
+ if err != nil {
+ t.Fatalf("Query: %v", err)
+ }
+
+ tt, err := rows.ColumnTypes()
+ if err != nil {
+ t.Fatalf("ColumnTypes: %v", err)
+ }
+
+ if len(tt) != len(columns) {
+ t.Fatalf("unexpected number of columns: expected %d, got %d", len(columns), len(tt))
+ }
+
+ types := make([]reflect.Type, len(tt))
+ for i, tp := range tt {
+ column := columns[i]
+
+ // Name
+ name := tp.Name()
+ if name != column.name {
+ t.Errorf("column name mismatch %s != %s", name, column.name)
+ continue
+ }
+
+ // DatabaseTypeName
+ databaseTypeName := tp.DatabaseTypeName()
+ if databaseTypeName != column.databaseTypeName {
+					t.Errorf("databasetypename mismatch for column %q: %s != %s", name, databaseTypeName, column.databaseTypeName)
+ continue
+ }
+
+ // ScanType
+ scanType := tp.ScanType()
+ if scanType != column.scanType {
+ if scanType == nil {
+ t.Errorf("scantype is null for column %q", name)
+ } else {
+ t.Errorf("scantype mismatch for column %q: %s != %s", name, scanType.Name(), column.scanType.Name())
+ }
+ continue
+ }
+ types[i] = scanType
+
+ // Nullable
+ nullable, ok := tp.Nullable()
+ if !ok {
+ t.Errorf("nullable not ok %q", name)
+ continue
+ }
+ if nullable != column.nullable {
+ t.Errorf("nullable mismatch for column %q: %t != %t", name, nullable, column.nullable)
+ }
+
+ // Length
+ // length, ok := tp.Length()
+ // if length != column.length {
+ // if !ok {
+ // t.Errorf("length not ok for column %q", name)
+ // } else {
+ // t.Errorf("length mismatch for column %q: %d != %d", name, length, column.length)
+ // }
+ // continue
+ // }
+
+ // Precision and Scale
+ precision, scale, ok := tp.DecimalSize()
+ if precision != column.precision {
+ if !ok {
+ t.Errorf("precision not ok for column %q", name)
+ } else {
+ t.Errorf("precision mismatch for column %q: %d != %d", name, precision, column.precision)
+ }
+ continue
+ }
+ if scale != column.scale {
+ if !ok {
+ t.Errorf("scale not ok for column %q", name)
+ } else {
+ t.Errorf("scale mismatch for column %q: %d != %d", name, scale, column.scale)
+ }
+ continue
+ }
+ }
+
+ values := make([]interface{}, len(tt))
+ for i := range values {
+ values[i] = reflect.New(types[i]).Interface()
+ }
+ i := 0
+ for rows.Next() {
+ err = rows.Scan(values...)
+ if err != nil {
+					t.Fatalf("failed to scan values: %v", err)
+ }
+ for j := range values {
+ value := reflect.ValueOf(values[j]).Elem().Interface()
+ if !reflect.DeepEqual(value, columns[j].valuesOut[i]) {
+ if columns[j].scanType == scanTypeRawBytes {
+ t.Errorf("row %d, column %d: %v != %v", i, j, string(value.(sql.RawBytes)), string(columns[j].valuesOut[i].(sql.RawBytes)))
+ } else {
+ t.Errorf("row %d, column %d: %v != %v", i, j, value, columns[j].valuesOut[i])
+ }
+ }
+ }
+ i++
+ }
+ if i != 3 {
+ t.Errorf("expected 3 rows, got %d", i)
+ }
+
+ if err := rows.Close(); err != nil {
+ t.Errorf("error closing rows: %s", err)
+ }
+ })
+ }
+}
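
Outside the test harness, the column-metadata interfaces verified above surface through `sql.Rows.ColumnTypes`; a minimal sketch, with the DSN and the `foo` table as placeholders:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT id, val FROM foo")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	types, err := rows.ColumnTypes()
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range types {
		nullable, _ := t.Nullable()
		fmt.Printf("%s: db type %s, scans into %s, nullable %t\n",
			t.Name(), t.DatabaseTypeName(), t.ScanType(), nullable)
	}
}
```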
diff --git a/vendor/github.com/go-sql-driver/mysql/driver_test.go b/vendor/github.com/go-sql-driver/mysql/driver_test.go
index 78e68f5d0..7877aa979 100644
--- a/vendor/github.com/go-sql-driver/mysql/driver_test.go
+++ b/vendor/github.com/go-sql-driver/mysql/driver_test.go
@@ -27,6 +27,12 @@ import (
"time"
)
+// Ensure that all the driver interfaces are implemented
+var (
+ _ driver.Rows = &binaryRows{}
+ _ driver.Rows = &textRows{}
+)
+
var (
user string
pass string
@@ -63,7 +69,7 @@ func init() {
addr = env("MYSQL_TEST_ADDR", "localhost:3306")
dbname = env("MYSQL_TEST_DBNAME", "gotest")
netAddr = fmt.Sprintf("%s(%s)", prot, addr)
- dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s&strict=true", user, pass, netAddr, dbname)
+ dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, pass, netAddr, dbname)
c, err := net.Dial(prot, addr)
if err == nil {
available = true
@@ -171,6 +177,17 @@ func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows)
return rows
}
+func maybeSkip(t *testing.T, err error, skipErrno uint16) {
+ mySQLErr, ok := err.(*MySQLError)
+ if !ok {
+ return
+ }
+
+ if mySQLErr.Number == skipErrno {
+ t.Skipf("skipping test for error: %v", err)
+ }
+}
+
func TestEmptyQuery(t *testing.T) {
runTests(t, dsn, func(dbt *DBTest) {
// just a comment, no query
@@ -482,6 +499,85 @@ func TestString(t *testing.T) {
})
}
+type testValuer struct {
+ value string
+}
+
+func (tv testValuer) Value() (driver.Value, error) {
+ return tv.value, nil
+}
+
+func TestValuer(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ in := testValuer{"a_value"}
+ var out string
+ var rows *sql.Rows
+
+ dbt.mustExec("CREATE TABLE test (value VARCHAR(255)) CHARACTER SET utf8")
+ dbt.mustExec("INSERT INTO test VALUES (?)", in)
+ rows = dbt.mustQuery("SELECT value FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if in.value != out {
+ dbt.Errorf("Valuer: %v != %s", in, out)
+ }
+ } else {
+ dbt.Errorf("Valuer: no data")
+ }
+
+ dbt.mustExec("DROP TABLE IF EXISTS test")
+ })
+}
+
+type testValuerWithValidation struct {
+ value string
+}
+
+func (tv testValuerWithValidation) Value() (driver.Value, error) {
+ if len(tv.value) == 0 {
+ return nil, fmt.Errorf("Invalid string valuer. Value must not be empty")
+ }
+
+ return tv.value, nil
+}
+
+func TestValuerWithValidation(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ in := testValuerWithValidation{"a_value"}
+ var out string
+ var rows *sql.Rows
+
+ dbt.mustExec("CREATE TABLE testValuer (value VARCHAR(255)) CHARACTER SET utf8")
+ dbt.mustExec("INSERT INTO testValuer VALUES (?)", in)
+
+ rows = dbt.mustQuery("SELECT value FROM testValuer")
+ defer rows.Close()
+
+ if rows.Next() {
+ rows.Scan(&out)
+ if in.value != out {
+ dbt.Errorf("Valuer: %v != %s", in, out)
+ }
+ } else {
+ dbt.Errorf("Valuer: no data")
+ }
+
+ if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", testValuerWithValidation{""}); err == nil {
+ dbt.Errorf("Failed to check valuer error")
+ }
+
+ if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", nil); err != nil {
+ dbt.Errorf("Failed to check nil")
+ }
+
+ if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", map[string]bool{}); err == nil {
+ dbt.Errorf("Failed to check not valuer")
+ }
+
+ dbt.mustExec("DROP TABLE IF EXISTS testValuer")
+ })
+}
+
type timeTests struct {
dbtype string
tlayout string
@@ -684,7 +780,7 @@ func TestDateTime(t *testing.T) {
for _, setup := range setups.tests {
allowBinTime := true
if setup.s == "" {
- // fill time string whereever Go can reliable produce it
+			// fill time string wherever Go can reliably produce it
setup.s = setup.t.Format(setups.tlayout)
} else if setup.s[0] == '!' {
// skip tests using setup.t as source in queries
@@ -856,14 +952,14 @@ func TestNULL(t *testing.T) {
dbt.Fatal(err)
}
if b != nil {
- dbt.Error("non-nil []byte wich should be nil")
+ dbt.Error("non-nil []byte which should be nil")
}
// Read non-nil
if err = nonNullStmt.QueryRow().Scan(&b); err != nil {
dbt.Fatal(err)
}
if b == nil {
- dbt.Error("nil []byte wich should be non-nil")
+ dbt.Error("nil []byte which should be non-nil")
}
// Insert nil
b = nil
@@ -953,7 +1049,7 @@ func TestUint64(t *testing.T) {
}
func TestLongData(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
+ runTests(t, dsn+"&maxAllowedPacket=0", func(dbt *DBTest) {
var maxAllowedPacketSize int
err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize)
if err != nil {
@@ -1054,22 +1150,36 @@ func TestLoadData(t *testing.T) {
dbt.Fatalf("rows count mismatch. Got %d, want 4", i)
}
}
+
+ dbt.db.Exec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8")
+
+ // Local File
file, err := ioutil.TempFile("", "gotest")
defer os.Remove(file.Name())
if err != nil {
dbt.Fatal(err)
}
- file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n")
- file.Close()
+ RegisterLocalFile(file.Name())
- dbt.db.Exec("DROP TABLE IF EXISTS test")
- dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8")
+ // Try first with empty file
+ dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
+ var count int
+ err = dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&count)
+ if err != nil {
+ dbt.Fatal(err.Error())
+ }
+ if count != 0 {
+ dbt.Fatalf("unexpected row count: got %d, want 0", count)
+ }
- // Local File
- RegisterLocalFile(file.Name())
+		// Then fill the file with data and try to load it
+ file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n")
+ file.Close()
dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
verifyLoadDataResult()
- // negative test
+
+ // Try with non-existing file
_, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test")
if err == nil {
dbt.Fatal("load non-existent file didn't fail")
@@ -1145,84 +1255,6 @@ func TestFoundRows(t *testing.T) {
})
}
-func TestStrict(t *testing.T) {
- // ALLOW_INVALID_DATES to get rid of stricter modes - we want to test for warnings, not errors
- relaxedDsn := dsn + "&sql_mode='ALLOW_INVALID_DATES,NO_AUTO_CREATE_USER'"
- // make sure the MySQL version is recent enough with a separate connection
- // before running the test
- conn, err := MySQLDriver{}.Open(relaxedDsn)
- if conn != nil {
- conn.Close()
- }
- if me, ok := err.(*MySQLError); ok && me.Number == 1231 {
- // Error 1231: Variable 'sql_mode' can't be set to the value of 'ALLOW_INVALID_DATES'
- // => skip test, MySQL server version is too old
- return
- }
- runTests(t, relaxedDsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (a TINYINT NOT NULL, b CHAR(4))")
-
- var queries = [...]struct {
- in string
- codes []string
- }{
- {"DROP TABLE IF EXISTS no_such_table", []string{"1051"}},
- {"INSERT INTO test VALUES(10,'mysql'),(NULL,'test'),(300,'Open Source')", []string{"1265", "1048", "1264", "1265"}},
- }
- var err error
-
- var checkWarnings = func(err error, mode string, idx int) {
- if err == nil {
- dbt.Errorf("expected STRICT error on query [%s] %s", mode, queries[idx].in)
- }
-
- if warnings, ok := err.(MySQLWarnings); ok {
- var codes = make([]string, len(warnings))
- for i := range warnings {
- codes[i] = warnings[i].Code
- }
- if len(codes) != len(queries[idx].codes) {
- dbt.Errorf("unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes)
- }
-
- for i := range warnings {
- if codes[i] != queries[idx].codes[i] {
- dbt.Errorf("unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes)
- return
- }
- }
-
- } else {
- dbt.Errorf("unexpected error on query [%s] %s: %s", mode, queries[idx].in, err.Error())
- }
- }
-
- // text protocol
- for i := range queries {
- _, err = dbt.db.Exec(queries[i].in)
- checkWarnings(err, "text", i)
- }
-
- var stmt *sql.Stmt
-
- // binary protocol
- for i := range queries {
- stmt, err = dbt.db.Prepare(queries[i].in)
- if err != nil {
- dbt.Errorf("error on preparing query %s: %s", queries[i].in, err.Error())
- }
-
- _, err = stmt.Exec()
- checkWarnings(err, "binary", i)
-
- err = stmt.Close()
- if err != nil {
- dbt.Errorf("error on closing stmt for query %s: %s", queries[i].in, err.Error())
- }
- }
- })
-}
-
func TestTLS(t *testing.T) {
tlsTest := func(dbt *DBTest) {
if err := dbt.db.Ping(); err != nil {
@@ -1422,7 +1454,6 @@ func TestTimezoneConversion(t *testing.T) {
// Regression test for timezone handling
tzTest := func(dbt *DBTest) {
-
// Create table
dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)")
@@ -1638,8 +1669,9 @@ func TestStmtMultiRows(t *testing.T) {
// Regression test for
// * more than 32 NULL parameters (issue 209)
// * more parameters than fit into the buffer (issue 201)
+// * parameters * 64 > max_allowed_packet (issue 734)
func TestPreparedManyCols(t *testing.T) {
- const numParams = defaultBufSize
+ numParams := 65535
runTests(t, dsn, func(dbt *DBTest) {
query := "SELECT ?" + strings.Repeat(",?", numParams-1)
stmt, err := dbt.db.Prepare(query)
@@ -1647,15 +1679,25 @@ func TestPreparedManyCols(t *testing.T) {
dbt.Fatal(err)
}
defer stmt.Close()
+
// create more parameters than fit into the buffer
// which will take nil-values
params := make([]interface{}, numParams)
rows, err := stmt.Query(params...)
if err != nil {
- stmt.Close()
dbt.Fatal(err)
}
- defer rows.Close()
+ rows.Close()
+
+		// Create 0-byte strings which we can't send via STMT_LONG_DATA.
+ for i := 0; i < numParams; i++ {
+ params[i] = ""
+ }
+ rows, err = stmt.Query(params...)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ rows.Close()
})
}
@@ -1739,7 +1781,7 @@ func TestCustomDial(t *testing.T) {
return net.Dial(prot, addr)
})
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s&strict=true", user, pass, addr, dbname))
+ db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
@@ -1836,7 +1878,7 @@ func TestUnixSocketAuthFail(t *testing.T) {
}
}
t.Logf("socket: %s", socket)
- badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s&strict=true", user, badPass, socket, dbname)
+ badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s", user, badPass, socket, dbname)
db, err := sql.Open("mysql", badDSN)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
@@ -1902,3 +1944,77 @@ func TestInterruptBySignal(t *testing.T) {
}
})
}
+
+func TestColumnsReusesSlice(t *testing.T) {
+ rows := mysqlRows{
+ rs: resultSet{
+ columns: []mysqlField{
+ {
+ tableName: "test",
+ name: "A",
+ },
+ {
+ tableName: "test",
+ name: "B",
+ },
+ },
+ },
+ }
+
+ allocs := testing.AllocsPerRun(1, func() {
+ cols := rows.Columns()
+
+ if len(cols) != 2 {
+ t.Fatalf("expected 2 columns, got %d", len(cols))
+ }
+ })
+
+ if allocs != 0 {
+ t.Fatalf("expected 0 allocations, got %d", int(allocs))
+ }
+
+ if rows.rs.columnNames == nil {
+ t.Fatalf("expected columnNames to be set, got nil")
+ }
+}
+
+func TestRejectReadOnly(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ // Create Table
+ dbt.mustExec("CREATE TABLE test (value BOOL)")
+ // Set the session to read-only. We didn't set the `rejectReadOnly`
+ // option, so any writes after this should fail.
+ _, err := dbt.db.Exec("SET SESSION TRANSACTION READ ONLY")
+ // Error 1193: Unknown system variable 'TRANSACTION' => skip test,
+ // MySQL server version is too old
+ maybeSkip(t, err, 1193)
+ if _, err := dbt.db.Exec("DROP TABLE test"); err == nil {
+ t.Fatalf("writing to DB in read-only session without " +
+ "rejectReadOnly did not error")
+ }
+ // Set the session back to read-write so runTests() can properly clean
+ // up the table `test`.
+ dbt.mustExec("SET SESSION TRANSACTION READ WRITE")
+ })
+
+ // Enable the `rejectReadOnly` option.
+ runTests(t, dsn+"&rejectReadOnly=true", func(dbt *DBTest) {
+ // Create Table
+ dbt.mustExec("CREATE TABLE test (value BOOL)")
+ // Set the session to read only. Any writes after this should error on
+ // a driver.ErrBadConn, and cause `database/sql` to initiate a new
+ // connection.
+ dbt.mustExec("SET SESSION TRANSACTION READ ONLY")
+ // This would error, but `database/sql` should automatically retry on a
+ // new connection which is not read-only, and eventually succeed.
+ dbt.mustExec("DROP TABLE test")
+ })
+}
+
+func TestPing(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ if err := dbt.db.Ping(); err != nil {
+ dbt.fail("Ping", "Ping", err)
+ }
+ })
+}
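+
For context on the rejectReadOnly option exercised by TestRejectReadOnly above, here is a minimal caller-side sketch. The DSN, host, and table names are placeholders, not part of this diff; it assumes a deployment (e.g. AWS Aurora) where a connection can briefly land on a read-only node during failover.

	package main

	import (
		"database/sql"
		"log"

		_ "github.com/go-sql-driver/mysql"
	)

	func main() {
		// With rejectReadOnly=true, a write failing with MySQL error 1792 or
		// 1290 makes the driver close the connection and return
		// driver.ErrBadConn, so database/sql retries on a fresh connection.
		db, err := sql.Open("mysql", "user:pass@tcp(db.example.com:3306)/app?rejectReadOnly=true")
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		if _, err := db.Exec("UPDATE counters SET n = n + 1 WHERE id = 1"); err != nil {
			log.Fatal(err)
		}
	}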
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
index ac00dcedd..47eab6945 100644
--- a/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -15,6 +15,7 @@ import (
"fmt"
"net"
"net/url"
+ "sort"
"strconv"
"strings"
"time"
@@ -27,7 +28,9 @@ var (
errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
)
-// Config is a configuration parsed from a DSN string
+// Config is a configuration parsed from a DSN string.
+// If a new Config is created instead of being parsed from a DSN string,
+// the NewConfig function should be used, which sets default values.
type Config struct {
User string // Username
Passwd string // Password (requires User)
@@ -53,7 +56,54 @@ type Config struct {
InterpolateParams bool // Interpolate placeholders into query string
MultiStatements bool // Allow multiple statements in one query
ParseTime bool // Parse time values to time.Time
- Strict bool // Return warnings as errors
+ RejectReadOnly bool // Reject read-only connections
+}
+
+// NewConfig creates a new Config and sets default values.
+func NewConfig() *Config {
+ return &Config{
+ Collation: defaultCollation,
+ Loc: time.UTC,
+ MaxAllowedPacket: defaultMaxAllowedPacket,
+ AllowNativePasswords: true,
+ }
+}
+
+func (cfg *Config) normalize() error {
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+
+ } else if cfg.Net == "tcp" {
+ cfg.Addr = ensureHavePort(cfg.Addr)
+ }
+
+ if cfg.tls != nil {
+ if cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify {
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ cfg.tls.ServerName = host
+ }
+ }
+ }
+
+ return nil
}
// FormatDSN formats the given Config into a DSN string which can be passed to
@@ -102,12 +152,12 @@ func (cfg *Config) FormatDSN() string {
}
}
- if cfg.AllowNativePasswords {
+ if !cfg.AllowNativePasswords {
if hasParam {
- buf.WriteString("&allowNativePasswords=true")
+ buf.WriteString("&allowNativePasswords=false")
} else {
hasParam = true
- buf.WriteString("?allowNativePasswords=true")
+ buf.WriteString("?allowNativePasswords=false")
}
}
@@ -195,12 +245,12 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(cfg.ReadTimeout.String())
}
- if cfg.Strict {
+ if cfg.RejectReadOnly {
if hasParam {
- buf.WriteString("&strict=true")
+ buf.WriteString("&rejectReadOnly=true")
} else {
hasParam = true
- buf.WriteString("?strict=true")
+ buf.WriteString("?rejectReadOnly=true")
}
}
@@ -234,7 +284,7 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(cfg.WriteTimeout.String())
}
- if cfg.MaxAllowedPacket > 0 {
+ if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
if hasParam {
buf.WriteString("&maxAllowedPacket=")
} else {
@@ -247,7 +297,12 @@ func (cfg *Config) FormatDSN() string {
// other params
if cfg.Params != nil {
- for param, value := range cfg.Params {
+ var params []string
+ for param := range cfg.Params {
+ params = append(params, param)
+ }
+ sort.Strings(params)
+ for _, param := range params {
if hasParam {
buf.WriteByte('&')
} else {
@@ -257,7 +312,7 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(param)
buf.WriteByte('=')
- buf.WriteString(url.QueryEscape(value))
+ buf.WriteString(url.QueryEscape(cfg.Params[param]))
}
}
@@ -267,10 +322,7 @@ func (cfg *Config) FormatDSN() string {
// ParseDSN parses the DSN string to a Config
func ParseDSN(dsn string) (cfg *Config, err error) {
// New config with some default values
- cfg = &Config{
- Loc: time.UTC,
- Collation: defaultCollation,
- }
+ cfg = NewConfig()
// [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
// Find the last '/' (since the password or the net addr might contain a '/')
@@ -338,28 +390,9 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
return nil, errInvalidDSNNoSlash
}
- if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
- return nil, errInvalidDSNUnsafeCollation
- }
-
- // Set default network if empty
- if cfg.Net == "" {
- cfg.Net = "tcp"
+ if err = cfg.normalize(); err != nil {
+ return nil, err
}
-
- // Set default address if empty
- if cfg.Addr == "" {
- switch cfg.Net {
- case "tcp":
- cfg.Addr = "127.0.0.1:3306"
- case "unix":
- cfg.Addr = "/tmp/mysql.sock"
- default:
- return nil, errors.New("default addr for network '" + cfg.Net + "' unknown")
- }
-
- }
-
return
}
@@ -374,7 +407,6 @@ func parseDSNParams(cfg *Config, params string) (err error) {
// cfg params
switch value := param[1]; param[0] {
-
// Disable INFILE whitelist / enable all files
case "allowAllFiles":
var isBool bool
@@ -472,14 +504,18 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return
}
- // Strict mode
- case "strict":
+ // Reject read-only connections
+ case "rejectReadOnly":
var isBool bool
- cfg.Strict, isBool = readBool(value)
+ cfg.RejectReadOnly, isBool = readBool(value)
if !isBool {
return errors.New("invalid bool value: " + value)
}
+ // Strict mode
+ case "strict":
+ panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
+
// Dial Timeout
case "timeout":
cfg.Timeout, err = time.ParseDuration(value)
@@ -506,14 +542,7 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return fmt.Errorf("invalid value for TLS config name: %v", err)
}
- if tlsConfig, ok := tlsConfigRegister[name]; ok {
- if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
- host, _, err := net.SplitHostPort(cfg.Addr)
- if err == nil {
- tlsConfig.ServerName = host
- }
- }
-
+ if tlsConfig := getTLSConfigClone(name); tlsConfig != nil {
cfg.TLSConfig = name
cfg.tls = tlsConfig
} else {
@@ -546,3 +575,10 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return
}
+
+func ensureHavePort(addr string) string {
+ if _, _, err := net.SplitHostPort(addr); err != nil {
+ return net.JoinHostPort(addr, "3306")
+ }
+ return addr
+}
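
The dsn.go changes above split default-setting into NewConfig and validation/defaulting into normalize, and make FormatDSN emit Params in sorted key order. A sketch of building a DSN programmatically instead of by string concatenation (field values are placeholders):

	package main

	import (
		"fmt"

		"github.com/go-sql-driver/mysql"
	)

	func main() {
		// NewConfig pre-fills the defaults ParseDSN would otherwise set
		// (collation, UTC location, maxAllowedPacket, allowNativePasswords).
		cfg := mysql.NewConfig()
		cfg.User = "user"
		cfg.Passwd = "secret"
		cfg.Net = "tcp"
		cfg.Addr = "127.0.0.1:3306" // ParseDSN would complete a missing port via ensureHavePort
		cfg.DBName = "app"
		cfg.Params = map[string]string{"charset": "utf8mb4"}

		// Params are now written in sorted key order, so the output is stable.
		fmt.Println(cfg.FormatDSN())
		// user:secret@tcp(127.0.0.1:3306)/app?charset=utf8mb4
	}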
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn_test.go b/vendor/github.com/go-sql-driver/mysql/dsn_test.go
index 0693192ad..7507d1201 100644
--- a/vendor/github.com/go-sql-driver/mysql/dsn_test.go
+++ b/vendor/github.com/go-sql-driver/mysql/dsn_test.go
@@ -22,47 +22,57 @@ var testDSNs = []struct {
out *Config
}{{
"username:password@protocol(address)/dbname?param=value",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC},
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, ColumnsWithAlias: true},
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, ColumnsWithAlias: true},
}, {
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, ColumnsWithAlias: true, MultiStatements: true},
+ &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, ColumnsWithAlias: true, MultiStatements: true},
}, {
"user@unix(/path/to/socket)/dbname?charset=utf8",
- &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC},
+ &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, TLSConfig: "true"},
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, TLSConfig: "true"},
}, {
"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, TLSConfig: "skip-verify"},
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, TLSConfig: "skip-verify"},
}, {
"user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, ClientFoundRows: true, MaxAllowedPacket: 16777216},
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, ClientFoundRows: true, MaxAllowedPacket: 16777216},
+}, {
+ "user:password@/dbname?allowNativePasswords=false&maxAllowedPacket=0",
+ &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: 0, AllowNativePasswords: false},
}, {
"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
- &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.Local},
+ &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
"/dbname",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
"@/",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
"/",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
"",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
"user:p@/ssword@/",
- &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC},
+ &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
}, {
"unix/?arg=%2Fsome%2Fpath.ext",
- &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8_general_ci", Loc: time.UTC},
-}}
+ &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
+}, {
+ "tcp(127.0.0.1)/dbname",
+ &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
+}, {
+ "tcp(de:ad:be:ef::ca:fe)/dbname",
+ &Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
+},
+}
func TestDSNParser(t *testing.T) {
for i, tst := range testDSNs {
@@ -88,6 +98,7 @@ func TestDSNParserInvalid(t *testing.T) {
"(/", // no closing brace
"net(addr)//", // unescaped
"User:pass@tcp(1.2.3.4:3306)", // no trailing slash
+ "net()/", // unknown default addr
//"/dbname?arg=/some/unescaped/path",
}
@@ -159,11 +170,41 @@ func TestDSNWithCustomTLS(t *testing.T) {
t.Error(err.Error())
} else if cfg.tls.ServerName != name {
t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst)
+ } else if tlsCfg.ServerName != "" {
+ t.Errorf("tlsCfg was mutated: ServerName (%s) should be empty when parsing DSN (%s).", name, tst)
}
DeregisterTLSConfig("utils_test")
}
+func TestDSNTLSConfig(t *testing.T) {
+ expectedServerName := "example.com"
+ dsn := "tcp(example.com:1234)/?tls=true"
+
+ cfg, err := ParseDSN(dsn)
+ if err != nil {
+ t.Error(err.Error())
+ }
+ if cfg.tls == nil {
+ t.Error("cfg.tls should not be nil")
+ }
+ if cfg.tls.ServerName != expectedServerName {
+ t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
+ }
+
+ dsn = "tcp(example.com)/?tls=true"
+ cfg, err = ParseDSN(dsn)
+ if err != nil {
+ t.Error(err.Error())
+ }
+ if cfg.tls == nil {
+ t.Error("cfg.tls should not be nil")
+ }
+ if cfg.tls.ServerName != expectedServerName {
+ t.Errorf("cfg.tls.ServerName should be %q, got %q (host without port)", expectedServerName, cfg.tls.ServerName)
+ }
+}
+
func TestDSNWithCustomTLSQueryEscape(t *testing.T) {
const configKey = "&%!:"
dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey)
@@ -218,6 +259,21 @@ func TestDSNUnsafeCollation(t *testing.T) {
}
}
+func TestParamsAreSorted(t *testing.T) {
+ expected := "/dbname?interpolateParams=true&foobar=baz&quux=loo"
+ cfg := NewConfig()
+ cfg.DBName = "dbname"
+ cfg.InterpolateParams = true
+ cfg.Params = map[string]string{
+ "quux": "loo",
+ "foobar": "baz",
+ }
+ actual := cfg.FormatDSN()
+ if actual != expected {
+ t.Errorf("generic Config.Params were not sorted in the DSN: want %#v, got %#v", expected, actual)
+ }
+}
+
func BenchmarkParseDSN(b *testing.B) {
b.ReportAllocs()
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
index 857854e14..760782ff2 100644
--- a/vendor/github.com/go-sql-driver/mysql/errors.go
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -9,10 +9,8 @@
package mysql
import (
- "database/sql/driver"
"errors"
"fmt"
- "io"
"log"
"os"
)
@@ -31,6 +29,12 @@ var (
ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
ErrBusyBuffer = errors.New("busy buffer")
+
+ // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
+ // to trigger a resend.
+ // See https://github.com/go-sql-driver/mysql/pull/302
+ errBadConnNoWrite = errors.New("bad connection")
)
var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
@@ -59,74 +63,3 @@ type MySQLError struct {
func (me *MySQLError) Error() string {
return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
}
-
-// MySQLWarnings is an error type which represents a group of one or more MySQL
-// warnings
-type MySQLWarnings []MySQLWarning
-
-func (mws MySQLWarnings) Error() string {
- var msg string
- for i, warning := range mws {
- if i > 0 {
- msg += "\r\n"
- }
- msg += fmt.Sprintf(
- "%s %s: %s",
- warning.Level,
- warning.Code,
- warning.Message,
- )
- }
- return msg
-}
-
-// MySQLWarning is an error type which represents a single MySQL warning.
-// Warnings are returned in groups only. See MySQLWarnings
-type MySQLWarning struct {
- Level string
- Code string
- Message string
-}
-
-func (mc *mysqlConn) getWarnings() (err error) {
- rows, err := mc.Query("SHOW WARNINGS", nil)
- if err != nil {
- return
- }
-
- var warnings = MySQLWarnings{}
- var values = make([]driver.Value, 3)
-
- for {
- err = rows.Next(values)
- switch err {
- case nil:
- warning := MySQLWarning{}
-
- if raw, ok := values[0].([]byte); ok {
- warning.Level = string(raw)
- } else {
- warning.Level = fmt.Sprintf("%s", values[0])
- }
- if raw, ok := values[1].([]byte); ok {
- warning.Code = string(raw)
- } else {
- warning.Code = fmt.Sprintf("%s", values[1])
- }
- if raw, ok := values[2].([]byte); ok {
- warning.Message = string(raw)
- } else {
- warning.Message = fmt.Sprintf("%s", values[0])
- }
-
- warnings = append(warnings, warning)
-
- case io.EOF:
- return warnings
-
- default:
- rows.Close()
- return
- }
- }
-}
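
With strict mode gone, the driver no longer turns warnings into errors; MySQLWarnings and getWarnings are removed outright. Callers that still care can fetch warnings explicitly. A hedged sketch of one way to do it (helper name is ours; it must run on the same session that produced the warnings, e.g. inside a transaction):

	package dbutil

	import (
		"database/sql"
		"fmt"
	)

	// fetchWarnings reads SHOW WARNINGS on the transaction's session. It
	// approximates the removed strict-mode reporting, but as an explicit,
	// caller-driven step instead of an error return.
	func fetchWarnings(tx *sql.Tx) ([]string, error) {
		rows, err := tx.Query("SHOW WARNINGS")
		if err != nil {
			return nil, err
		}
		defer rows.Close()

		var warnings []string
		for rows.Next() {
			var level, message string
			var code int
			if err := rows.Scan(&level, &code, &message); err != nil {
				return nil, err
			}
			warnings = append(warnings, fmt.Sprintf("%s %d: %s", level, code, message))
		}
		return warnings, rows.Err()
	}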
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
new file mode 100644
index 000000000..e1e2ece4b
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -0,0 +1,194 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "reflect"
+)
+
+func (mf *mysqlField) typeDatabaseName() string {
+ switch mf.fieldType {
+ case fieldTypeBit:
+ return "BIT"
+ case fieldTypeBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TEXT"
+ }
+ return "BLOB"
+ case fieldTypeDate:
+ return "DATE"
+ case fieldTypeDateTime:
+ return "DATETIME"
+ case fieldTypeDecimal:
+ return "DECIMAL"
+ case fieldTypeDouble:
+ return "DOUBLE"
+ case fieldTypeEnum:
+ return "ENUM"
+ case fieldTypeFloat:
+ return "FLOAT"
+ case fieldTypeGeometry:
+ return "GEOMETRY"
+ case fieldTypeInt24:
+ return "MEDIUMINT"
+ case fieldTypeJSON:
+ return "JSON"
+ case fieldTypeLong:
+ return "INT"
+ case fieldTypeLongBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "LONGTEXT"
+ }
+ return "LONGBLOB"
+ case fieldTypeLongLong:
+ return "BIGINT"
+ case fieldTypeMediumBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "MEDIUMTEXT"
+ }
+ return "MEDIUMBLOB"
+ case fieldTypeNewDate:
+ return "DATE"
+ case fieldTypeNewDecimal:
+ return "DECIMAL"
+ case fieldTypeNULL:
+ return "NULL"
+ case fieldTypeSet:
+ return "SET"
+ case fieldTypeShort:
+ return "SMALLINT"
+ case fieldTypeString:
+ if mf.charSet == collations[binaryCollation] {
+ return "BINARY"
+ }
+ return "CHAR"
+ case fieldTypeTime:
+ return "TIME"
+ case fieldTypeTimestamp:
+ return "TIMESTAMP"
+ case fieldTypeTiny:
+ return "TINYINT"
+ case fieldTypeTinyBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TINYTEXT"
+ }
+ return "TINYBLOB"
+ case fieldTypeVarChar:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeVarString:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeYear:
+ return "YEAR"
+ default:
+ return ""
+ }
+}
+
+var (
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ scanTypeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+type mysqlField struct {
+ tableName string
+ name string
+ length uint32
+ flags fieldFlag
+ fieldType fieldType
+ decimals byte
+ charSet uint8
+}
+
+func (mf *mysqlField) scanType() reflect.Type {
+ switch mf.fieldType {
+ case fieldTypeTiny:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint8
+ }
+ return scanTypeInt8
+ }
+ return scanTypeNullInt
+
+ case fieldTypeShort, fieldTypeYear:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint16
+ }
+ return scanTypeInt16
+ }
+ return scanTypeNullInt
+
+ case fieldTypeInt24, fieldTypeLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint32
+ }
+ return scanTypeInt32
+ }
+ return scanTypeNullInt
+
+ case fieldTypeLongLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint64
+ }
+ return scanTypeInt64
+ }
+ return scanTypeNullInt
+
+ case fieldTypeFloat:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat32
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDouble:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat64
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeTime:
+ return scanTypeRawBytes
+
+ case fieldTypeDate, fieldTypeNewDate,
+ fieldTypeTimestamp, fieldTypeDateTime:
+ // NullTime is always returned for more consistent behavior as it can
+ // handle both cases of parseTime regardless of whether the field is nullable.
+ return scanTypeNullTime
+
+ default:
+ return scanTypeUnknown
+ }
+}
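
The new fields.go wires the driver into database/sql's ColumnType introspection (Go 1.8+): typeDatabaseName backs DatabaseTypeName and scanType backs ScanType. A caller-side sketch (query and column names are illustrative only):

	package main

	import (
		"database/sql"
		"fmt"
		"log"

		_ "github.com/go-sql-driver/mysql"
	)

	func main() {
		db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/app")
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		rows, err := db.Query("SELECT id, name, created_at FROM users LIMIT 1")
		if err != nil {
			log.Fatal(err)
		}
		defer rows.Close()

		cols, err := rows.ColumnTypes()
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range cols {
			nullable, _ := c.Nullable() // flags&flagNotNULL == 0
			// e.g. "BIGINT" from typeDatabaseName, sql.NullInt64 from scanType
			fmt.Printf("%s: %s nullable=%v scans-as=%v\n",
				c.Name(), c.DatabaseTypeName(), nullable, c.ScanType())
		}
	}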
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
index 547357cfa..4020f9192 100644
--- a/vendor/github.com/go-sql-driver/mysql/infile.go
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -147,7 +147,8 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
}
// send content packets
- if err == nil {
+ // if packetSize == 0, the Reader contains no data
+ if err == nil && packetSize > 0 {
data := make([]byte, 4+packetSize)
var n int
for err == nil {
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index aafe9793e..afc3fcc46 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -30,9 +30,12 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// read packet header
data, err := mc.buf.readNext(4)
if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
errLog.Print(err)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
// packet length [24 bit]
@@ -54,7 +57,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if prevData == nil {
errLog.Print(ErrMalformPkt)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
return prevData, nil
@@ -63,9 +66,12 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// read packet body [pktLen bytes]
data, err = mc.buf.readNext(pktLen)
if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
errLog.Print(err)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
// return data if this was the last packet
@@ -125,11 +131,20 @@ func (mc *mysqlConn) writePacket(data []byte) error {
// Handle error
if err == nil { // n != len(data)
+ mc.cleanup()
errLog.Print(ErrMalformPkt)
} else {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
+ if n == 0 && pktLen == len(data)-4 {
+ // only for the first loop iteration when nothing was written yet
+ return errBadConnNoWrite
+ }
+ mc.cleanup()
errLog.Print(err)
}
- return driver.ErrBadConn
+ return ErrInvalidConn
}
}
@@ -142,6 +157,11 @@ func (mc *mysqlConn) writePacket(data []byte) error {
func (mc *mysqlConn) readInitPacket() ([]byte, error) {
data, err := mc.readPacket()
if err != nil {
+ // For init we can rewrite this to ErrBadConn so database/sql can retry,
+ // since connection initialization never risks retrying non-idempotent actions.
+ if err == ErrInvalidConn {
+ return nil, driver.ErrBadConn
+ }
return nil, err
}
@@ -263,7 +283,7 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// ClientFlags [32 bit]
@@ -341,7 +361,9 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error {
// User password
- scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.Passwd))
+ // https://dev.mysql.com/doc/internals/en/old-password-authentication.html
+ // Old password authentication only needs, and will only ever need, an 8-byte challenge.
+ scrambleBuff := scrambleOldPassword(cipher[:8], []byte(mc.cfg.Passwd))
// Calculate the packet length and add a tailing 0
pktLen := len(scrambleBuff) + 1
@@ -349,7 +371,7 @@ func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add the scrambled password [null terminated string]
@@ -368,7 +390,7 @@ func (mc *mysqlConn) writeClearAuthPacket() error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add the clear password [null terminated string]
@@ -381,7 +403,9 @@ func (mc *mysqlConn) writeClearAuthPacket() error {
// Native password authentication method
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error {
- scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
+ // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
+ // Native password authentication only needs, and will only ever need, a 20-byte challenge.
+ scrambleBuff := scramblePassword(cipher[0:20], []byte(mc.cfg.Passwd))
// Calculate the packet length and add a tailing 0
pktLen := len(scrambleBuff)
@@ -389,7 +413,7 @@ func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add the scramble
@@ -410,7 +434,7 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -429,7 +453,7 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -450,7 +474,7 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -484,25 +508,26 @@ func (mc *mysqlConn) readResultOK() ([]byte, error) {
if len(data) > 1 {
pluginEndIndex := bytes.IndexByte(data, 0x00)
plugin := string(data[1:pluginEndIndex])
- cipher := data[pluginEndIndex+1 : len(data)-1]
+ cipher := data[pluginEndIndex+1:]
- if plugin == "mysql_old_password" {
+ switch plugin {
+ case "mysql_old_password":
// using old_passwords
return cipher, ErrOldPassword
- } else if plugin == "mysql_clear_password" {
+ case "mysql_clear_password":
// using clear text password
return cipher, ErrCleartextPassword
- } else if plugin == "mysql_native_password" {
+ case "mysql_native_password":
// using mysql default authentication method
return cipher, ErrNativePassword
- } else {
+ default:
return cipher, ErrUnknownPlugin
}
- } else {
- // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
- return nil, ErrOldPassword
}
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, ErrOldPassword
+
default: // Error otherwise
return nil, mc.handleErrorPacket(data)
}
@@ -550,6 +575,22 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error {
// Error Number [16 bit uint]
errno := binary.LittleEndian.Uint16(data[1:3])
+ // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+ // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+ if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+ // Oops; we are connected to a read-only connection, and won't be able
+ // to issue any write statements. Since RejectReadOnly is configured,
+ // we throw away this connection in the hope that the next one will have
+ // write permission. This is specifically for a possible race condition
+ // during failover (e.g. on AWS Aurora). See README.md for more.
+ //
+ // We explicitly close the connection before returning
+ // driver.ErrBadConn to ensure that `database/sql` purges this
+ // connection and initiates a new one for the next statement.
+ mc.Close()
+ return driver.ErrBadConn
+ }
+
pos := 3
// SQL State [optional: # + 5bytes string]
@@ -584,19 +625,12 @@ func (mc *mysqlConn) handleOkPacket(data []byte) error {
// server_status [2 bytes]
mc.status = readStatus(data[1+n+m : 1+n+m+2])
- if err := mc.discardResults(); err != nil {
- return err
+ if mc.status&statusMoreResultsExists != 0 {
+ return nil
}
// warning count [2 bytes]
- if !mc.strict {
- return nil
- }
- pos := 1 + n + m + 2
- if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 {
- return mc.getWarnings()
- }
return nil
}
@@ -668,14 +702,21 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
if err != nil {
return nil, err
}
+ pos += n
// Filler [uint8]
+ pos++
+
// Charset [charset, collation uint8]
+ columns[i].charSet = data[pos]
+ pos += 2
+
// Length [uint32]
- pos += n + 1 + 2 + 4
+ columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+ pos += 4
// Field type [uint8]
- columns[i].fieldType = data[pos]
+ columns[i].fieldType = fieldType(data[pos])
pos++
// Flags [uint16]
@@ -698,6 +739,10 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
func (rows *textRows) readRow(dest []driver.Value) error {
mc := rows.mc
+ if rows.rs.done {
+ return io.EOF
+ }
+
data, err := mc.readPacket()
if err != nil {
return err
@@ -707,15 +752,11 @@ func (rows *textRows) readRow(dest []driver.Value) error {
if data[0] == iEOF && len(data) == 5 {
// server_status [2 bytes]
rows.mc.status = readStatus(data[3:])
- err = rows.mc.discardResults()
- if err == nil {
- err = io.EOF
- } else {
- // connection unusable
- rows.mc.Close()
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
}
- rows.mc = nil
- return err
+ return io.EOF
}
if data[0] == iERR {
rows.mc = nil
@@ -736,7 +777,7 @@ func (rows *textRows) readRow(dest []driver.Value) error {
if !mc.parseTime {
continue
} else {
- switch rows.columns[i].fieldType {
+ switch rows.rs.columns[i].fieldType {
case fieldTypeTimestamp, fieldTypeDateTime,
fieldTypeDate, fieldTypeNewDate:
dest[i], err = parseDateTime(
@@ -808,14 +849,7 @@ func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
// Reserved [8 bit]
// Warning count [16 bit uint]
- if !stmt.mc.strict {
- return columnCount, nil
- }
- // Check for warnings count > 0, only available in MySQL > 4.1
- if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 {
- return columnCount, stmt.mc.getWarnings()
- }
return columnCount, nil
}
return 0, err
@@ -887,6 +921,12 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
const minPktLen = 4 + 1 + 4 + 1 + 4
mc := stmt.mc
+ // Determine the threshold dynamically to avoid running out of packet space.
+ longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
+ if longDataSize < 64 {
+ longDataSize = 64
+ }
+
// Reset packet-sequence
mc.sequence = 0
@@ -900,7 +940,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// command [1 byte]
@@ -959,7 +999,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// build NULL-bitmap
if arg == nil {
nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = fieldTypeNULL
+ paramTypes[i+i] = byte(fieldTypeNULL)
paramTypes[i+i+1] = 0x00
continue
}
@@ -967,7 +1007,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// cache types and values
switch v := arg.(type) {
case int64:
- paramTypes[i+i] = fieldTypeLongLong
+ paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
@@ -983,7 +1023,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case float64:
- paramTypes[i+i] = fieldTypeDouble
+ paramTypes[i+i] = byte(fieldTypeDouble)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
@@ -999,7 +1039,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case bool:
- paramTypes[i+i] = fieldTypeTiny
+ paramTypes[i+i] = byte(fieldTypeTiny)
paramTypes[i+i+1] = 0x00
if v {
@@ -1011,10 +1051,10 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
case []byte:
// Common case (non-nil value) first
if v != nil {
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
- if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ if len(v) < longDataSize {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)
@@ -1029,14 +1069,14 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// Handle []byte(nil) as a NULL value
nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = fieldTypeNULL
+ paramTypes[i+i] = byte(fieldTypeNULL)
paramTypes[i+i+1] = 0x00
case string:
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
- if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ if len(v) < longDataSize {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)
@@ -1048,20 +1088,22 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case time.Time:
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
- var val []byte
+ var a [64]byte
+ var b = a[:0]
+
if v.IsZero() {
- val = []byte("0000-00-00")
+ b = append(b, "0000-00-00"...)
} else {
- val = []byte(v.In(mc.cfg.Loc).Format(timeFormat))
+ b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat)
}
paramValues = appendLengthEncodedInteger(paramValues,
- uint64(len(val)),
+ uint64(len(b)),
)
- paramValues = append(paramValues, val...)
+ paramValues = append(paramValues, b...)
default:
return fmt.Errorf("can not convert type: %T", arg)
@@ -1097,8 +1139,6 @@ func (mc *mysqlConn) discardResults() error {
if err := mc.readUntilEOF(); err != nil {
return err
}
- } else {
- mc.status &^= statusMoreResultsExists
}
}
return nil
@@ -1116,20 +1156,17 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
// EOF Packet
if data[0] == iEOF && len(data) == 5 {
rows.mc.status = readStatus(data[3:])
- err = rows.mc.discardResults()
- if err == nil {
- err = io.EOF
- } else {
- // connection unusable
- rows.mc.Close()
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
}
- rows.mc = nil
- return err
+ return io.EOF
}
+ mc := rows.mc
rows.mc = nil
// Error otherwise
- return rows.mc.handleErrorPacket(data)
+ return mc.handleErrorPacket(data)
}
// NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
@@ -1145,14 +1182,14 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
}
// Convert to byte-coded string
- switch rows.columns[i].fieldType {
+ switch rows.rs.columns[i].fieldType {
case fieldTypeNULL:
dest[i] = nil
continue
// Numeric Types
case fieldTypeTiny:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(data[pos])
} else {
dest[i] = int64(int8(data[pos]))
@@ -1161,7 +1198,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeShort, fieldTypeYear:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
} else {
dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
@@ -1170,7 +1207,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeInt24, fieldTypeLong:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
} else {
dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
@@ -1179,7 +1216,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeLongLong:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
val := binary.LittleEndian.Uint64(data[pos : pos+8])
if val > math.MaxInt64 {
dest[i] = uint64ToString(val)
@@ -1193,7 +1230,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeFloat:
- dest[i] = float32(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
pos += 4
continue
@@ -1233,10 +1270,10 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
case isNull:
dest[i] = nil
continue
- case rows.columns[i].fieldType == fieldTypeTime:
+ case rows.rs.columns[i].fieldType == fieldTypeTime:
// database/sql does not support an equivalent to TIME, return a string
var dstlen uint8
- switch decimals := rows.columns[i].decimals; decimals {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
case 0x00, 0x1f:
dstlen = 8
case 1, 2, 3, 4, 5, 6:
@@ -1244,7 +1281,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
default:
return fmt.Errorf(
"protocol error, illegal decimals value %d",
- rows.columns[i].decimals,
+ rows.rs.columns[i].decimals,
)
}
dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true)
@@ -1252,10 +1289,10 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
default:
var dstlen uint8
- if rows.columns[i].fieldType == fieldTypeDate {
+ if rows.rs.columns[i].fieldType == fieldTypeDate {
dstlen = 10
} else {
- switch decimals := rows.columns[i].decimals; decimals {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
case 0x00, 0x1f:
dstlen = 19
case 1, 2, 3, 4, 5, 6:
@@ -1263,7 +1300,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
default:
return fmt.Errorf(
"protocol error, illegal decimals value %d",
- rows.columns[i].decimals,
+ rows.rs.columns[i].decimals,
)
}
}
@@ -1279,7 +1316,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
// Please report if this happens!
default:
- return fmt.Errorf("unknown field type %d", rows.columns[i].fieldType)
+ return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
}
}
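
The writeExecutePacket hunk replaces the old per-parameter inline-size check with a single threshold: longDataSize = maxAllowedPacket / (paramCount + 1), floored at 64. Values shorter than the threshold are sent inline; longer ones go through STMT_SEND_LONG_DATA. A small worked illustration (numbers are examples, not driver constants):

	package main

	import "fmt"

	func main() {
		cases := []struct{ maxAllowedPacket, paramCount int }{
			{4 << 20, 1},     // 4 MiB packet, 1 param   -> 2 MiB inline budget
			{1 << 20, 65535}, // 1 MiB, 65535 params     -> 16, floored to 64
		}
		for _, c := range cases {
			longDataSize := c.maxAllowedPacket / (c.paramCount + 1)
			if longDataSize < 64 {
				longDataSize = 64
			}
			fmt.Println(longDataSize)
		}
	}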
diff --git a/vendor/github.com/go-sql-driver/mysql/packets_test.go b/vendor/github.com/go-sql-driver/mysql/packets_test.go
index 98404586a..2f8207511 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets_test.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets_test.go
@@ -9,7 +9,6 @@
package mysql
import (
- "database/sql/driver"
"errors"
"net"
"testing"
@@ -101,7 +100,7 @@ func TestReadPacketSingleByte(t *testing.T) {
t.Fatal(err)
}
if len(packet) != 1 {
- t.Fatalf("unexpected packet lenght: expected %d, got %d", 1, len(packet))
+ t.Fatalf("unexpected packet length: expected %d, got %d", 1, len(packet))
}
if packet[0] != 0xff {
t.Fatalf("unexpected packet content: expected %x, got %x", 0xff, packet[0])
@@ -171,7 +170,7 @@ func TestReadPacketSplit(t *testing.T) {
t.Fatal(err)
}
if len(packet) != maxPacketSize {
- t.Fatalf("unexpected packet lenght: expected %d, got %d", maxPacketSize, len(packet))
+ t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize, len(packet))
}
if packet[0] != 0x11 {
t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
@@ -205,7 +204,7 @@ func TestReadPacketSplit(t *testing.T) {
t.Fatal(err)
}
if len(packet) != 2*maxPacketSize {
- t.Fatalf("unexpected packet lenght: expected %d, got %d", 2*maxPacketSize, len(packet))
+ t.Fatalf("unexpected packet length: expected %d, got %d", 2*maxPacketSize, len(packet))
}
if packet[0] != 0x11 {
t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
@@ -231,7 +230,7 @@ func TestReadPacketSplit(t *testing.T) {
t.Fatal(err)
}
if len(packet) != maxPacketSize+42 {
- t.Fatalf("unexpected packet lenght: expected %d, got %d", maxPacketSize+42, len(packet))
+ t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize+42, len(packet))
}
if packet[0] != 0x11 {
t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
@@ -244,15 +243,16 @@ func TestReadPacketSplit(t *testing.T) {
func TestReadPacketFail(t *testing.T) {
conn := new(mockConn)
mc := &mysqlConn{
- buf: newBuffer(conn),
+ buf: newBuffer(conn),
+ closech: make(chan struct{}),
}
// illegal empty (stand-alone) packet
conn.data = []byte{0x00, 0x00, 0x00, 0x00}
conn.maxReads = 1
_, err := mc.readPacket()
- if err != driver.ErrBadConn {
- t.Errorf("expected ErrBadConn, got %v", err)
+ if err != ErrInvalidConn {
+ t.Errorf("expected ErrInvalidConn, got %v", err)
}
// reset
@@ -263,8 +263,8 @@ func TestReadPacketFail(t *testing.T) {
// fail to read header
conn.closed = true
_, err = mc.readPacket()
- if err != driver.ErrBadConn {
- t.Errorf("expected ErrBadConn, got %v", err)
+ if err != ErrInvalidConn {
+ t.Errorf("expected ErrInvalidConn, got %v", err)
}
// reset
@@ -276,7 +276,7 @@ func TestReadPacketFail(t *testing.T) {
// fail to read body
conn.maxReads = 1
_, err = mc.readPacket()
- if err != driver.ErrBadConn {
- t.Errorf("expected ErrBadConn, got %v", err)
+ if err != ErrInvalidConn {
+ t.Errorf("expected ErrInvalidConn, got %v", err)
}
}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
index c08255eee..d3b1e2822 100644
--- a/vendor/github.com/go-sql-driver/mysql/rows.go
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -11,19 +11,20 @@ package mysql
import (
"database/sql/driver"
"io"
+ "math"
+ "reflect"
)
-type mysqlField struct {
- tableName string
- name string
- flags fieldFlag
- fieldType byte
- decimals byte
+type resultSet struct {
+ columns []mysqlField
+ columnNames []string
+ done bool
}
type mysqlRows struct {
- mc *mysqlConn
- columns []mysqlField
+ mc *mysqlConn
+ rs resultSet
+ finish func()
}
type binaryRows struct {
@@ -34,37 +35,86 @@ type textRows struct {
mysqlRows
}
-type emptyRows struct{}
-
func (rows *mysqlRows) Columns() []string {
- columns := make([]string, len(rows.columns))
+ if rows.rs.columnNames != nil {
+ return rows.rs.columnNames
+ }
+
+ columns := make([]string, len(rows.rs.columns))
if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
for i := range columns {
- if tableName := rows.columns[i].tableName; len(tableName) > 0 {
- columns[i] = tableName + "." + rows.columns[i].name
+ if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.rs.columns[i].name
} else {
- columns[i] = rows.columns[i].name
+ columns[i] = rows.rs.columns[i].name
}
}
} else {
for i := range columns {
- columns[i] = rows.columns[i].name
+ columns[i] = rows.rs.columns[i].name
}
}
+
+ rows.rs.columnNames = columns
return columns
}
-func (rows *mysqlRows) Close() error {
+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+ return rows.rs.columns[i].typeDatabaseName()
+}
+
+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+// return int64(rows.rs.columns[i].length), true
+// }
+
+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+ return rows.rs.columns[i].flags&flagNotNULL == 0, true
+}
+
+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+ column := rows.rs.columns[i]
+ decimals := int64(column.decimals)
+
+ switch column.fieldType {
+ case fieldTypeDecimal, fieldTypeNewDecimal:
+ if decimals > 0 {
+ return int64(column.length) - 2, decimals, true
+ }
+ return int64(column.length) - 1, decimals, true
+ case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+ return decimals, decimals, true
+ case fieldTypeFloat, fieldTypeDouble:
+ if decimals == 0x1f {
+ return math.MaxInt64, math.MaxInt64, true
+ }
+ return math.MaxInt64, decimals, true
+ }
+
+ return 0, 0, false
+}
+
+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+ return rows.rs.columns[i].scanType()
+}
+
+func (rows *mysqlRows) Close() (err error) {
+ if f := rows.finish; f != nil {
+ f()
+ rows.finish = nil
+ }
+
mc := rows.mc
if mc == nil {
return nil
}
- if mc.netConn == nil {
- return ErrInvalidConn
+ if err := mc.error(); err != nil {
+ return err
}
// Remove unread packets from stream
- err := mc.readUntilEOF()
+ if !rows.rs.done {
+ err = mc.readUntilEOF()
+ }
if err == nil {
if err = mc.discardResults(); err != nil {
return err
@@ -75,22 +125,66 @@ func (rows *mysqlRows) Close() error {
return err
}
-func (rows *binaryRows) Next(dest []driver.Value) error {
- if mc := rows.mc; mc != nil {
- if mc.netConn == nil {
- return ErrInvalidConn
+func (rows *mysqlRows) HasNextResultSet() (b bool) {
+ if rows.mc == nil {
+ return false
+ }
+ return rows.mc.status&statusMoreResultsExists != 0
+}
+
+func (rows *mysqlRows) nextResultSet() (int, error) {
+ if rows.mc == nil {
+ return 0, io.EOF
+ }
+ if err := rows.mc.error(); err != nil {
+ return 0, err
+ }
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ if err := rows.mc.readUntilEOF(); err != nil {
+ return 0, err
}
+ rows.rs.done = true
+ }
- // Fetch next row from stream
- return rows.readRow(dest)
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ return 0, io.EOF
}
- return io.EOF
+ rows.rs = resultSet{}
+ return rows.mc.readResultSetHeaderPacket()
}
-func (rows *textRows) Next(dest []driver.Value) error {
+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+ for {
+ resLen, err := rows.nextResultSet()
+ if err != nil {
+ return 0, err
+ }
+
+ if resLen > 0 {
+ return resLen, nil
+ }
+
+ rows.rs.done = true
+ }
+}
+
+func (rows *binaryRows) NextResultSet() error {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *binaryRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
- if mc.netConn == nil {
- return ErrInvalidConn
+ if err := mc.error(); err != nil {
+ return err
}
// Fetch next row from stream
@@ -99,14 +193,24 @@ func (rows *textRows) Next(dest []driver.Value) error {
return io.EOF
}
-func (rows emptyRows) Columns() []string {
- return nil
-}
+func (rows *textRows) NextResultSet() (err error) {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
-func (rows emptyRows) Close() error {
- return nil
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
}
-func (rows emptyRows) Next(dest []driver.Value) error {
+func (rows *textRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
return io.EOF
}
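
rows.go now tracks per-result-set state in resultSet and implements driver.RowsNextResultSet, so database/sql can walk multiple result sets on one cursor. A caller-side sketch (requires multiStatements=true in the DSN; the statements are illustrative):

	package main

	import (
		"database/sql"
		"fmt"
		"log"

		_ "github.com/go-sql-driver/mysql"
	)

	func main() {
		db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/app?multiStatements=true")
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		rows, err := db.Query("SELECT 1; SELECT 2, 3")
		if err != nil {
			log.Fatal(err)
		}
		defer rows.Close()

		for {
			for rows.Next() {
				cols, _ := rows.Columns()
				vals := make([]interface{}, len(cols))
				ptrs := make([]interface{}, len(cols))
				for i := range vals {
					ptrs[i] = &vals[i]
				}
				if err := rows.Scan(ptrs...); err != nil {
					log.Fatal(err)
				}
				fmt.Println(vals)
			}
			// NextResultSet advances past empty result sets too
			// (nextNotEmptyResultSet above) and reports false at io.EOF.
			if !rows.NextResultSet() {
				break
			}
		}
		if err := rows.Err(); err != nil {
			log.Fatal(err)
		}
	}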
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
index 7f9b04585..98e57bcd8 100644
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -11,6 +11,7 @@ package mysql
import (
"database/sql/driver"
"fmt"
+ "io"
"reflect"
"strconv"
)
@@ -19,11 +20,10 @@ type mysqlStmt struct {
mc *mysqlConn
id uint32
paramCount int
- columns []mysqlField // cached from the first query
}
func (stmt *mysqlStmt) Close() error {
- if stmt.mc == nil || stmt.mc.netConn == nil {
+ if stmt.mc == nil || stmt.mc.closed.IsSet() {
// driver.Stmt.Close can be called more than once, thus this function
// has to be idempotent.
// See also Issue #450 and golang/go#16019.
@@ -45,14 +45,14 @@ func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
}
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
- if stmt.mc.netConn == nil {
+ if stmt.mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := stmt.writeExecutePacket(args)
if err != nil {
- return nil, err
+ return nil, stmt.mc.markBadConn(err)
}
mc := stmt.mc
@@ -62,37 +62,45 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
- if err == nil {
- if resLen > 0 {
- // Columns
- err = mc.readUntilEOF()
- if err != nil {
- return nil, err
- }
-
- // Rows
- err = mc.readUntilEOF()
+ if err != nil {
+ return nil, err
+ }
+
+ if resLen > 0 {
+ // Columns
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
}
- if err == nil {
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, nil
+
+ // Rows
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
}
}
- return nil, err
+ if err := mc.discardResults(); err != nil {
+ return nil, err
+ }
+
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, nil
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
- if stmt.mc.netConn == nil {
+ return stmt.query(args)
+}
+
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+ if stmt.mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := stmt.writeExecutePacket(args)
if err != nil {
- return nil, err
+ return nil, stmt.mc.markBadConn(err)
}
mc := stmt.mc
@@ -107,14 +115,15 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
if resLen > 0 {
rows.mc = mc
- // Columns
- // If not cached, read them and cache them
- if stmt.columns == nil {
- rows.columns, err = mc.readColumns(resLen)
- stmt.columns = rows.columns
- } else {
- rows.columns = stmt.columns
- err = mc.readUntilEOF()
+ rows.rs.columns, err = mc.readColumns(resLen)
+ } else {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
}
}
@@ -128,6 +137,12 @@ func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
return v, nil
}
+ if v != nil {
+ if valuer, ok := v.(driver.Valuer); ok {
+ return valuer.Value()
+ }
+ }
+
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.Ptr:
@@ -148,6 +163,16 @@ func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
return int64(u64), nil
case reflect.Float32, reflect.Float64:
return rv.Float(), nil
+ case reflect.Bool:
+ return rv.Bool(), nil
+ case reflect.Slice:
+ ek := rv.Type().Elem().Kind()
+ if ek == reflect.Uint8 {
+ return rv.Bytes(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek)
+ case reflect.String:
+ return rv.String(), nil
}
return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
}
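
The converter now checks for driver.Valuer before falling back to reflection, and accepts derived bool, string, and byte-slice kinds; statement_test.go below covers the derived-type cases. A sketch of a caller-side Valuer flowing through the prepared-statement path (type and table are hypothetical):

	package main

	import (
		"database/sql"
		"database/sql/driver"
		"log"
		"strings"

		_ "github.com/go-sql-driver/mysql"
	)

	// CSV is a hypothetical application type; implementing driver.Valuer is
	// enough for the driver's ColumnConverter to serialize it.
	type CSV []string

	func (c CSV) Value() (driver.Value, error) {
		return strings.Join(c, ","), nil
	}

	func main() {
		db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/app")
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		// The prepared-statement path calls converter.ConvertValue, which now
		// resolves the Valuer before the reflection-based kind switch.
		if _, err := db.Exec("INSERT INTO tags (list) VALUES (?)", CSV{"a", "b"}); err != nil {
			log.Fatal(err)
		}
	}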
diff --git a/vendor/github.com/go-sql-driver/mysql/statement_test.go b/vendor/github.com/go-sql-driver/mysql/statement_test.go
new file mode 100644
index 000000000..98a6c1933
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/statement_test.go
@@ -0,0 +1,126 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestConvertDerivedString(t *testing.T) {
+ type derived string
+
+ output, err := converter{}.ConvertValue(derived("value"))
+ if err != nil {
+ t.Fatal("Derived string type not convertible", err)
+ }
+
+ if output != "value" {
+ t.Fatalf("Derived string type not converted, got %#v %T", output, output)
+ }
+}
+
+func TestConvertDerivedByteSlice(t *testing.T) {
+ type derived []uint8
+
+ output, err := converter{}.ConvertValue(derived("value"))
+ if err != nil {
+ t.Fatal("Byte slice not convertible", err)
+ }
+
+ if bytes.Compare(output.([]byte), []byte("value")) != 0 {
+ t.Fatalf("Byte slice not converted, got %#v %T", output, output)
+ }
+}
+
+func TestConvertDerivedUnsupportedSlice(t *testing.T) {
+ type derived []int
+
+ _, err := converter{}.ConvertValue(derived{1})
+ if err == nil || err.Error() != "unsupported type mysql.derived, a slice of int" {
+ t.Fatal("Unexpected error", err)
+ }
+}
+
+func TestConvertDerivedBool(t *testing.T) {
+ type derived bool
+
+ output, err := converter{}.ConvertValue(derived(true))
+ if err != nil {
+ t.Fatal("Derived bool type not convertible", err)
+ }
+
+ if output != true {
+ t.Fatalf("Derived bool type not converted, got %#v %T", output, output)
+ }
+}
+
+func TestConvertPointer(t *testing.T) {
+ str := "value"
+
+ output, err := converter{}.ConvertValue(&str)
+ if err != nil {
+ t.Fatal("Pointer type not convertible", err)
+ }
+
+ if output != "value" {
+ t.Fatalf("Pointer type not converted, got %#v %T", output, output)
+ }
+}
+
+func TestConvertSignedIntegers(t *testing.T) {
+ values := []interface{}{
+ int8(-42),
+ int16(-42),
+ int32(-42),
+ int64(-42),
+ int(-42),
+ }
+
+ for _, value := range values {
+ output, err := converter{}.ConvertValue(value)
+ if err != nil {
+ t.Fatalf("%T type not convertible %s", value, err)
+ }
+
+ if output != int64(-42) {
+ t.Fatalf("%T type not converted, got %#v %T", value, output, output)
+ }
+ }
+}
+
+func TestConvertUnsignedIntegers(t *testing.T) {
+ values := []interface{}{
+ uint8(42),
+ uint16(42),
+ uint32(42),
+ uint64(42),
+ uint(42),
+ }
+
+ for _, value := range values {
+ output, err := converter{}.ConvertValue(value)
+ if err != nil {
+ t.Fatalf("%T type not convertible %s", value, err)
+ }
+
+ if output != int64(42) {
+ t.Fatalf("%T type not converted, got %#v %T", value, output, output)
+ }
+ }
+
+ output, err := converter{}.ConvertValue(^uint64(0))
+ if err != nil {
+ t.Fatal("uint64 high-bit not convertible", err)
+ }
+
+ if output != "18446744073709551615" {
+ t.Fatalf("uint64 high-bit not converted, got %#v %T", output, output)
+ }
+}
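
The last test above pins down an edge case worth spelling out: driver.Value can carry signed int64 only, so the full uint64 range does not fit. As the expected output shows, values with the high bit set are passed on as decimal strings instead. A standalone illustration of the boundary:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	// int64 tops out at 1<<63 - 1; anything above must take another route.
	fmt.Println(math.MaxInt64) // 9223372036854775807

	u := ^uint64(0) // 18446744073709551615, does not fit in int64
	if u >= 1<<63 {
		// The converter falls back to a decimal string for these values,
		// matching the "18446744073709551615" expected by the test.
		fmt.Println(strconv.FormatUint(u, 10))
	}
}
```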
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
index 33c749b35..417d72793 100644
--- a/vendor/github.com/go-sql-driver/mysql/transaction.go
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -13,7 +13,7 @@ type mysqlTx struct {
}
func (tx *mysqlTx) Commit() (err error) {
- if tx.mc == nil || tx.mc.netConn == nil {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
return ErrInvalidConn
}
err = tx.mc.exec("COMMIT")
@@ -22,7 +22,7 @@ func (tx *mysqlTx) Commit() (err error) {
}
func (tx *mysqlTx) Rollback() (err error) {
- if tx.mc == nil || tx.mc.netConn == nil {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
return ErrInvalidConn
}
err = tx.mc.exec("ROLLBACK")
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
index d523b7ffd..a92a4029b 100644
--- a/vendor/github.com/go-sql-driver/mysql/utils.go
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -16,16 +16,21 @@ import (
"fmt"
"io"
"strings"
+ "sync"
+ "sync/atomic"
"time"
)
var (
+ tlsConfigLock sync.RWMutex
tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
)
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
// Use the key as a value in the DSN where tls=value.
//
+// Note: The tls.Config provided to this function needs to be exclusively owned by the driver after registering.
+//
// rootCertPool := x509.NewCertPool()
// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
// if err != nil {
@@ -51,19 +56,32 @@ func RegisterTLSConfig(key string, config *tls.Config) error {
return fmt.Errorf("key '%s' is reserved", key)
}
+ tlsConfigLock.Lock()
if tlsConfigRegister == nil {
tlsConfigRegister = make(map[string]*tls.Config)
}
tlsConfigRegister[key] = config
+ tlsConfigLock.Unlock()
return nil
}
// DeregisterTLSConfig removes the tls.Config associated with key.
func DeregisterTLSConfig(key string) {
+ tlsConfigLock.Lock()
if tlsConfigRegister != nil {
delete(tlsConfigRegister, key)
}
+ tlsConfigLock.Unlock()
+}
+
+func getTLSConfigClone(key string) (config *tls.Config) {
+ tlsConfigLock.RLock()
+ if v, ok := tlsConfigRegister[key]; ok {
+ config = cloneTLSConfig(v)
+ }
+ tlsConfigLock.RUnlock()
+ return
}
// Returns the bool value of the input.
@@ -548,8 +566,8 @@ func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
if len(b) == 0 {
return 0, true, 1
}
- switch b[0] {
+ switch b[0] {
// 251: NULL
case 0xfb:
return 0, true, 1
@@ -738,3 +756,67 @@ func escapeStringQuotes(buf []byte, v string) []byte {
return buf[:pos]
}
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+
+// atomicBool is a wrapper around uint32 for usage as a boolean value with
+// atomic access.
+type atomicBool struct {
+ _noCopy noCopy
+ value uint32
+}
+
+// IsSet returns whether the current boolean value is true
+func (ab *atomicBool) IsSet() bool {
+ return atomic.LoadUint32(&ab.value) > 0
+}
+
+// Set sets the value of the bool regardless of the previous value
+func (ab *atomicBool) Set(value bool) {
+ if value {
+ atomic.StoreUint32(&ab.value, 1)
+ } else {
+ atomic.StoreUint32(&ab.value, 0)
+ }
+}
+
+// TrySet sets the value of the bool and returns whether the value changed
+func (ab *atomicBool) TrySet(value bool) bool {
+ if value {
+ return atomic.SwapUint32(&ab.value, 1) == 0
+ }
+ return atomic.SwapUint32(&ab.value, 0) > 0
+}
+
+// atomicError is a wrapper for atomically accessed error values
+type atomicError struct {
+ _noCopy noCopy
+ value atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil
+func (ae *atomicError) Set(value error) {
+ ae.value.Store(value)
+}
+
+// Value returns the current error value
+func (ae *atomicError) Value() error {
+ if v := ae.value.Load(); v != nil {
+ // this will panic if the value doesn't implement the error interface
+ return v.(error)
+ }
+ return nil
+}
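
The sync utilities above replace the `netConn == nil` liveness checks seen earlier in this diff with lock-free flags. A self-contained sketch of the same pattern, reproduced outside the driver for illustration:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// closedFlag mirrors the driver's atomicBool: a uint32 with atomic access.
type closedFlag struct{ v uint32 }

func (c *closedFlag) IsSet() bool { return atomic.LoadUint32(&c.v) > 0 }

// TrySet flips the flag and reports whether the value actually changed,
// which lets exactly one goroutine win a close race.
func (c *closedFlag) TrySet(b bool) bool {
	if b {
		return atomic.SwapUint32(&c.v, 1) == 0
	}
	return atomic.SwapUint32(&c.v, 0) > 0
}

func main() {
	var closed closedFlag
	fmt.Println(closed.TrySet(true)) // true: this caller performed the close
	fmt.Println(closed.TrySet(true)) // false: already closed
	fmt.Println(closed.IsSet())      // true
}
```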
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go17.go b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
new file mode 100644
index 000000000..f59563456
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
@@ -0,0 +1,40 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.7
+// +build !go1.8
+
+package mysql
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
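
The field-by-field copy above is only needed on Go 1.7, where tls.Config has no Clone method; on 1.8+ the driver delegates to c.Clone() (next file). The clone exists because a registered config becomes driver-owned, per the note added to RegisterTLSConfig. A usage sketch, with the certificate path as a placeholder:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	rootCertPool := x509.NewCertPool()
	pem, err := ioutil.ReadFile("/path/ca-cert.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
		log.Fatal("failed to append PEM")
	}
	// After registration the config is owned by the driver, which is why
	// getTLSConfigClone hands out copies rather than the original.
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
		log.Fatal(err)
	}
	// The DSN then selects it with tls=custom, e.g.
	// "user:password@tcp(host:3306)/dbname?tls=custom"
}
```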
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go18.go b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
new file mode 100644
index 000000000..7d8c9b16e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
@@ -0,0 +1,49 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+	"crypto/tls"
+	"database/sql"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+)
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+ dargs := make([]driver.Value, len(named))
+ for n, param := range named {
+ if len(param.Name) > 0 {
+ // TODO: support the use of Named Parameters #561
+ return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+ }
+ dargs[n] = param.Value
+ }
+ return dargs, nil
+}
+
+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+ switch sql.IsolationLevel(level) {
+ case sql.LevelRepeatableRead:
+ return "REPEATABLE READ", nil
+ case sql.LevelReadCommitted:
+ return "READ COMMITTED", nil
+ case sql.LevelReadUncommitted:
+ return "READ UNCOMMITTED", nil
+ case sql.LevelSerializable:
+ return "SERIALIZABLE", nil
+ default:
+ return "", errors.New("mysql: unsupported isolation level: " + string(level))
+ }
+}
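
mapIsolationLevel backs the driver's Go 1.8 BeginTx support; from application code the mapping is reached through sql.TxOptions. A sketch under placeholder assumptions (the DSN is fake and the call needs a reachable server):

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The driver translates LevelSerializable to "SERIALIZABLE" via
	// mapIsolationLevel before starting the transaction.
	tx, err := db.BeginTx(context.Background(), &sql.TxOptions{
		Isolation: sql.LevelSerializable,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()
}
```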
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go18_test.go b/vendor/github.com/go-sql-driver/mysql/utils_go18_test.go
new file mode 100644
index 000000000..856c25f56
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go18_test.go
@@ -0,0 +1,54 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "testing"
+)
+
+func TestIsolationLevelMapping(t *testing.T) {
+
+ data := []struct {
+ level driver.IsolationLevel
+ expected string
+ }{
+ {
+ level: driver.IsolationLevel(sql.LevelReadCommitted),
+ expected: "READ COMMITTED",
+ },
+ {
+ level: driver.IsolationLevel(sql.LevelRepeatableRead),
+ expected: "REPEATABLE READ",
+ },
+ {
+ level: driver.IsolationLevel(sql.LevelReadUncommitted),
+ expected: "READ UNCOMMITTED",
+ },
+ {
+ level: driver.IsolationLevel(sql.LevelSerializable),
+ expected: "SERIALIZABLE",
+ },
+ }
+
+ for i, td := range data {
+ if actual, err := mapIsolationLevel(td.level); actual != td.expected || err != nil {
+ t.Fatal(i, td.expected, actual, err)
+ }
+ }
+
+ // check unsupported mapping
+ if actual, err := mapIsolationLevel(driver.IsolationLevel(sql.LevelLinearizable)); actual != "" || err == nil {
+ t.Fatal("Expected error on unsupported isolation level")
+ }
+
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_test.go b/vendor/github.com/go-sql-driver/mysql/utils_test.go
index 0d6c6684f..0041892db 100644
--- a/vendor/github.com/go-sql-driver/mysql/utils_test.go
+++ b/vendor/github.com/go-sql-driver/mysql/utils_test.go
@@ -195,3 +195,83 @@ func TestEscapeQuotes(t *testing.T) {
expect("foo''bar", "foo'bar") // affected
expect("foo\"bar", "foo\"bar") // not affected
}
+
+func TestAtomicBool(t *testing.T) {
+ var ab atomicBool
+ if ab.IsSet() {
+ t.Fatal("Expected value to be false")
+ }
+
+ ab.Set(true)
+ if ab.value != 1 {
+ t.Fatal("Set(true) did not set value to 1")
+ }
+ if !ab.IsSet() {
+ t.Fatal("Expected value to be true")
+ }
+
+ ab.Set(true)
+ if !ab.IsSet() {
+ t.Fatal("Expected value to be true")
+ }
+
+ ab.Set(false)
+ if ab.value != 0 {
+ t.Fatal("Set(false) did not set value to 0")
+ }
+ if ab.IsSet() {
+ t.Fatal("Expected value to be false")
+ }
+
+ ab.Set(false)
+ if ab.IsSet() {
+ t.Fatal("Expected value to be false")
+ }
+ if ab.TrySet(false) {
+ t.Fatal("Expected TrySet(false) to fail")
+ }
+ if !ab.TrySet(true) {
+ t.Fatal("Expected TrySet(true) to succeed")
+ }
+ if !ab.IsSet() {
+ t.Fatal("Expected value to be true")
+ }
+
+ ab.Set(true)
+ if !ab.IsSet() {
+ t.Fatal("Expected value to be true")
+ }
+ if ab.TrySet(true) {
+ t.Fatal("Expected TrySet(true) to fail")
+ }
+ if !ab.TrySet(false) {
+ t.Fatal("Expected TrySet(false) to succeed")
+ }
+ if ab.IsSet() {
+ t.Fatal("Expected value to be false")
+ }
+
+ ab._noCopy.Lock() // we've "tested" it ¯\_(ツ)_/¯
+}
+
+func TestAtomicError(t *testing.T) {
+ var ae atomicError
+ if ae.Value() != nil {
+ t.Fatal("Expected value to be nil")
+ }
+
+ ae.Set(ErrMalformPkt)
+ if v := ae.Value(); v != ErrMalformPkt {
+ if v == nil {
+ t.Fatal("Value is still nil")
+ }
+ t.Fatal("Error did not match")
+ }
+ ae.Set(ErrPktSync)
+ if ae.Value() == ErrMalformPkt {
+ t.Fatal("Error still matches old error")
+ }
+ if v := ae.Value(); v != ErrPktSync {
+ t.Fatal("Error did not match")
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
index dfdfc5b30..110ae1384 100644
--- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
@@ -193,7 +193,8 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU
// "Generated output always contains 3, 6, or 9 fractional digits,
// depending on required precision."
s, ns := s.Field(0).Int(), s.Field(1).Int()
- x := fmt.Sprintf("%d.%09d", s, ns)
+ d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond
+ x := fmt.Sprintf("%.9f", d.Seconds())
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
out.write(`"`)
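
Formatting through float64 seconds also explains the test case deleted in the next file's diff: %.9f cannot represent 100000000.000000001 exactly, because float64 carries only about 15 to 17 significant decimal digits. A quick illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Small values round-trip fine through float64 seconds.
	d := 3*time.Second + 500*time.Millisecond
	fmt.Printf("%.9f\n", d.Seconds()) // 3.500000000

	// Large second counts exceed float64 precision, so the final
	// nanosecond digit is lost; hence the dropped test case.
	big := 100000000*time.Second + 1*time.Nanosecond
	fmt.Printf("%.9f\n", big.Seconds()) // 100000000.000000000
}
```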
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go
index 4fdbde15a..2428d0566 100644
--- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go
@@ -407,7 +407,6 @@ var marshalingTests = []struct {
{"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON},
{"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON},
{"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3.000s"}`},
- {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 100000000, Nanos: 1}}, `{"dur":"100000000.000000001s"}`},
{"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{
Fields: map[string]*stpb.Value{
"one": {Kind: &stpb.Value_StringValue{"loneliest number"}},
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 000000000..bd0e3bb4c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,151 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+ discardLegacy(m)
+}
+
+func discardLegacy(m Message) {
+ v := reflect.ValueOf(m)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return
+ }
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ vf := v.Field(i)
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+ case isSlice: // E.g., []*pb.T
+ for j := 0; j < vf.Len(); j++ {
+ discardLegacy(vf.Index(j).Interface().(Message))
+ }
+ default: // E.g., *pb.T
+ discardLegacy(vf.Interface().(Message))
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+ default: // E.g., map[K]V
+ tv := vf.Type().Elem()
+ if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+ for _, key := range vf.MapKeys() {
+ val := vf.MapIndex(key)
+ discardLegacy(val.Interface().(Message))
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+ default: // E.g., test_proto.isCommunique_Union interface
+ if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+ vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+ if !vf.IsNil() {
+ vf = vf.Elem() // E.g., test_proto.Communique_Msg
+ vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+ if vf.Kind() == reflect.Ptr {
+ discardLegacy(vf.Interface().(Message))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+ if vf.Type() != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ vf.Set(reflect.ValueOf([]byte(nil)))
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, ok := extendable(m); ok {
+ // Ignore lock since discardLegacy is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ discardLegacy(m)
+ }
+ }
+ }
+}
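
A hedged sketch of where DiscardUnknown fits: re-encoding a message that was decoded from bytes produced by a newer schema. The roundTrip helper is illustrative, not part of the package; any generated proto.Message works as msg:

```go
package main

import (
	"github.com/golang/protobuf/proto"
)

// roundTrip decodes raw bytes into msg, drops any fields the current
// schema does not recognize, and re-encodes the result. Without the
// DiscardUnknown call, unknown tags would be preserved and re-emitted.
func roundTrip(raw []byte, msg proto.Message) ([]byte, error) {
	if err := proto.Unmarshal(raw, msg); err != nil {
		return nil, err
	}
	proto.DiscardUnknown(msg)
	return proto.Marshal(msg)
}

func main() {
	// The raw bytes and a concrete message type would come from generated code.
	_ = roundTrip
}
```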
diff --git a/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..232be82e4
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md
@@ -0,0 +1,11 @@
+**What version of Go are you running?** (Paste the output of `go version`)
+
+
+**What version of gorilla/mux are you using?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`)
+
+
+**Describe your problem** (and what you have tried so far)
+
+
+**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it)
+
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
index 67a79e00a..f9b3103f0 100644
--- a/vendor/github.com/gorilla/mux/README.md
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -27,6 +27,8 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
* [Static Files](#static-files)
* [Registered URLs](#registered-urls)
* [Walking Routes](#walking-routes)
+* [Graceful Shutdown](#graceful-shutdown)
+* [Middleware](#middleware)
* [Full Example](#full-example)
---
@@ -45,11 +47,11 @@ Let's start registering a couple of URL paths and handlers:
```go
func main() {
- r := mux.NewRouter()
- r.HandleFunc("/", HomeHandler)
- r.HandleFunc("/products", ProductsHandler)
- r.HandleFunc("/articles", ArticlesHandler)
- http.Handle("/", r)
+ r := mux.NewRouter()
+ r.HandleFunc("/", HomeHandler)
+ r.HandleFunc("/products", ProductsHandler)
+ r.HandleFunc("/articles", ArticlesHandler)
+ http.Handle("/", r)
}
```
@@ -68,9 +70,9 @@ The names are used to create a map of route variables which can be retrieved cal
```go
func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- w.WriteHeader(http.StatusOK)
- fmt.Fprintf(w, "Category: %v\n", vars["category"])
+ vars := mux.Vars(r)
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintf(w, "Category: %v\n", vars["category"])
}
```
@@ -122,7 +124,7 @@ r.Queries("key", "value")
```go
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
- return r.ProtoMajor == 0
+ return r.ProtoMajor == 0
})
```
@@ -243,24 +245,24 @@ request that matches "/static/*". This makes it easy to serve static files with
```go
func main() {
- var dir string
+ var dir string
- flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
- flag.Parse()
- r := mux.NewRouter()
+ flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+ flag.Parse()
+ r := mux.NewRouter()
- // This will serve files under http://localhost:8000/static/<filename>
- r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+ // This will serve files under http://localhost:8000/static/<filename>
+ r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
- srv := &http.Server{
- Handler: r,
- Addr: "127.0.0.1:8000",
- // Good practice: enforce timeouts for servers you create!
- WriteTimeout: 15 * time.Second,
- ReadTimeout: 15 * time.Second,
- }
+ srv := &http.Server{
+ Handler: r,
+ Addr: "127.0.0.1:8000",
+ // Good practice: enforce timeouts for servers you create!
+ WriteTimeout: 15 * time.Second,
+ ReadTimeout: 15 * time.Second,
+ }
- log.Fatal(srv.ListenAndServe())
+ log.Fatal(srv.ListenAndServe())
}
```
@@ -383,6 +385,149 @@ r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error
})
```
+### Graceful Shutdown
+
+Go 1.8 introduced the ability to [gracefully shut down](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
+
+```go
+package main
+
+import (
+ "context"
+ "flag"
+ "log"
+ "net/http"
+ "os"
+ "os/signal"
+
+ "github.com/gorilla/mux"
+)
+
+func main() {
+ var wait time.Duration
+ flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
+ flag.Parse()
+
+ r := mux.NewRouter()
+ // Add your routes as needed
+
+ srv := &http.Server{
+ Addr: "0.0.0.0:8080",
+ // Good practice to set timeouts to avoid Slowloris attacks.
+ WriteTimeout: time.Second * 15,
+ ReadTimeout: time.Second * 15,
+ IdleTimeout: time.Second * 60,
+ Handler: r, // Pass our instance of gorilla/mux in.
+ }
+
+ // Run our server in a goroutine so that it doesn't block.
+ go func() {
+ if err := srv.ListenAndServe(); err != nil {
+ log.Println(err)
+ }
+ }()
+
+ c := make(chan os.Signal, 1)
+ // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
+	// SIGKILL, SIGQUIT (Ctrl+\) or SIGTERM will not be caught.
+ signal.Notify(c, os.Interrupt)
+
+ // Block until we receive our signal.
+ <-c
+
+ // Create a deadline to wait for.
+	ctx, cancel := context.WithTimeout(context.Background(), wait)
+	defer cancel()
+ // Doesn't block if no connections, but will otherwise wait
+ // until the timeout deadline.
+ srv.Shutdown(ctx)
+ // Optionally, you could run srv.Shutdown in a goroutine and block on
+ // <-ctx.Done() if your application should wait for other services
+ // to finalize based on context cancellation.
+ log.Println("shutting down")
+ os.Exit(0)
+}
+```
+
+### Middleware
+
+Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added whenever a match is found, including matches on subrouters.
+Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
+
+Mux middlewares are defined using the de facto standard type:
+
+```go
+type MiddlewareFunc func(http.Handler) http.Handler
+```
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers.
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+```go
+func simpleMw(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Do stuff here
+ log.Println(r.RequestURI)
+ // Call the next handler, which can be another middleware in the chain, or the final handler.
+ next.ServeHTTP(w, r)
+ })
+}
+```
+
+Middlewares can be added to a router using `Router.Use()`:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+r.Use(simpleMw)
+```
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+```go
+// Define our struct
+type authenticationMiddleware struct {
+ tokenUsers map[string]string
+}
+
+// Initialize it somewhere
+func (amw *authenticationMiddleware) Populate() {
+ amw.tokenUsers["00000000"] = "user0"
+ amw.tokenUsers["aaaaaaaa"] = "userA"
+ amw.tokenUsers["05f717e5"] = "randomUser"
+ amw.tokenUsers["deadbeef"] = "user0"
+}
+
+// Middleware function, which will be called for each request
+func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ token := r.Header.Get("X-Session-Token")
+
+ if user, found := amw.tokenUsers[token]; found {
+ // We found the token in our map
+ log.Printf("Authenticated user %s\n", user)
+ // Pass down the request to the next middleware (or final handler)
+ next.ServeHTTP(w, r)
+ } else {
+ // Write an error and stop the handler chain
+ http.Error(w, "Forbidden", 403)
+ }
+ })
+}
+```
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+
+amw := authenticationMiddleware{}
+amw.Populate()
+
+r.Use(amw.Middleware)
+```
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares *should* write to `ResponseWriter` if they *are* going to terminate the request, and they *should not* write to `ResponseWriter` if they *are not* going to terminate it.
+
## Full Example
Here's a complete, runnable example of a small `mux` based server:
@@ -391,22 +536,22 @@ Here's a complete, runnable example of a small `mux` based server:
package main
import (
- "net/http"
- "log"
- "github.com/gorilla/mux"
+ "net/http"
+ "log"
+ "github.com/gorilla/mux"
)
func YourHandler(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("Gorilla!\n"))
+ w.Write([]byte("Gorilla!\n"))
}
func main() {
- r := mux.NewRouter()
- // Routes consist of a path and a handler function.
- r.HandleFunc("/", YourHandler)
+ r := mux.NewRouter()
+ // Routes consist of a path and a handler function.
+ r.HandleFunc("/", YourHandler)
- // Bind to a port and pass our router in
- log.Fatal(http.ListenAndServe(":8000", r))
+ // Bind to a port and pass our router in
+ log.Fatal(http.ListenAndServe(":8000", r))
}
```
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
index cce30b2f0..013f08898 100644
--- a/vendor/github.com/gorilla/mux/doc.go
+++ b/vendor/github.com/gorilla/mux/doc.go
@@ -238,5 +238,70 @@ as well:
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
+
+Since **vX.Y.Z**, mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed if a
+match is found (including subrouters). Middlewares are defined using the de facto standard type:
+
+ type MiddlewareFunc func(http.Handler) http.Handler
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+ func simpleMw(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Do stuff here
+ log.Println(r.RequestURI)
+ // Call the next handler, which can be another middleware in the chain, or the final handler.
+ next.ServeHTTP(w, r)
+ })
+ }
+
+Middlewares can be added to a router using `Router.Use()`:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+	r.Use(simpleMw)
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+ // Define our struct
+ type authenticationMiddleware struct {
+ tokenUsers map[string]string
+ }
+
+ // Initialize it somewhere
+ func (amw *authenticationMiddleware) Populate() {
+ amw.tokenUsers["00000000"] = "user0"
+ amw.tokenUsers["aaaaaaaa"] = "userA"
+ amw.tokenUsers["05f717e5"] = "randomUser"
+ amw.tokenUsers["deadbeef"] = "user0"
+ }
+
+ // Middleware function, which will be called for each request
+ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ token := r.Header.Get("X-Session-Token")
+
+ if user, found := amw.tokenUsers[token]; found {
+ // We found the token in our map
+ log.Printf("Authenticated user %s\n", user)
+ next.ServeHTTP(w, r)
+ } else {
+ http.Error(w, "Forbidden", 403)
+ }
+ })
+ }
+
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+
+ amw := authenticationMiddleware{}
+ amw.Populate()
+
+ r.Use(amw.Middleware)
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
+
*/
package mux
diff --git a/vendor/github.com/gorilla/mux/example_route_test.go b/vendor/github.com/gorilla/mux/example_route_test.go
new file mode 100644
index 000000000..112557071
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/example_route_test.go
@@ -0,0 +1,51 @@
+package mux_test
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/gorilla/mux"
+)
+
+// This example demonstrates setting a regular expression matcher for
+// the header value. A plain word will match any value that contains a
+// matching substring as if the pattern was wrapped with `.*`.
+func ExampleRoute_HeadersRegexp() {
+ r := mux.NewRouter()
+ route := r.NewRoute().HeadersRegexp("Accept", "html")
+
+ req1, _ := http.NewRequest("GET", "example.com", nil)
+ req1.Header.Add("Accept", "text/plain")
+ req1.Header.Add("Accept", "text/html")
+
+ req2, _ := http.NewRequest("GET", "example.com", nil)
+ req2.Header.Set("Accept", "application/xhtml+xml")
+
+ matchInfo := &mux.RouteMatch{}
+ fmt.Printf("Match: %v %q\n", route.Match(req1, matchInfo), req1.Header["Accept"])
+ fmt.Printf("Match: %v %q\n", route.Match(req2, matchInfo), req2.Header["Accept"])
+ // Output:
+ // Match: true ["text/plain" "text/html"]
+ // Match: true ["application/xhtml+xml"]
+}
+
+// This example demonstrates setting a strict regular expression matcher
+// for the header value. Using the start and end of string anchors, the
+// value must be an exact match.
+func ExampleRoute_HeadersRegexp_exactMatch() {
+ r := mux.NewRouter()
+ route := r.NewRoute().HeadersRegexp("Origin", "^https://example.co$")
+
+ yes, _ := http.NewRequest("GET", "example.co", nil)
+ yes.Header.Set("Origin", "https://example.co")
+
+ no, _ := http.NewRequest("GET", "example.co.uk", nil)
+ no.Header.Set("Origin", "https://example.co.uk")
+
+ matchInfo := &mux.RouteMatch{}
+ fmt.Printf("Match: %v %q\n", route.Match(yes, matchInfo), yes.Header["Origin"])
+ fmt.Printf("Match: %v %q\n", route.Match(no, matchInfo), no.Header["Origin"])
+ // Output:
+ // Match: true ["https://example.co"]
+ // Match: false ["https://example.co.uk"]
+}
diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go
new file mode 100644
index 000000000..8f898675e
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/middleware.go
@@ -0,0 +1,28 @@
+package mux
+
+import "net/http"
+
+// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
+// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
+// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
+type MiddlewareFunc func(http.Handler) http.Handler
+
+// middleware is an interface matched by anything with a Middleware method of the MiddlewareFunc signature.
+type middleware interface {
+ Middleware(handler http.Handler) http.Handler
+}
+
+// MiddlewareFunc also implements the middleware interface.
+func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
+ return mw(handler)
+}
+
+// Use appends a MiddlewareFunc to the chain. Middlewares can be used to intercept or otherwise modify requests and/or responses; they are executed in the order that they are applied to the Router.
+func (r *Router) Use(mwf MiddlewareFunc) {
+ r.middlewares = append(r.middlewares, mwf)
+}
+
+// useInterface appends a middleware to the chain. Middlewares can be used to intercept or otherwise modify requests and/or responses; they are executed in the order that they are applied to the Router.
+func (r *Router) useInterface(mw middleware) {
+ r.middlewares = append(r.middlewares, mw)
+}
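
MiddlewareFunc satisfying the unexported middleware interface is the same adapter trick as http.HandlerFunc satisfying http.Handler: a function type with a method that calls itself. A standalone sketch of the pattern (the names here are local stand-ins, not mux's):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// wrapper mirrors mux's unexported middleware interface.
type wrapper interface {
	Middleware(http.Handler) http.Handler
}

// wrapperFunc adapts a plain function to the interface, exactly as
// mux.MiddlewareFunc does.
type wrapperFunc func(http.Handler) http.Handler

func (f wrapperFunc) Middleware(h http.Handler) http.Handler { return f(h) }

func main() {
	base := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("handler")
	})
	var mw wrapper = wrapperFunc(func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Println("middleware")
			next.ServeHTTP(w, r)
		})
	})
	mw.Middleware(base).ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("GET", "/", nil))
}
```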
diff --git a/vendor/github.com/gorilla/mux/middleware_test.go b/vendor/github.com/gorilla/mux/middleware_test.go
new file mode 100644
index 000000000..93947e8cb
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/middleware_test.go
@@ -0,0 +1,336 @@
+package mux
+
+import (
+ "bytes"
+ "net/http"
+ "testing"
+)
+
+type testMiddleware struct {
+ timesCalled uint
+}
+
+func (tm *testMiddleware) Middleware(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ tm.timesCalled++
+ h.ServeHTTP(w, r)
+ })
+}
+
+func dummyHandler(w http.ResponseWriter, r *http.Request) {}
+
+func TestMiddlewareAdd(t *testing.T) {
+ router := NewRouter()
+ router.HandleFunc("/", dummyHandler).Methods("GET")
+
+ mw := &testMiddleware{}
+
+ router.useInterface(mw)
+ if len(router.middlewares) != 1 || router.middlewares[0] != mw {
+ t.Fatal("Middleware was not added correctly")
+ }
+
+ router.Use(mw.Middleware)
+ if len(router.middlewares) != 2 {
+ t.Fatal("MiddlewareFunc method was not added correctly")
+ }
+
+ banalMw := func(handler http.Handler) http.Handler {
+ return handler
+ }
+ router.Use(banalMw)
+ if len(router.middlewares) != 3 {
+ t.Fatal("MiddlewareFunc method was not added correctly")
+ }
+}
+
+func TestMiddleware(t *testing.T) {
+ router := NewRouter()
+ router.HandleFunc("/", dummyHandler).Methods("GET")
+
+ mw := &testMiddleware{}
+ router.useInterface(mw)
+
+ rw := NewRecorder()
+ req := newRequest("GET", "/")
+
+ // Test regular middleware call
+ router.ServeHTTP(rw, req)
+ if mw.timesCalled != 1 {
+ t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
+ }
+
+ // Middleware should not be called for 404
+ req = newRequest("GET", "/not/found")
+ router.ServeHTTP(rw, req)
+ if mw.timesCalled != 1 {
+ t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
+ }
+
+ // Middleware should not be called if there is a method mismatch
+ req = newRequest("POST", "/")
+ router.ServeHTTP(rw, req)
+ if mw.timesCalled != 1 {
+ t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
+ }
+
+ // Add the middleware again as function
+ router.Use(mw.Middleware)
+ req = newRequest("GET", "/")
+ router.ServeHTTP(rw, req)
+ if mw.timesCalled != 3 {
+ t.Fatalf("Expected %d calls, but got only %d", 3, mw.timesCalled)
+ }
+
+}
+
+func TestMiddlewareSubrouter(t *testing.T) {
+ router := NewRouter()
+ router.HandleFunc("/", dummyHandler).Methods("GET")
+
+ subrouter := router.PathPrefix("/sub").Subrouter()
+ subrouter.HandleFunc("/x", dummyHandler).Methods("GET")
+
+ mw := &testMiddleware{}
+ subrouter.useInterface(mw)
+
+ rw := NewRecorder()
+ req := newRequest("GET", "/")
+
+ router.ServeHTTP(rw, req)
+ if mw.timesCalled != 0 {
+ t.Fatalf("Expected %d calls, but got only %d", 0, mw.timesCalled)
+ }
+
+ req = newRequest("GET", "/sub/")
+ router.ServeHTTP(rw, req)
+ if mw.timesCalled != 0 {
+ t.Fatalf("Expected %d calls, but got only %d", 0, mw.timesCalled)
+ }
+
+ req = newRequest("GET", "/sub/x")
+ router.ServeHTTP(rw, req)
+ if mw.timesCalled != 1 {
+ t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
+ }
+
+ req = newRequest("GET", "/sub/not/found")
+ router.ServeHTTP(rw, req)
+ if mw.timesCalled != 1 {
+ t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
+ }
+
+ router.useInterface(mw)
+
+ req = newRequest("GET", "/")
+ router.ServeHTTP(rw, req)
+ if mw.timesCalled != 2 {
+ t.Fatalf("Expected %d calls, but got only %d", 2, mw.timesCalled)
+ }
+
+ req = newRequest("GET", "/sub/x")
+ router.ServeHTTP(rw, req)
+ if mw.timesCalled != 4 {
+ t.Fatalf("Expected %d calls, but got only %d", 4, mw.timesCalled)
+ }
+}
+
+func TestMiddlewareExecution(t *testing.T) {
+ mwStr := []byte("Middleware\n")
+ handlerStr := []byte("Logic\n")
+
+ router := NewRouter()
+ router.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) {
+ w.Write(handlerStr)
+ })
+
+ rw := NewRecorder()
+ req := newRequest("GET", "/")
+
+ // Test handler-only call
+ router.ServeHTTP(rw, req)
+
+ if bytes.Compare(rw.Body.Bytes(), handlerStr) != 0 {
+ t.Fatal("Handler response is not what it should be")
+ }
+
+ // Test middleware call
+ rw = NewRecorder()
+
+ router.Use(func(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write(mwStr)
+ h.ServeHTTP(w, r)
+ })
+ })
+
+ router.ServeHTTP(rw, req)
+ if bytes.Compare(rw.Body.Bytes(), append(mwStr, handlerStr...)) != 0 {
+ t.Fatal("Middleware + handler response is not what it should be")
+ }
+}
+
+func TestMiddlewareNotFound(t *testing.T) {
+ mwStr := []byte("Middleware\n")
+ handlerStr := []byte("Logic\n")
+
+ router := NewRouter()
+ router.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) {
+ w.Write(handlerStr)
+ })
+ router.Use(func(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write(mwStr)
+ h.ServeHTTP(w, r)
+ })
+ })
+
+ // Test not found call with default handler
+ rw := NewRecorder()
+ req := newRequest("GET", "/notfound")
+
+ router.ServeHTTP(rw, req)
+ if bytes.Contains(rw.Body.Bytes(), mwStr) {
+ t.Fatal("Middleware was called for a 404")
+ }
+
+ // Test not found call with custom handler
+ rw = NewRecorder()
+ req = newRequest("GET", "/notfound")
+
+ router.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ rw.Write([]byte("Custom 404 handler"))
+ })
+ router.ServeHTTP(rw, req)
+
+ if bytes.Contains(rw.Body.Bytes(), mwStr) {
+ t.Fatal("Middleware was called for a custom 404")
+ }
+}
+
+func TestMiddlewareMethodMismatch(t *testing.T) {
+ mwStr := []byte("Middleware\n")
+ handlerStr := []byte("Logic\n")
+
+ router := NewRouter()
+ router.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) {
+ w.Write(handlerStr)
+ }).Methods("GET")
+
+ router.Use(func(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write(mwStr)
+ h.ServeHTTP(w, r)
+ })
+ })
+
+ // Test method mismatch
+ rw := NewRecorder()
+ req := newRequest("POST", "/")
+
+ router.ServeHTTP(rw, req)
+ if bytes.Contains(rw.Body.Bytes(), mwStr) {
+ t.Fatal("Middleware was called for a method mismatch")
+ }
+
+ // Test not found call
+ rw = NewRecorder()
+ req = newRequest("POST", "/")
+
+ router.MethodNotAllowedHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ rw.Write([]byte("Method not allowed"))
+ })
+ router.ServeHTTP(rw, req)
+
+ if bytes.Contains(rw.Body.Bytes(), mwStr) {
+ t.Fatal("Middleware was called for a method mismatch")
+ }
+}
+
+func TestMiddlewareNotFoundSubrouter(t *testing.T) {
+ mwStr := []byte("Middleware\n")
+ handlerStr := []byte("Logic\n")
+
+ router := NewRouter()
+ router.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) {
+ w.Write(handlerStr)
+ })
+
+ subrouter := router.PathPrefix("/sub/").Subrouter()
+ subrouter.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) {
+ w.Write(handlerStr)
+ })
+
+ router.Use(func(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write(mwStr)
+ h.ServeHTTP(w, r)
+ })
+ })
+
+ // Test not found call for default handler
+ rw := NewRecorder()
+ req := newRequest("GET", "/sub/notfound")
+
+ router.ServeHTTP(rw, req)
+ if bytes.Contains(rw.Body.Bytes(), mwStr) {
+ t.Fatal("Middleware was called for a 404")
+ }
+
+ // Test not found call with custom handler
+ rw = NewRecorder()
+ req = newRequest("GET", "/sub/notfound")
+
+ subrouter.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ rw.Write([]byte("Custom 404 handler"))
+ })
+ router.ServeHTTP(rw, req)
+
+ if bytes.Contains(rw.Body.Bytes(), mwStr) {
+ t.Fatal("Middleware was called for a custom 404")
+ }
+}
+
+func TestMiddlewareMethodMismatchSubrouter(t *testing.T) {
+ mwStr := []byte("Middleware\n")
+ handlerStr := []byte("Logic\n")
+
+ router := NewRouter()
+ router.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) {
+ w.Write(handlerStr)
+ })
+
+ subrouter := router.PathPrefix("/sub/").Subrouter()
+ subrouter.HandleFunc("/", func(w http.ResponseWriter, e *http.Request) {
+ w.Write(handlerStr)
+ }).Methods("GET")
+
+ router.Use(func(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write(mwStr)
+ h.ServeHTTP(w, r)
+ })
+ })
+
+ // Test method mismatch without custom handler
+ rw := NewRecorder()
+ req := newRequest("POST", "/sub/")
+
+ router.ServeHTTP(rw, req)
+ if bytes.Contains(rw.Body.Bytes(), mwStr) {
+ t.Fatal("Middleware was called for a method mismatch")
+ }
+
+ // Test method mismatch with custom handler
+ rw = NewRecorder()
+ req = newRequest("POST", "/sub/")
+
+ router.MethodNotAllowedHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ rw.Write([]byte("Method not allowed"))
+ })
+ router.ServeHTTP(rw, req)
+
+ if bytes.Contains(rw.Body.Bytes(), mwStr) {
+ t.Fatal("Middleware was called for a method mismatch")
+ }
+}
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
index 49de78923..efabd2417 100644
--- a/vendor/github.com/gorilla/mux/mux.go
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -63,6 +63,8 @@ type Router struct {
KeepContext bool
// see Router.UseEncodedPath(). This defines a flag for all routes.
useEncodedPath bool
+ // Slice of middlewares to be called after a match is found
+ middlewares []middleware
}
// Match attempts to match the given request against the router's registered routes.
@@ -79,6 +81,12 @@ type Router struct {
func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
for _, route := range r.routes {
if route.Match(req, match) {
+ // Build middleware chain if no error was found
+ if match.MatchErr == nil {
+ for i := len(r.middlewares) - 1; i >= 0; i-- {
+ match.Handler = r.middlewares[i].Middleware(match.Handler)
+ }
+ }
return true
}
}
@@ -147,6 +155,7 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if !r.KeepContext {
defer contextClear(req)
}
+
handler.ServeHTTP(w, req)
}
@@ -164,13 +173,18 @@ func (r *Router) GetRoute(name string) *Route {
// StrictSlash defines the trailing slash behavior for new routes. The initial
// value is false.
//
-// When true, if the route path is "/path/", accessing "/path" will redirect
+// When true, if the route path is "/path/", accessing "/path" will perform a redirect
// to the former and vice versa. In other words, your application will always
// see the path as specified in the route.
//
// When false, if the route path is "/path", accessing "/path/" will not match
// this route and vice versa.
//
+// The redirect is an HTTP 301 (Moved Permanently). Note that when this is set for
+// routes with a non-idempotent method (e.g. POST, PUT), the subsequent redirected
+// request will be made as a GET by most clients. Use middleware or client settings
+// to modify this behaviour as needed.
+//
// Special case: when a route sets a path prefix using the PathPrefix() method,
// strict slash is ignored for that route because the redirect behavior can't
// be determined from a prefix alone. However, any subrouters created from that
@@ -196,10 +210,6 @@ func (r *Router) SkipClean(value bool) *Router {
// UseEncodedPath tells the router to match the encoded original path
// to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
-// This behavior has the drawback of needing to match routes against
-// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
-// to r.URL.Path will not affect routing when this flag is on and thus may
-// induce unintended behavior.
//
// If not called, the router will match the unencoded path to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
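
The Match hook added above wraps the matched handler in reverse registration order, so the first middleware registered becomes the outermost layer and runs first. The same loop shape, extracted into a runnable sketch:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func tag(name string) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Println("enter", name)
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("handler")
	})
	middlewares := []func(http.Handler) http.Handler{tag("first"), tag("second")}

	// Same loop shape as Router.Match: iterate backwards so that
	// middlewares[0] ends up outermost and therefore runs first.
	for i := len(middlewares) - 1; i >= 0; i-- {
		handler = middlewares[i](handler)
	}

	handler.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("GET", "/", nil))
	// Prints: enter first, enter second, handler
}
```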
diff --git a/vendor/github.com/gorilla/mux/mux_test.go b/vendor/github.com/gorilla/mux/mux_test.go
index 6c7e30d19..9e93c9830 100644
--- a/vendor/github.com/gorilla/mux/mux_test.go
+++ b/vendor/github.com/gorilla/mux/mux_test.go
@@ -25,7 +25,7 @@ func (r *Route) GoString() string {
}
func (r *routeRegexp) GoString() string {
- return fmt.Sprintf("&routeRegexp{template: %q, matchHost: %t, matchQuery: %t, strictSlash: %t, regexp: regexp.MustCompile(%q), reverse: %q, varsN: %v, varsR: %v", r.template, r.matchHost, r.matchQuery, r.strictSlash, r.regexp.String(), r.reverse, r.varsN, r.varsR)
+ return fmt.Sprintf("&routeRegexp{template: %q, regexpType: %v, options: %v, regexp: regexp.MustCompile(%q), reverse: %q, varsN: %v, varsR: %v", r.template, r.regexpType, r.options, r.regexp.String(), r.reverse, r.varsN, r.varsR)
}
type routeTest struct {
@@ -1967,6 +1967,318 @@ func TestErrMatchNotFound(t *testing.T) {
}
}
+// methodsSubrouterTest models the data necessary for testing handler
+// matching for subrouters created after HTTP methods matcher registration.
+type methodsSubrouterTest struct {
+ title string
+ wantCode int
+ router *Router
+ // method is the input into the request and expected response
+ method string
+ // input request path
+ path string
+ // redirectTo is the expected location path for strict-slash matches
+ redirectTo string
+}
+
+// methodHandler writes the method string in response.
+func methodHandler(method string) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte(method))
+ }
+}
+
+// TestMethodsSubrouterCatchall matches handlers for subrouters where a
+// catchall handler is set for a mis-matching method.
+func TestMethodsSubrouterCatchall(t *testing.T) {
+ t.Parallel()
+
+ router := NewRouter()
+ router.Methods("PATCH").Subrouter().PathPrefix("/").HandlerFunc(methodHandler("PUT"))
+ router.Methods("GET").Subrouter().HandleFunc("/foo", methodHandler("GET"))
+ router.Methods("POST").Subrouter().HandleFunc("/foo", methodHandler("POST"))
+ router.Methods("DELETE").Subrouter().HandleFunc("/foo", methodHandler("DELETE"))
+
+ tests := []methodsSubrouterTest{
+ {
+ title: "match GET handler",
+ router: router,
+ path: "http://localhost/foo",
+ method: "GET",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match POST handler",
+ router: router,
+ method: "POST",
+ path: "http://localhost/foo",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match DELETE handler",
+ router: router,
+ method: "DELETE",
+ path: "http://localhost/foo",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "disallow PUT method",
+ router: router,
+ method: "PUT",
+ path: "http://localhost/foo",
+ wantCode: http.StatusMethodNotAllowed,
+ },
+ }
+
+ for _, test := range tests {
+ testMethodsSubrouter(t, test)
+ }
+}
+
+// TestMethodsSubrouterStrictSlash matches handlers on subrouters with
+// strict-slash matchers.
+func TestMethodsSubrouterStrictSlash(t *testing.T) {
+ t.Parallel()
+
+ router := NewRouter()
+ sub := router.PathPrefix("/").Subrouter()
+ sub.StrictSlash(true).Path("/foo").Methods("GET").Subrouter().HandleFunc("", methodHandler("GET"))
+ sub.StrictSlash(true).Path("/foo/").Methods("PUT").Subrouter().HandleFunc("/", methodHandler("PUT"))
+ sub.StrictSlash(true).Path("/foo/").Methods("POST").Subrouter().HandleFunc("/", methodHandler("POST"))
+
+ tests := []methodsSubrouterTest{
+ {
+ title: "match POST handler",
+ router: router,
+ method: "POST",
+ path: "http://localhost/foo/",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match GET handler",
+ router: router,
+ method: "GET",
+ path: "http://localhost/foo",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match POST handler, redirect strict-slash",
+ router: router,
+ method: "POST",
+ path: "http://localhost/foo",
+ redirectTo: "http://localhost/foo/",
+ wantCode: http.StatusMovedPermanently,
+ },
+ {
+ title: "match GET handler, redirect strict-slash",
+ router: router,
+ method: "GET",
+ path: "http://localhost/foo/",
+ redirectTo: "http://localhost/foo",
+ wantCode: http.StatusMovedPermanently,
+ },
+ {
+ title: "disallow DELETE method",
+ router: router,
+ method: "DELETE",
+ path: "http://localhost/foo",
+ wantCode: http.StatusMethodNotAllowed,
+ },
+ }
+
+ for _, test := range tests {
+ testMethodsSubrouter(t, test)
+ }
+}
+
+// TestMethodsSubrouterPathPrefix matches handlers on subrouters created
+// on a router with a path prefix matcher and method matcher.
+func TestMethodsSubrouterPathPrefix(t *testing.T) {
+ t.Parallel()
+
+ router := NewRouter()
+ router.PathPrefix("/1").Methods("POST").Subrouter().HandleFunc("/2", methodHandler("POST"))
+ router.PathPrefix("/1").Methods("DELETE").Subrouter().HandleFunc("/2", methodHandler("DELETE"))
+ router.PathPrefix("/1").Methods("PUT").Subrouter().HandleFunc("/2", methodHandler("PUT"))
+ router.PathPrefix("/1").Methods("POST").Subrouter().HandleFunc("/2", methodHandler("POST2"))
+
+ tests := []methodsSubrouterTest{
+ {
+ title: "match first POST handler",
+ router: router,
+ method: "POST",
+ path: "http://localhost/1/2",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match DELETE handler",
+ router: router,
+ method: "DELETE",
+ path: "http://localhost/1/2",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match PUT handler",
+ router: router,
+ method: "PUT",
+ path: "http://localhost/1/2",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "disallow PATCH method",
+ router: router,
+ method: "PATCH",
+ path: "http://localhost/1/2",
+ wantCode: http.StatusMethodNotAllowed,
+ },
+ }
+
+ for _, test := range tests {
+ testMethodsSubrouter(t, test)
+ }
+}
+
+// TestMethodsSubrouterSubrouter matches handlers on subrouters produced
+// from method matchers registered on a root subrouter.
+func TestMethodsSubrouterSubrouter(t *testing.T) {
+ t.Parallel()
+
+ router := NewRouter()
+ sub := router.PathPrefix("/1").Subrouter()
+ sub.Methods("POST").Subrouter().HandleFunc("/2", methodHandler("POST"))
+ sub.Methods("GET").Subrouter().HandleFunc("/2", methodHandler("GET"))
+ sub.Methods("PATCH").Subrouter().HandleFunc("/2", methodHandler("PATCH"))
+ sub.HandleFunc("/2", methodHandler("PUT")).Subrouter().Methods("PUT")
+ sub.HandleFunc("/2", methodHandler("POST2")).Subrouter().Methods("POST")
+
+ tests := []methodsSubrouterTest{
+ {
+ title: "match first POST handler",
+ router: router,
+ method: "POST",
+ path: "http://localhost/1/2",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match GET handler",
+ router: router,
+ method: "GET",
+ path: "http://localhost/1/2",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match PATCH handler",
+ router: router,
+ method: "PATCH",
+ path: "http://localhost/1/2",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match PUT handler",
+ router: router,
+ method: "PUT",
+ path: "http://localhost/1/2",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "disallow DELETE method",
+ router: router,
+ method: "DELETE",
+ path: "http://localhost/1/2",
+ wantCode: http.StatusMethodNotAllowed,
+ },
+ }
+
+ for _, test := range tests {
+ testMethodsSubrouter(t, test)
+ }
+}
+
+// TestMethodsSubrouterPathVariable matches handlers on matching paths
+// with path variables in them.
+func TestMethodsSubrouterPathVariable(t *testing.T) {
+ t.Parallel()
+
+ router := NewRouter()
+ router.Methods("GET").Subrouter().HandleFunc("/foo", methodHandler("GET"))
+ router.Methods("POST").Subrouter().HandleFunc("/{any}", methodHandler("POST"))
+ router.Methods("DELETE").Subrouter().HandleFunc("/1/{any}", methodHandler("DELETE"))
+ router.Methods("PUT").Subrouter().HandleFunc("/1/{any}", methodHandler("PUT"))
+
+ tests := []methodsSubrouterTest{
+ {
+ title: "match GET handler",
+ router: router,
+ method: "GET",
+ path: "http://localhost/foo",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match POST handler",
+ router: router,
+ method: "POST",
+ path: "http://localhost/foo",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match DELETE handler",
+ router: router,
+ method: "DELETE",
+ path: "http://localhost/1/foo",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "match PUT handler",
+ router: router,
+ method: "PUT",
+ path: "http://localhost/1/foo",
+ wantCode: http.StatusOK,
+ },
+ {
+ title: "disallow PATCH method",
+ router: router,
+ method: "PATCH",
+ path: "http://localhost/1/foo",
+ wantCode: http.StatusMethodNotAllowed,
+ },
+ }
+
+ for _, test := range tests {
+ testMethodsSubrouter(t, test)
+ }
+}
+
+// testMethodsSubrouter runs an individual methodsSubrouterTest.
+func testMethodsSubrouter(t *testing.T, test methodsSubrouterTest) {
+ // Execute request
+ req, _ := http.NewRequest(test.method, test.path, nil)
+ resp := NewRecorder()
+ test.router.ServeHTTP(resp, req)
+
+ switch test.wantCode {
+ case http.StatusMethodNotAllowed:
+ if resp.Code != http.StatusMethodNotAllowed {
+ t.Errorf(`(%s) Expected "405 Method Not Allowed", but got code %d`, test.title, resp.Code)
+ } else if matchedMethod := resp.Body.String(); matchedMethod != "" {
+ t.Errorf(`(%s) Expected "405 Method Not Allowed", but %q handler was called`, test.title, matchedMethod)
+ }
+
+ case http.StatusMovedPermanently:
+ if gotLocation := resp.HeaderMap.Get("Location"); gotLocation != test.redirectTo {
+ t.Errorf("(%s) Expected %q route-match to redirect to %q, but got %q", test.title, test.method, test.redirectTo, gotLocation)
+ }
+
+ case http.StatusOK:
+ if matchedMethod := resp.Body.String(); matchedMethod != test.method {
+ t.Errorf("(%s) Expected %q handler to be called, but %q handler was called", test.title, test.method, matchedMethod)
+ }
+
+ default:
+ expectedCodes := []int{http.StatusMethodNotAllowed, http.StatusMovedPermanently, http.StatusOK}
+ t.Errorf("(%s) Expected wantCode to be one of: %v, but got %d", test.title, expectedCodes, test.wantCode)
+ }
+}
+
// mapToPairs converts a string map to a slice of string pairs
func mapToPairs(m map[string]string) []string {
var i int
diff --git a/vendor/github.com/gorilla/mux/old_test.go b/vendor/github.com/gorilla/mux/old_test.go
index 3751e4727..b228983c4 100644
--- a/vendor/github.com/gorilla/mux/old_test.go
+++ b/vendor/github.com/gorilla/mux/old_test.go
@@ -681,7 +681,7 @@ func TestNewRegexp(t *testing.T) {
}
for pattern, paths := range tests {
- p, _ = newRouteRegexp(pattern, false, false, false, false, false)
+ p, _ = newRouteRegexp(pattern, regexpTypePath, routeRegexpOptions{})
for path, result := range paths {
matches = p.regexp.FindStringSubmatch(path)
if result == nil {
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
index e83213b7d..2b57e5627 100644
--- a/vendor/github.com/gorilla/mux/regexp.go
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -14,6 +14,20 @@ import (
"strings"
)
+type routeRegexpOptions struct {
+ strictSlash bool
+ useEncodedPath bool
+}
+
+type regexpType int
+
+const (
+ regexpTypePath regexpType = 0
+ regexpTypeHost regexpType = 1
+ regexpTypePrefix regexpType = 2
+ regexpTypeQuery regexpType = 3
+)
+
// newRouteRegexp parses a route template and returns a routeRegexp,
// used to match a host, a path or a query string.
//
@@ -24,7 +38,7 @@ import (
// Previously we accepted only Python-like identifiers for variable
// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
// name and pattern can't be empty, and names can't contain a colon.
-func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) {
+func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
// Check if it is well-formed.
idxs, errBraces := braceIndices(tpl)
if errBraces != nil {
@@ -34,19 +48,18 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
template := tpl
// Now let's parse it.
defaultPattern := "[^/]+"
- if matchQuery {
+ if typ == regexpTypeQuery {
defaultPattern = ".*"
- } else if matchHost {
+ } else if typ == regexpTypeHost {
defaultPattern = "[^.]+"
- matchPrefix = false
}
// Only match strict slash if not matching
- if matchPrefix || matchHost || matchQuery {
- strictSlash = false
+ if typ != regexpTypePath {
+ options.strictSlash = false
}
// Set a flag for strictSlash.
endSlash := false
- if strictSlash && strings.HasSuffix(tpl, "/") {
+ if options.strictSlash && strings.HasSuffix(tpl, "/") {
tpl = tpl[:len(tpl)-1]
endSlash = true
}
@@ -88,16 +101,16 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
// Add the remaining.
raw := tpl[end:]
pattern.WriteString(regexp.QuoteMeta(raw))
- if strictSlash {
+ if options.strictSlash {
pattern.WriteString("[/]?")
}
- if matchQuery {
+ if typ == regexpTypeQuery {
// Add the default pattern if the query value is empty
if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
pattern.WriteString(defaultPattern)
}
}
- if !matchPrefix {
+ if typ != regexpTypePrefix {
pattern.WriteByte('$')
}
reverse.WriteString(raw)
@@ -118,15 +131,13 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
// Done!
return &routeRegexp{
- template: template,
- matchHost: matchHost,
- matchQuery: matchQuery,
- strictSlash: strictSlash,
- useEncodedPath: useEncodedPath,
- regexp: reg,
- reverse: reverse.String(),
- varsN: varsN,
- varsR: varsR,
+ template: template,
+ regexpType: typ,
+ options: options,
+ regexp: reg,
+ reverse: reverse.String(),
+ varsN: varsN,
+ varsR: varsR,
}, nil
}
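
The change above replaces five positional booleans with a regexpType constant plus a routeRegexpOptions struct. A hypothetical call illustrating the new shape (this is an unexported API; the template is a placeholder):

    // before: newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath)
    rr, err := newRouteRegexp("/articles/{id:[0-9]+}", regexpTypePath, routeRegexpOptions{
        strictSlash:    true, // cleared internally for any type other than regexpTypePath
        useEncodedPath: false,
    })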
@@ -135,15 +146,10 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
type routeRegexp struct {
// The unmodified template.
template string
- // True for host match, false for path or query string match.
- matchHost bool
- // True for query string match, false for path and host match.
- matchQuery bool
- // The strictSlash value defined on the route, but disabled if PathPrefix was used.
- strictSlash bool
- // Determines whether to use encoded req.URL.EnscapedPath() or unencoded
- // req.URL.Path for path matching
- useEncodedPath bool
+ // The type of match
+ regexpType regexpType
+ // Options for matching
+ options routeRegexpOptions
// Expanded regexp.
regexp *regexp.Regexp
// Reverse template.
@@ -156,12 +162,12 @@ type routeRegexp struct {
// Match matches the regexp against the URL host or path.
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
- if !r.matchHost {
- if r.matchQuery {
+ if r.regexpType != regexpTypeHost {
+ if r.regexpType == regexpTypeQuery {
return r.matchQueryString(req)
}
path := req.URL.Path
- if r.useEncodedPath {
+ if r.options.useEncodedPath {
path = req.URL.EscapedPath()
}
return r.regexp.MatchString(path)
@@ -178,7 +184,7 @@ func (r *routeRegexp) url(values map[string]string) (string, error) {
if !ok {
return "", fmt.Errorf("mux: missing route variable %q", v)
}
- if r.matchQuery {
+ if r.regexpType == regexpTypeQuery {
value = url.QueryEscape(value)
}
urlValues[k] = value
@@ -203,7 +209,7 @@ func (r *routeRegexp) url(values map[string]string) (string, error) {
// For a URL with foo=bar&baz=ding, we return only the relevant key
// value pair for the routeRegexp.
func (r *routeRegexp) getURLQuery(req *http.Request) string {
- if !r.matchQuery {
+ if r.regexpType != regexpTypeQuery {
return ""
}
templateKey := strings.SplitN(r.template, "=", 2)[0]
@@ -280,7 +286,7 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route)
if len(matches) > 0 {
extractVars(path, matches, v.path.varsN, m.Vars)
// Check if we should redirect.
- if v.path.strictSlash {
+ if v.path.options.strictSlash {
p1 := strings.HasSuffix(path, "/")
p2 := strings.HasSuffix(v.path.template, "/")
if p1 != p2 {
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
index 69aeae791..4ce098d4f 100644
--- a/vendor/github.com/gorilla/mux/route.go
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -75,6 +75,8 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if match.MatchErr == ErrMethodMismatch {
// We found a route which matches request method, clear MatchErr
match.MatchErr = nil
+ // Then override the mis-matched handler
+ match.Handler = r.handler
}
// Yay, we have a match. Let's collect some info about it.
@@ -169,12 +171,12 @@ func (r *Route) addMatcher(m matcher) *Route {
}
// addRegexpMatcher adds a host or path matcher and builder to a route.
-func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
if r.err != nil {
return r.err
}
r.regexp = r.getRegexpGroup()
- if !matchHost && !matchQuery {
+ if typ == regexpTypePath || typ == regexpTypePrefix {
if len(tpl) > 0 && tpl[0] != '/' {
return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
}
@@ -182,7 +184,10 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
}
}
- rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath)
+ rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
+ strictSlash: r.strictSlash,
+ useEncodedPath: r.useEncodedPath,
+ })
if err != nil {
return err
}
@@ -191,7 +196,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
return err
}
}
- if matchHost {
+ if typ == regexpTypeHost {
if r.regexp.path != nil {
if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
return err
@@ -204,7 +209,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
return err
}
}
- if matchQuery {
+ if typ == regexpTypeQuery {
r.regexp.queries = append(r.regexp.queries, rr)
} else {
r.regexp.path = rr
@@ -256,7 +261,8 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if the request header matches both regular expressions.
-// It the value is an empty string, it will match any value if the key is set.
+// If the value is an empty string, it will match any value if the key is set.
+// Use the start and end of string anchors (^ and $) to match an exact value.
func (r *Route) HeadersRegexp(pairs ...string) *Route {
if r.err == nil {
var headers map[string]*regexp.Regexp
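
A short sketch of the corrected doc comment in use (jsonHandler is a placeholder): without the anchors, the pattern matches anywhere inside the header value.

    r := mux.NewRouter()
    // "^application/json$" matches the exact value only; an unanchored
    // "application/json" would also match "application/json; charset=utf-8".
    r.HeadersRegexp("Content-Type", "^application/json$").HandlerFunc(jsonHandler)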
@@ -286,7 +292,7 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route {
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Host(tpl string) *Route {
- r.err = r.addRegexpMatcher(tpl, true, false, false)
+ r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
return r
}
@@ -346,7 +352,7 @@ func (r *Route) Methods(methods ...string) *Route {
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Path(tpl string) *Route {
- r.err = r.addRegexpMatcher(tpl, false, false, false)
+ r.err = r.addRegexpMatcher(tpl, regexpTypePath)
return r
}
@@ -362,7 +368,7 @@ func (r *Route) Path(tpl string) *Route {
// Also note that the setting of Router.StrictSlash() has no effect on routes
// with a PathPrefix matcher.
func (r *Route) PathPrefix(tpl string) *Route {
- r.err = r.addRegexpMatcher(tpl, false, true, false)
+ r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
return r
}
@@ -393,7 +399,7 @@ func (r *Route) Queries(pairs ...string) *Route {
return nil
}
for i := 0; i < length; i += 2 {
- if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+ if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
return r
}
}
diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go
new file mode 100644
index 000000000..8b2c4a4c5
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/test_helpers.go
@@ -0,0 +1,18 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import "net/http"
+
+// SetURLVars sets the URL variables for the given request, to be accessed via
+// mux.Vars for testing route behaviour.
+//
+// This API should only be used for testing purposes; it provides a way to
+// inject variables into the request context. Alternatively, URL variables
+// can be set by making a route that captures the required variables,
+// starting a server and sending the request to that server.
+func SetURLVars(r *http.Request, val map[string]string) *http.Request {
+ return setVars(r, val)
+}
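
A sketch of the intended use in a handler test (articleHandler and the "id" variable are illustrative; needs net/http/httptest and the mux package):

    func TestArticleHandler(t *testing.T) {
        req := httptest.NewRequest("GET", "/articles/42", nil)
        // Inject the route variables directly instead of routing the request.
        req = mux.SetURLVars(req, map[string]string{"id": "42"})

        rec := httptest.NewRecorder()
        articleHandler(rec, req) // handler reads mux.Vars(r)["id"]

        if got := rec.Body.String(); got != "42" {
            t.Errorf("body = %q, want %q", got, "42")
        }
    }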
diff --git a/vendor/github.com/gorilla/websocket/.travis.yml b/vendor/github.com/gorilla/websocket/.travis.yml
index 3d8d29cf3..9f233f983 100644
--- a/vendor/github.com/gorilla/websocket/.travis.yml
+++ b/vendor/github.com/gorilla/websocket/.travis.yml
@@ -8,6 +8,7 @@ matrix:
- go: 1.6
- go: 1.7
- go: 1.8
+ - go: 1.9
- go: tip
allow_failures:
- go: tip
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go
index 43a87c753..934e28e96 100644
--- a/vendor/github.com/gorilla/websocket/client.go
+++ b/vendor/github.com/gorilla/websocket/client.go
@@ -5,10 +5,8 @@
package websocket
import (
- "bufio"
"bytes"
"crypto/tls"
- "encoding/base64"
"errors"
"io"
"io/ioutil"
@@ -88,50 +86,6 @@ type Dialer struct {
var errMalformedURL = errors.New("malformed ws or wss URL")
-// parseURL parses the URL.
-//
-// This function is a replacement for the standard library url.Parse function.
-// In Go 1.4 and earlier, url.Parse loses information from the path.
-func parseURL(s string) (*url.URL, error) {
- // From the RFC:
- //
- // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ]
- // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
- var u url.URL
- switch {
- case strings.HasPrefix(s, "ws://"):
- u.Scheme = "ws"
- s = s[len("ws://"):]
- case strings.HasPrefix(s, "wss://"):
- u.Scheme = "wss"
- s = s[len("wss://"):]
- default:
- return nil, errMalformedURL
- }
-
- if i := strings.Index(s, "?"); i >= 0 {
- u.RawQuery = s[i+1:]
- s = s[:i]
- }
-
- if i := strings.Index(s, "/"); i >= 0 {
- u.Opaque = s[i:]
- s = s[:i]
- } else {
- u.Opaque = "/"
- }
-
- u.Host = s
-
- if strings.Contains(u.Host, "@") {
- // Don't bother parsing user information because user information is
- // not allowed in websocket URIs.
- return nil, errMalformedURL
- }
-
- return &u, nil
-}
-
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
hostPort = u.Host
hostNoPort = u.Host
@@ -150,7 +104,7 @@ func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
return hostPort, hostNoPort
}
-// DefaultDialer is a dialer with all fields set to the default zero values.
+// DefaultDialer is a dialer with all fields set to the default values.
var DefaultDialer = &Dialer{
Proxy: http.ProxyFromEnvironment,
}
@@ -177,7 +131,7 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
return nil, nil, err
}
- u, err := parseURL(urlStr)
+ u, err := url.Parse(urlStr)
if err != nil {
return nil, nil, err
}
@@ -246,36 +200,52 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover")
}
- hostPort, hostNoPort := hostPortNoPort(u)
-
- var proxyURL *url.URL
- // Check wether the proxy method has been configured
- if d.Proxy != nil {
- proxyURL, err = d.Proxy(req)
- }
- if err != nil {
- return nil, nil, err
- }
-
- var targetHostPort string
- if proxyURL != nil {
- targetHostPort, _ = hostPortNoPort(proxyURL)
- } else {
- targetHostPort = hostPort
- }
-
var deadline time.Time
if d.HandshakeTimeout != 0 {
deadline = time.Now().Add(d.HandshakeTimeout)
}
+ // Get network dial function.
netDial := d.NetDial
if netDial == nil {
netDialer := &net.Dialer{Deadline: deadline}
netDial = netDialer.Dial
}
- netConn, err := netDial("tcp", targetHostPort)
+ // If needed, wrap the dial function to set the connection deadline.
+ if !deadline.Equal(time.Time{}) {
+ forwardDial := netDial
+ netDial = func(network, addr string) (net.Conn, error) {
+ c, err := forwardDial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ err = c.SetDeadline(deadline)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ return c, nil
+ }
+ }
+
+ // If needed, wrap the dial function to connect through a proxy.
+ if d.Proxy != nil {
+ proxyURL, err := d.Proxy(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ if proxyURL != nil {
+ dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
+ if err != nil {
+ return nil, nil, err
+ }
+ netDial = dialer.Dial
+ }
+ }
+
+ hostPort, hostNoPort := hostPortNoPort(u)
+ netConn, err := netDial("tcp", hostPort)
if err != nil {
return nil, nil, err
}
@@ -286,42 +256,6 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
}
}()
- if err := netConn.SetDeadline(deadline); err != nil {
- return nil, nil, err
- }
-
- if proxyURL != nil {
- connectHeader := make(http.Header)
- if user := proxyURL.User; user != nil {
- proxyUser := user.Username()
- if proxyPassword, passwordSet := user.Password(); passwordSet {
- credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
- connectHeader.Set("Proxy-Authorization", "Basic "+credential)
- }
- }
- connectReq := &http.Request{
- Method: "CONNECT",
- URL: &url.URL{Opaque: hostPort},
- Host: hostPort,
- Header: connectHeader,
- }
-
- connectReq.Write(netConn)
-
- // Read response.
- // Okay to use and discard buffered reader here, because
- // TLS server will not speak until spoken to.
- br := bufio.NewReader(netConn)
- resp, err := http.ReadResponse(br, connectReq)
- if err != nil {
- return nil, nil, err
- }
- if resp.StatusCode != 200 {
- f := strings.SplitN(resp.Status, " ", 2)
- return nil, nil, errors.New(f[1])
- }
- }
-
if u.Scheme == "https" {
cfg := cloneTLSConfig(d.TLSClientConfig)
if cfg.ServerName == "" {
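
The net effect of this rework: Dial now composes a deadline-setting wrapper and, when Dialer.Proxy yields a URL, a proxy dialer (HTTP CONNECT via the new proxy.go, SOCKS5 via the bundled x/net/proxy code) around the plain network dialer. A minimal client sketch; the proxy address and target URL are placeholders:

    package main

    import (
        "log"
        "net/http"
        "net/url"
        "time"

        "github.com/gorilla/websocket"
    )

    func main() {
        // An "http://..." proxy URL would use the CONNECT dialer instead.
        proxyURL, err := url.Parse("socks5://127.0.0.1:1080")
        if err != nil {
            log.Fatal(err)
        }
        d := websocket.Dialer{
            Proxy:            http.ProxyURL(proxyURL),
            HandshakeTimeout: 30 * time.Second,
        }
        conn, _, err := d.Dial("ws://example.com/ws", nil)
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
    }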
diff --git a/vendor/github.com/gorilla/websocket/client_server_test.go b/vendor/github.com/gorilla/websocket/client_server_test.go
index 7d39da681..50063b7e0 100644
--- a/vendor/github.com/gorilla/websocket/client_server_test.go
+++ b/vendor/github.com/gorilla/websocket/client_server_test.go
@@ -5,11 +5,14 @@
package websocket
import (
+ "bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
+ "encoding/binary"
"io"
"io/ioutil"
+ "net"
"net/http"
"net/http/cookiejar"
"net/http/httptest"
@@ -31,9 +34,10 @@ var cstUpgrader = Upgrader{
}
var cstDialer = Dialer{
- Subprotocols: []string{"p1", "p2"},
- ReadBufferSize: 1024,
- WriteBufferSize: 1024,
+ Subprotocols: []string{"p1", "p2"},
+ ReadBufferSize: 1024,
+ WriteBufferSize: 1024,
+ HandshakeTimeout: 30 * time.Second,
}
type cstHandler struct{ *testing.T }
@@ -143,8 +147,9 @@ func TestProxyDial(t *testing.T) {
s := newServer(t)
defer s.Close()
- surl, _ := url.Parse(s.URL)
+ surl, _ := url.Parse(s.Server.URL)
+ cstDialer := cstDialer // make local copy for modification on next line.
cstDialer.Proxy = http.ProxyURL(surl)
connect := false
@@ -160,8 +165,8 @@ func TestProxyDial(t *testing.T) {
}
if !connect {
- t.Log("connect not recieved")
- http.Error(w, "connect not recieved", 405)
+ t.Log("connect not received")
+ http.Error(w, "connect not received", 405)
return
}
origHandler.ServeHTTP(w, r)
@@ -173,16 +178,16 @@ func TestProxyDial(t *testing.T) {
}
defer ws.Close()
sendRecv(t, ws)
-
- cstDialer.Proxy = http.ProxyFromEnvironment
}
func TestProxyAuthorizationDial(t *testing.T) {
s := newServer(t)
defer s.Close()
- surl, _ := url.Parse(s.URL)
+ surl, _ := url.Parse(s.Server.URL)
surl.User = url.UserPassword("username", "password")
+
+ cstDialer := cstDialer // make local copy for modification on next line.
cstDialer.Proxy = http.ProxyURL(surl)
connect := false
@@ -200,8 +205,8 @@ func TestProxyAuthorizationDial(t *testing.T) {
}
if !connect {
- t.Log("connect with proxy authorization not recieved")
- http.Error(w, "connect with proxy authorization not recieved", 405)
+ t.Log("connect with proxy authorization not received")
+ http.Error(w, "connect with proxy authorization not received", 405)
return
}
origHandler.ServeHTTP(w, r)
@@ -213,8 +218,6 @@ func TestProxyAuthorizationDial(t *testing.T) {
}
defer ws.Close()
sendRecv(t, ws)
-
- cstDialer.Proxy = http.ProxyFromEnvironment
}
func TestDial(t *testing.T) {
@@ -237,7 +240,7 @@ func TestDialCookieJar(t *testing.T) {
d := cstDialer
d.Jar = jar
- u, _ := parseURL(s.URL)
+ u, _ := url.Parse(s.URL)
switch u.Scheme {
case "ws":
@@ -246,7 +249,7 @@ func TestDialCookieJar(t *testing.T) {
u.Scheme = "https"
}
- cookies := []*http.Cookie{&http.Cookie{Name: "gorilla", Value: "ws", Path: "/"}}
+ cookies := []*http.Cookie{{Name: "gorilla", Value: "ws", Path: "/"}}
d.Jar.SetCookies(u, cookies)
ws, _, err := d.Dial(s.URL, nil)
@@ -398,9 +401,17 @@ func TestBadMethod(t *testing.T) {
}))
defer s.Close()
- resp, err := http.PostForm(s.URL, url.Values{})
+ req, err := http.NewRequest("POST", s.URL, strings.NewReader(""))
+ if err != nil {
+ t.Fatalf("NewRequest returned error %v", err)
+ }
+ req.Header.Set("Connection", "upgrade")
+ req.Header.Set("Upgrade", "websocket")
+ req.Header.Set("Sec-Websocket-Version", "13")
+
+ resp, err := http.DefaultClient.Do(req)
if err != nil {
- t.Fatalf("PostForm returned error %v", err)
+ t.Fatalf("Do returned error %v", err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusMethodNotAllowed {
@@ -510,3 +521,82 @@ func TestDialCompression(t *testing.T) {
defer ws.Close()
sendRecv(t, ws)
}
+
+func TestSocksProxyDial(t *testing.T) {
+ s := newServer(t)
+ defer s.Close()
+
+ proxyListener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("listen failed: %v", err)
+ }
+ defer proxyListener.Close()
+ go func() {
+ c1, err := proxyListener.Accept()
+ if err != nil {
+ t.Errorf("proxy accept failed: %v", err)
+ return
+ }
+ defer c1.Close()
+
+ c1.SetDeadline(time.Now().Add(30 * time.Second))
+
+ buf := make([]byte, 32)
+ if _, err := io.ReadFull(c1, buf[:3]); err != nil {
+ t.Errorf("read failed: %v", err)
+ return
+ }
+ if want := []byte{5, 1, 0}; !bytes.Equal(want, buf[:len(want)]) {
+ t.Errorf("read %x, want %x", buf[:len(want)], want)
+ }
+ if _, err := c1.Write([]byte{5, 0}); err != nil {
+ t.Errorf("write failed: %v", err)
+ return
+ }
+ if _, err := io.ReadFull(c1, buf[:10]); err != nil {
+ t.Errorf("read failed: %v", err)
+ return
+ }
+ if want := []byte{5, 1, 0, 1}; !bytes.Equal(want, buf[:len(want)]) {
+ t.Errorf("read %x, want %x", buf[:len(want)], want)
+ return
+ }
+ buf[1] = 0
+ if _, err := c1.Write(buf[:10]); err != nil {
+ t.Errorf("write failed: %v", err)
+ return
+ }
+
+ ip := net.IP(buf[4:8])
+ port := binary.BigEndian.Uint16(buf[8:10])
+
+ c2, err := net.DialTCP("tcp", nil, &net.TCPAddr{IP: ip, Port: int(port)})
+ if err != nil {
+ t.Errorf("dial failed; %v", err)
+ return
+ }
+ defer c2.Close()
+ done := make(chan struct{})
+ go func() {
+ io.Copy(c1, c2)
+ close(done)
+ }()
+ io.Copy(c2, c1)
+ <-done
+ }()
+
+ purl, err := url.Parse("socks5://" + proxyListener.Addr().String())
+ if err != nil {
+ t.Fatalf("parse failed: %v", err)
+ }
+
+ cstDialer := cstDialer // make local copy for modification on next line.
+ cstDialer.Proxy = http.ProxyURL(purl)
+
+ ws, _, err := cstDialer.Dial(s.URL, nil)
+ if err != nil {
+ t.Fatalf("Dial: %v", err)
+ }
+ defer ws.Close()
+ sendRecv(t, ws)
+}
diff --git a/vendor/github.com/gorilla/websocket/client_test.go b/vendor/github.com/gorilla/websocket/client_test.go
index 7d2b0844f..5aa27b37d 100644
--- a/vendor/github.com/gorilla/websocket/client_test.go
+++ b/vendor/github.com/gorilla/websocket/client_test.go
@@ -6,49 +6,9 @@ package websocket
import (
"net/url"
- "reflect"
"testing"
)
-var parseURLTests = []struct {
- s string
- u *url.URL
- rui string
-}{
- {"ws://example.com/", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}, "/"},
- {"ws://example.com", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}, "/"},
- {"ws://example.com:7777/", &url.URL{Scheme: "ws", Host: "example.com:7777", Opaque: "/"}, "/"},
- {"wss://example.com/", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/"}, "/"},
- {"wss://example.com/a/b", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/a/b"}, "/a/b"},
- {"ss://example.com/a/b", nil, ""},
- {"ws://webmaster@example.com/", nil, ""},
- {"wss://example.com/a/b?x=y", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/a/b", RawQuery: "x=y"}, "/a/b?x=y"},
- {"wss://example.com?x=y", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/", RawQuery: "x=y"}, "/?x=y"},
-}
-
-func TestParseURL(t *testing.T) {
- for _, tt := range parseURLTests {
- u, err := parseURL(tt.s)
- if tt.u != nil && err != nil {
- t.Errorf("parseURL(%q) returned error %v", tt.s, err)
- continue
- }
- if tt.u == nil {
- if err == nil {
- t.Errorf("parseURL(%q) did not return error", tt.s)
- }
- continue
- }
- if !reflect.DeepEqual(u, tt.u) {
- t.Errorf("parseURL(%q) = %v, want %v", tt.s, u, tt.u)
- continue
- }
- if u.RequestURI() != tt.rui {
- t.Errorf("parseURL(%q).RequestURI() = %v, want %v", tt.s, u.RequestURI(), tt.rui)
- }
- }
-}
-
var hostPortNoPortTests = []struct {
u *url.URL
hostPort, hostNoPort string
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
index 97e1dbacb..cd3569d53 100644
--- a/vendor/github.com/gorilla/websocket/conn.go
+++ b/vendor/github.com/gorilla/websocket/conn.go
@@ -76,7 +76,7 @@ const (
// is UTF-8 encoded text.
PingMessage = 9
- // PongMessage denotes a ping control message. The optional message payload
+ // PongMessage denotes a pong control message. The optional message payload
// is UTF-8 encoded text.
PongMessage = 10
)
@@ -100,9 +100,8 @@ func (e *netError) Error() string { return e.msg }
func (e *netError) Temporary() bool { return e.temporary }
func (e *netError) Timeout() bool { return e.timeout }
-// CloseError represents close frame.
+// CloseError represents a close message.
type CloseError struct {
-
// Code is defined in RFC 6455, section 11.7.
Code int
@@ -343,7 +342,8 @@ func (c *Conn) Subprotocol() string {
return c.subprotocol
}
-// Close closes the underlying network connection without sending or waiting for a close frame.
+// Close closes the underlying network connection without sending or waiting
+// for a close message.
func (c *Conn) Close() error {
return c.conn.Close()
}
@@ -484,6 +484,9 @@ func (c *Conn) prepWrite(messageType int) error {
//
// There can be at most one open writer on a connection. NextWriter closes the
// previous writer if the application has not already done so.
+//
+// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
+// PongMessage) are supported.
func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
if err := c.prepWrite(messageType); err != nil {
return nil, err
@@ -764,7 +767,6 @@ func (c *Conn) SetWriteDeadline(t time.Time) error {
// Read methods
func (c *Conn) advanceFrame() (int, error) {
-
// 1. Skip remainder of previous frame.
if c.readRemaining > 0 {
@@ -1033,7 +1035,7 @@ func (c *Conn) SetReadDeadline(t time.Time) error {
}
// SetReadLimit sets the maximum size for a message read from the peer. If a
-// message exceeds the limit, the connection sends a close frame to the peer
+// message exceeds the limit, the connection sends a close message to the peer
// and returns ErrReadLimit to the application.
func (c *Conn) SetReadLimit(limit int64) {
c.readLimit = limit
@@ -1046,24 +1048,21 @@ func (c *Conn) CloseHandler() func(code int, text string) error {
// SetCloseHandler sets the handler for close messages received from the peer.
// The code argument to h is the received close code or CloseNoStatusReceived
-// if the close message is empty. The default close handler sends a close frame
-// back to the peer.
+// if the close message is empty. The default close handler sends a close
+// message back to the peer.
//
// The application must read the connection to process close messages as
-// described in the section on Control Frames above.
+// described in the section on Control Messages above.
//
-// The connection read methods return a CloseError when a close frame is
+// The connection read methods return a CloseError when a close message is
// received. Most applications should handle close messages as part of their
// normal error handling. Applications should only set a close handler when the
-// application must perform some action before sending a close frame back to
+// application must perform some action before sending a close message back to
// the peer.
func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
if h == nil {
h = func(code int, text string) error {
- message := []byte{}
- if code != CloseNoStatusReceived {
- message = FormatCloseMessage(code, "")
- }
+ message := FormatCloseMessage(code, "")
c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
return nil
}
@@ -1077,11 +1076,11 @@ func (c *Conn) PingHandler() func(appData string) error {
}
// SetPingHandler sets the handler for ping messages received from the peer.
-// The appData argument to h is the PING frame application data. The default
+// The appData argument to h is the PING message application data. The default
// ping handler sends a pong to the peer.
//
// The application must read the connection to process ping messages as
-// described in the section on Control Frames above.
+// described in the section on Control Messages above.
func (c *Conn) SetPingHandler(h func(appData string) error) {
if h == nil {
h = func(message string) error {
@@ -1103,11 +1102,11 @@ func (c *Conn) PongHandler() func(appData string) error {
}
// SetPongHandler sets the handler for pong messages received from the peer.
-// The appData argument to h is the PONG frame application data. The default
+// The appData argument to h is the PONG message application data. The default
// pong handler does nothing.
//
// The application must read the connection to process ping messages as
-// described in the section on Control Frames above.
+// described in the section on Control Messages above.
func (c *Conn) SetPongHandler(h func(appData string) error) {
if h == nil {
h = func(string) error { return nil }
@@ -1141,7 +1140,14 @@ func (c *Conn) SetCompressionLevel(level int) error {
}
// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+// An empty message is returned for code CloseNoStatusReceived.
func FormatCloseMessage(closeCode int, text string) []byte {
+ if closeCode == CloseNoStatusReceived {
+ // Return empty message because it's illegal to send
+ // CloseNoStatusReceived. Return non-nil value in case application
+ // checks for nil.
+ return []byte{}
+ }
buf := make([]byte, 2+len(text))
binary.BigEndian.PutUint16(buf, uint16(closeCode))
copy(buf[2:], text)
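
A small sketch of a graceful shutdown using the clarified helper; conn is assumed to be an established *websocket.Conn:

    func closeGracefully(conn *websocket.Conn) error {
        // FormatCloseMessage encodes the code in the first two bytes,
        // followed by the text; for CloseNoStatusReceived it now returns
        // an empty (but non-nil) payload instead.
        msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "bye")
        if err := conn.WriteControl(websocket.CloseMessage, msg, time.Now().Add(time.Second)); err != nil {
            return err
        }
        return conn.Close()
    }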
diff --git a/vendor/github.com/gorilla/websocket/conn_test.go b/vendor/github.com/gorilla/websocket/conn_test.go
index 06e9bc3f5..5fda7b5ca 100644
--- a/vendor/github.com/gorilla/websocket/conn_test.go
+++ b/vendor/github.com/gorilla/websocket/conn_test.go
@@ -341,7 +341,6 @@ func TestUnderlyingConn(t *testing.T) {
}
func TestBufioReadBytes(t *testing.T) {
-
// Test calling bufio.ReadBytes for value longer than read buffer size.
m := make([]byte, 512)
@@ -366,7 +365,7 @@ func TestBufioReadBytes(t *testing.T) {
t.Fatalf("ReadBytes() returned %v", err)
}
if len(p) != len(m) {
- t.Fatalf("read returnd %d bytes, want %d bytes", len(p), len(m))
+ t.Fatalf("read returned %d bytes, want %d bytes", len(p), len(m))
}
}
diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go
index e291a952c..dcce1a63c 100644
--- a/vendor/github.com/gorilla/websocket/doc.go
+++ b/vendor/github.com/gorilla/websocket/doc.go
@@ -6,9 +6,8 @@
//
// Overview
//
-// The Conn type represents a WebSocket connection. A server application uses
-// the Upgrade function from an Upgrader object with a HTTP request handler
-// to get a pointer to a Conn:
+// The Conn type represents a WebSocket connection. A server application calls
+// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
//
// var upgrader = websocket.Upgrader{
// ReadBufferSize: 1024,
@@ -31,10 +30,12 @@
// for {
// messageType, p, err := conn.ReadMessage()
// if err != nil {
+// log.Println(err)
// return
// }
-// if err = conn.WriteMessage(messageType, p); err != nil {
-// return err
+// if err := conn.WriteMessage(messageType, p); err != nil {
+// log.Println(err)
+// return
// }
// }
//
@@ -85,20 +86,26 @@
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
// methods to send a control message to the peer.
//
-// Connections handle received close messages by sending a close message to the
-// peer and returning a *CloseError from the the NextReader, ReadMessage or the
-// message Read method.
+// Connections handle received close messages by calling the handler function
+// set with the SetCloseHandler method and by returning a *CloseError from the
+// NextReader, ReadMessage or the message Read method. The default close
+// handler sends a close message to the peer.
//
-// Connections handle received ping and pong messages by invoking callback
-// functions set with SetPingHandler and SetPongHandler methods. The callback
-// functions are called from the NextReader, ReadMessage and the message Read
-// methods.
+// Connections handle received ping messages by calling the handler function
+// set with the SetPingHandler method. The default ping handler sends a pong
+// message to the peer.
//
-// The default ping handler sends a pong to the peer. The application's reading
-// goroutine can block for a short time while the handler writes the pong data
-// to the connection.
+// Connections handle received pong messages by calling the handler function
+// set with the SetPongHandler method. The default pong handler does nothing.
+// If an application sends ping messages, then the application should set a
+// pong handler to receive the corresponding pong.
//
-// The application must read the connection to process ping, pong and close
+// The control message handler functions are called from the NextReader,
+// ReadMessage and message reader Read methods. The default close and ping
+// handlers can block these methods for a short time when the handler writes to
+// the connection.
+//
+// The application must read the connection to process close, ping and pong
// messages sent from the peer. If the application is not otherwise interested
// in messages from the peer, then the application should start a goroutine to
// read and discard messages from the peer. A simple example is:
@@ -137,19 +144,12 @@
// method fails the WebSocket handshake with HTTP status 403.
//
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
-// the handshake if the Origin request header is present and not equal to the
-// Host request header.
-//
-// An application can allow connections from any origin by specifying a
-// function that always returns true:
-//
-// var upgrader = websocket.Upgrader{
-// CheckOrigin: func(r *http.Request) bool { return true },
-// }
+// the handshake if the Origin request header is present and the Origin host is
+// not equal to the Host request header.
//
-// The deprecated Upgrade function does not enforce an origin policy. It's the
-// application's responsibility to check the Origin header before calling
-// Upgrade.
+// The deprecated package-level Upgrade function does not perform origin
+// checking. The application is responsible for checking the Origin header
+// before calling the Upgrade function.
//
// Compression EXPERIMENTAL
//
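
Putting the revised control-message docs together, a hedged keepalive sketch: the application pings on a ticker, extends its read deadline in the pong handler, and keeps reading so the handlers actually run (the interval and deadline policy are assumptions):

    func keepAlive(conn *websocket.Conn, interval time.Duration) {
        conn.SetReadDeadline(time.Now().Add(2 * interval))
        conn.SetPongHandler(func(string) error {
            // Each pong extends the read deadline.
            return conn.SetReadDeadline(time.Now().Add(2 * interval))
        })
        go func() {
            t := time.NewTicker(interval)
            defer t.Stop()
            for range t.C {
                if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(interval)); err != nil {
                    return
                }
            }
        }()
        // Reading the connection is what drives the pong handler.
        for {
            if _, _, err := conn.ReadMessage(); err != nil {
                return
            }
        }
    }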
diff --git a/vendor/github.com/gorilla/websocket/examples/chat/README.md b/vendor/github.com/gorilla/websocket/examples/chat/README.md
index 47c82f908..7baf3e328 100644
--- a/vendor/github.com/gorilla/websocket/examples/chat/README.md
+++ b/vendor/github.com/gorilla/websocket/examples/chat/README.md
@@ -1,6 +1,6 @@
# Chat Example
-This application shows how to use use the
+This application shows how to use the
[websocket](https://github.com/gorilla/websocket) package to implement a simple
web chat application.
diff --git a/vendor/github.com/gorilla/websocket/examples/chat/client.go b/vendor/github.com/gorilla/websocket/examples/chat/client.go
index ecfd9a7aa..9461c1ea0 100644
--- a/vendor/github.com/gorilla/websocket/examples/chat/client.go
+++ b/vendor/github.com/gorilla/websocket/examples/chat/client.go
@@ -64,7 +64,7 @@ func (c *Client) readPump() {
for {
_, message, err := c.conn.ReadMessage()
if err != nil {
- if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {
+ if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
log.Printf("error: %v", err)
}
break
@@ -113,7 +113,7 @@ func (c *Client) writePump() {
}
case <-ticker.C:
c.conn.SetWriteDeadline(time.Now().Add(writeWait))
- if err := c.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
+ if err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
return
}
}
diff --git a/vendor/github.com/gorilla/websocket/examples/echo/server.go b/vendor/github.com/gorilla/websocket/examples/echo/server.go
index a685b0974..ecc680c8b 100644
--- a/vendor/github.com/gorilla/websocket/examples/echo/server.go
+++ b/vendor/github.com/gorilla/websocket/examples/echo/server.go
@@ -55,6 +55,7 @@ func main() {
var homeTemplate = template.Must(template.New("").Parse(`
<!DOCTYPE html>
+<html>
<head>
<meta charset="utf-8">
<script>
diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go
index 4f0e36875..dc2c1f641 100644
--- a/vendor/github.com/gorilla/websocket/json.go
+++ b/vendor/github.com/gorilla/websocket/json.go
@@ -9,12 +9,14 @@ import (
"io"
)
-// WriteJSON is deprecated, use c.WriteJSON instead.
+// WriteJSON writes the JSON encoding of v as a message.
+//
+// Deprecated: Use c.WriteJSON instead.
func WriteJSON(c *Conn, v interface{}) error {
return c.WriteJSON(v)
}
-// WriteJSON writes the JSON encoding of v to the connection.
+// WriteJSON writes the JSON encoding of v as a message.
//
// See the documentation for encoding/json Marshal for details about the
// conversion of Go values to JSON.
@@ -31,7 +33,10 @@ func (c *Conn) WriteJSON(v interface{}) error {
return err2
}
-// ReadJSON is deprecated, use c.ReadJSON instead.
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// Deprecated: Use c.ReadJSON instead.
func ReadJSON(c *Conn, v interface{}) error {
return c.ReadJSON(v)
}
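
The connection methods these deprecation notices point to, in a minimal echo sketch (the message type is illustrative):

    type chatMessage struct {
        User string `json:"user"`
        Text string `json:"text"`
    }

    func echoJSON(conn *websocket.Conn) error {
        var msg chatMessage
        if err := conn.ReadJSON(&msg); err != nil {
            return err
        }
        return conn.WriteJSON(msg)
    }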
diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go
index 6a88bbc74..577fce9ef 100644
--- a/vendor/github.com/gorilla/websocket/mask.go
+++ b/vendor/github.com/gorilla/websocket/mask.go
@@ -11,7 +11,6 @@ import "unsafe"
const wordSize = int(unsafe.Sizeof(uintptr(0)))
func maskBytes(key [4]byte, pos int, b []byte) int {
-
// Mask one byte at a time for small buffers.
if len(b) < 2*wordSize {
for i := range b {
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go
new file mode 100644
index 000000000..102538bd3
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/proxy.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/base64"
+ "errors"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+type netDialerFunc func(network, addr string) (net.Conn, error)
+
+func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
+ return fn(network, addr)
+}
+
+func init() {
+ proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
+ return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
+ })
+}
+
+type httpProxyDialer struct {
+ proxyURL *url.URL
+ forwardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+ hostPort, _ := hostPortNoPort(hpd.proxyURL)
+ conn, err := hpd.forwardDial(network, hostPort)
+ if err != nil {
+ return nil, err
+ }
+
+ connectHeader := make(http.Header)
+ if user := hpd.proxyURL.User; user != nil {
+ proxyUser := user.Username()
+ if proxyPassword, passwordSet := user.Password(); passwordSet {
+ credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+ connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+ }
+ }
+
+ connectReq := &http.Request{
+ Method: "CONNECT",
+ URL: &url.URL{Opaque: addr},
+ Host: addr,
+ Header: connectHeader,
+ }
+
+ if err := connectReq.Write(conn); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ // Read response. It's OK to use and discard the buffered reader here because
+ // the remote server does not speak until spoken to.
+ br := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(br, connectReq)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ if resp.StatusCode != 200 {
+ conn.Close()
+ f := strings.SplitN(resp.Status, " ", 2)
+ return nil, errors.New(f[1])
+ }
+ return conn, nil
+}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
index 3495e0f1a..50b58a893 100644
--- a/vendor/github.com/gorilla/websocket/server.go
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -44,8 +44,12 @@ type Upgrader struct {
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
// CheckOrigin returns true if the request Origin header is acceptable. If
- // CheckOrigin is nil, the host in the Origin header must not be set or
- // must match the host of the request.
+ // CheckOrigin is nil, then a safe default is used: return false if the
+ // Origin request header is present and the origin host is not equal to
+ // the request Host header.
+ //
+ // A CheckOrigin function should carefully validate the request origin to
+ // prevent cross-site request forgery.
CheckOrigin func(r *http.Request) bool
// EnableCompression specify if the server should attempt to negotiate per
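
A cautious example of the explicit validation the new comment asks for; the allowed origin is a placeholder:

    var upgrader = websocket.Upgrader{
        CheckOrigin: func(r *http.Request) bool {
            origin := r.Header.Get("Origin")
            // Like the default, treat a missing Origin header as
            // same-origin; otherwise allow only one trusted origin.
            return origin == "" || origin == "https://app.example.com"
        },
    }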
@@ -76,7 +80,7 @@ func checkSameOrigin(r *http.Request) bool {
if err != nil {
return false
}
- return u.Host == r.Host
+ return equalASCIIFold(u.Host, r.Host)
}
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
@@ -104,32 +108,34 @@ func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
// response.
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
- if r.Method != "GET" {
- return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET")
- }
-
- if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
- return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported")
- }
+ const badHandshake = "websocket: the client is not using the websocket protocol: "
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
- return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header")
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
}
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
- return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header")
+ return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
+ }
+
+ if r.Method != "GET" {
+ return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
}
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
}
+ if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported")
+ }
+
checkOrigin := u.CheckOrigin
if checkOrigin == nil {
checkOrigin = checkSameOrigin
}
if !checkOrigin(r) {
- return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed")
+ return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
}
challengeKey := r.Header.Get("Sec-Websocket-Key")
@@ -230,10 +236,11 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
//
-// This function is deprecated, use websocket.Upgrader instead.
+// Deprecated: Use websocket.Upgrader instead.
//
-// The application is responsible for checking the request origin before
-// calling Upgrade. An example implementation of the same origin policy is:
+// Upgrade does not perform origin checking. The application is responsible for
+// checking the Origin header before calling Upgrade. An example implementation
+// of the same origin policy check is:
//
// if req.Header.Get("Origin") != "http://"+req.Host {
// http.Error(w, "Origin not allowed", 403)
diff --git a/vendor/github.com/gorilla/websocket/server_test.go b/vendor/github.com/gorilla/websocket/server_test.go
index 0a28141d6..c43dbb267 100644
--- a/vendor/github.com/gorilla/websocket/server_test.go
+++ b/vendor/github.com/gorilla/websocket/server_test.go
@@ -49,3 +49,21 @@ func TestIsWebSocketUpgrade(t *testing.T) {
}
}
}
+
+var checkSameOriginTests = []struct {
+ ok bool
+ r *http.Request
+}{
+ {false, &http.Request{Host: "example.org", Header: map[string][]string{"Origin": []string{"https://other.org"}}}},
+ {true, &http.Request{Host: "example.org", Header: map[string][]string{"Origin": []string{"https://example.org"}}}},
+ {true, &http.Request{Host: "Example.org", Header: map[string][]string{"Origin": []string{"https://example.org"}}}},
+}
+
+func TestCheckSameOrigin(t *testing.T) {
+ for _, tt := range checkSameOriginTests {
+ ok := checkSameOrigin(tt.r)
+ if tt.ok != ok {
+ t.Errorf("checkSameOrigin(%+v) returned %v, want %v", tt.r, ok, tt.ok)
+ }
+ }
+}
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
index 9a4908df2..385fa01be 100644
--- a/vendor/github.com/gorilla/websocket/util.go
+++ b/vendor/github.com/gorilla/websocket/util.go
@@ -11,6 +11,7 @@ import (
"io"
"net/http"
"strings"
+ "unicode/utf8"
)
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
@@ -111,14 +112,14 @@ func nextTokenOrQuoted(s string) (value string, rest string) {
case escape:
escape = false
p[j] = b
- j += 1
+ j++
case b == '\\':
escape = true
case b == '"':
return string(p[:j]), s[i+1:]
default:
p[j] = b
- j += 1
+ j++
}
}
return "", ""
@@ -127,8 +128,31 @@ func nextTokenOrQuoted(s string) (value string, rest string) {
return "", ""
}
+// equalASCIIFold returns true if s is equal to t with ASCII case folding.
+func equalASCIIFold(s, t string) bool {
+ for s != "" && t != "" {
+ sr, size := utf8.DecodeRuneInString(s)
+ s = s[size:]
+ tr, size := utf8.DecodeRuneInString(t)
+ t = t[size:]
+ if sr == tr {
+ continue
+ }
+ if 'A' <= sr && sr <= 'Z' {
+ sr = sr + 'a' - 'A'
+ }
+ if 'A' <= tr && tr <= 'Z' {
+ tr = tr + 'a' - 'A'
+ }
+ if sr != tr {
+ return false
+ }
+ }
+ return s == t
+}
+
// tokenListContainsValue returns true if the 1#token header with the given
-// name contains token.
+// name contains a token equal to value with ASCII case folding.
func tokenListContainsValue(header http.Header, name string, value string) bool {
headers:
for _, s := range header[name] {
@@ -142,7 +166,7 @@ headers:
if s != "" && s[0] != ',' {
continue headers
}
- if strings.EqualFold(t, value) {
+ if equalASCIIFold(t, value) {
return true
}
if s == "" {
@@ -156,7 +180,6 @@ headers:
// parseExtensions parses WebSocket extensions from a header.
func parseExtensions(header http.Header) []map[string]string {
-
// From RFC 6455:
//
// Sec-WebSocket-Extensions = extension-list
diff --git a/vendor/github.com/gorilla/websocket/util_test.go b/vendor/github.com/gorilla/websocket/util_test.go
index 610e613c0..6e15965f5 100644
--- a/vendor/github.com/gorilla/websocket/util_test.go
+++ b/vendor/github.com/gorilla/websocket/util_test.go
@@ -10,6 +10,24 @@ import (
"testing"
)
+var equalASCIIFoldTests = []struct {
+ t, s string
+ eq bool
+}{
+ {"WebSocket", "websocket", true},
+ {"websocket", "WebSocket", true},
+ {"Öyster", "öyster", false},
+}
+
+func TestEqualASCIIFold(t *testing.T) {
+ for _, tt := range equalASCIIFoldTests {
+ eq := equalASCIIFold(tt.s, tt.t)
+ if eq != tt.eq {
+ t.Errorf("equalASCIIFold(%q, %q) = %v, want %v", tt.s, tt.t, eq, tt.eq)
+ }
+ }
+}
+
var tokenListContainsValueTests = []struct {
value string
ok bool
@@ -38,29 +56,32 @@ var parseExtensionTests = []struct {
value string
extensions []map[string]string
}{
- {`foo`, []map[string]string{map[string]string{"": "foo"}}},
+ {`foo`, []map[string]string{{"": "foo"}}},
{`foo, bar; baz=2`, []map[string]string{
- map[string]string{"": "foo"},
- map[string]string{"": "bar", "baz": "2"}}},
+ {"": "foo"},
+ {"": "bar", "baz": "2"}}},
{`foo; bar="b,a;z"`, []map[string]string{
- map[string]string{"": "foo", "bar": "b,a;z"}}},
+ {"": "foo", "bar": "b,a;z"}}},
{`foo , bar; baz = 2`, []map[string]string{
- map[string]string{"": "foo"},
- map[string]string{"": "bar", "baz": "2"}}},
+ {"": "foo"},
+ {"": "bar", "baz": "2"}}},
{`foo, bar; baz=2 junk`, []map[string]string{
- map[string]string{"": "foo"}}},
+ {"": "foo"}}},
{`foo junk, bar; baz=2 junk`, nil},
{`mux; max-channels=4; flow-control, deflate-stream`, []map[string]string{
- map[string]string{"": "mux", "max-channels": "4", "flow-control": ""},
- map[string]string{"": "deflate-stream"}}},
+ {"": "mux", "max-channels": "4", "flow-control": ""},
+ {"": "deflate-stream"}}},
{`permessage-foo; x="10"`, []map[string]string{
- map[string]string{"": "permessage-foo", "x": "10"}}},
+ {"": "permessage-foo", "x": "10"}}},
{`permessage-foo; use_y, permessage-foo`, []map[string]string{
- map[string]string{"": "permessage-foo", "use_y": ""},
- map[string]string{"": "permessage-foo"}}},
+ {"": "permessage-foo", "use_y": ""},
+ {"": "permessage-foo"}}},
{`permessage-deflate; client_max_window_bits; server_max_window_bits=10 , permessage-deflate; client_max_window_bits`, []map[string]string{
- map[string]string{"": "permessage-deflate", "client_max_window_bits": "", "server_max_window_bits": "10"},
- map[string]string{"": "permessage-deflate", "client_max_window_bits": ""}}},
+ {"": "permessage-deflate", "client_max_window_bits": "", "server_max_window_bits": "10"},
+ {"": "permessage-deflate", "client_max_window_bits": ""}}},
+ {"permessage-deflate; server_no_context_takeover; client_max_window_bits=15", []map[string]string{
+ {"": "permessage-deflate", "server_no_context_takeover": "", "client_max_window_bits": "15"},
+ }},
}
func TestParseExtensions(t *testing.T) {
diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go
new file mode 100644
index 000000000..2e668f6b8
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go
@@ -0,0 +1,473 @@
+// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
+//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+//
+
+package websocket
+
+import (
+ "errors"
+ "io"
+ "net"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type proxy_direct struct{}
+
+// Direct is a direct proxy: one that makes network connections directly.
+var proxy_Direct = proxy_direct{}
+
+func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
+ return net.Dial(network, addr)
+}
+
+// A PerHost directs connections to a default Dialer unless the host name
+// requested matches one of a number of exceptions.
+type proxy_PerHost struct {
+ def, bypass proxy_Dialer
+
+ bypassNetworks []*net.IPNet
+ bypassIPs []net.IP
+ bypassZones []string
+ bypassHosts []string
+}
+
+// NewPerHost returns a PerHost Dialer that directs connections to either
+// defaultDialer or bypass, depending on whether the connection matches one of
+// the configured rules.
+func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
+ return &proxy_PerHost{
+ def: defaultDialer,
+ bypass: bypass,
+ }
+}
+
+// Dial connects to the address addr on the given network through either
+// defaultDialer or bypass.
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.dialerForRequest(host).Dial(network, addr)
+}
+
+func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
+ if ip := net.ParseIP(host); ip != nil {
+ for _, net := range p.bypassNetworks {
+ if net.Contains(ip) {
+ return p.bypass
+ }
+ }
+ for _, bypassIP := range p.bypassIPs {
+ if bypassIP.Equal(ip) {
+ return p.bypass
+ }
+ }
+ return p.def
+ }
+
+ for _, zone := range p.bypassZones {
+ if strings.HasSuffix(host, zone) {
+ return p.bypass
+ }
+ if host == zone[1:] {
+ // For a zone ".example.com", we match "example.com"
+ // too.
+ return p.bypass
+ }
+ }
+ for _, bypassHost := range p.bypassHosts {
+ if bypassHost == host {
+ return p.bypass
+ }
+ }
+ return p.def
+}
+
+// AddFromString parses a string that contains comma-separated values
+// specifying hosts that should use the bypass proxy. Each value is either an
+// IP address, a CIDR range, a zone (*.example.com) or a host name
+// (localhost). A best effort is made to parse the string and errors are
+// ignored.
+func (p *proxy_PerHost) AddFromString(s string) {
+ hosts := strings.Split(s, ",")
+ for _, host := range hosts {
+ host = strings.TrimSpace(host)
+ if len(host) == 0 {
+ continue
+ }
+ if strings.Contains(host, "/") {
+ // We assume that it's a CIDR address like 127.0.0.0/8
+ if _, net, err := net.ParseCIDR(host); err == nil {
+ p.AddNetwork(net)
+ }
+ continue
+ }
+ if ip := net.ParseIP(host); ip != nil {
+ p.AddIP(ip)
+ continue
+ }
+ if strings.HasPrefix(host, "*.") {
+ p.AddZone(host[1:])
+ continue
+ }
+ p.AddHost(host)
+ }
+}
+
+// AddIP specifies an IP address that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match an IP.
+func (p *proxy_PerHost) AddIP(ip net.IP) {
+ p.bypassIPs = append(p.bypassIPs, ip)
+}
+
+// AddNetwork specifies an IP range that will use the bypass proxy. Note that
+// this will only take effect if a literal IP address is dialed. A connection
+// to a named host will never match.
+func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
+ p.bypassNetworks = append(p.bypassNetworks, net)
+}
+
+// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
+// "example.com" matches "example.com" and all of its subdomains.
+func (p *proxy_PerHost) AddZone(zone string) {
+ if strings.HasSuffix(zone, ".") {
+ zone = zone[:len(zone)-1]
+ }
+ if !strings.HasPrefix(zone, ".") {
+ zone = "." + zone
+ }
+ p.bypassZones = append(p.bypassZones, zone)
+}
+
+// AddHost specifies a host name that will use the bypass proxy.
+func (p *proxy_PerHost) AddHost(host string) {
+ if strings.HasSuffix(host, ".") {
+ host = host[:len(host)-1]
+ }
+ p.bypassHosts = append(p.bypassHosts, host)
+}
+
+// A Dialer is a means to establish a connection.
+type proxy_Dialer interface {
+ // Dial connects to the given address via the proxy.
+ Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type proxy_Auth struct {
+ User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy related variables in
+// the environment.
+func proxy_FromEnvironment() proxy_Dialer {
+ allProxy := proxy_allProxyEnv.Get()
+ if len(allProxy) == 0 {
+ return proxy_Direct
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return proxy_Direct
+ }
+ proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
+ if err != nil {
+ return proxy_Direct
+ }
+
+ noProxy := proxy_noProxyEnv.Get()
+ if len(noProxy) == 0 {
+ return proxy
+ }
+
+ perHost := proxy_NewPerHost(proxy, proxy_Direct)
+ perHost.AddFromString(noProxy)
+ return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
+ if proxy_proxySchemes == nil {
+ proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
+ }
+ proxy_proxySchemes[scheme] = f
+}
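+
+// Illustrative example (not part of the vendored source): a package can
+// teach FromURL a new scheme by registering a factory for it:
+//
+//	proxy_RegisterDialerType("socks5h", newSOCKS5HDialer)
+//
+// where newSOCKS5HDialer is a hypothetical
+// func(*url.URL, proxy_Dialer) (proxy_Dialer, error).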
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
+ var auth *proxy_Auth
+ if u.User != nil {
+ auth = new(proxy_Auth)
+ auth.User = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ auth.Password = p
+ }
+ }
+
+ switch u.Scheme {
+ case "socks5":
+ return proxy_SOCKS5("tcp", u.Host, auth, forward)
+ }
+
+ // If the scheme doesn't match any of the built-in schemes, see if it
+ // was registered by another package.
+ if proxy_proxySchemes != nil {
+ if f, ok := proxy_proxySchemes[u.Scheme]; ok {
+ return f(u, forward)
+ }
+ }
+
+ return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
+}
+
+var (
+ proxy_allProxyEnv = &proxy_envOnce{
+ names: []string{"ALL_PROXY", "all_proxy"},
+ }
+ proxy_noProxyEnv = &proxy_envOnce{
+ names: []string{"NO_PROXY", "no_proxy"},
+ }
+)
+
+// envOnce looks up an environment variable (optionally by multiple
+// names) once. It mitigates expensive lookups on some platforms
+// (e.g. Windows).
+// (Borrowed from net/http/transport.go)
+type proxy_envOnce struct {
+ names []string
+ once sync.Once
+ val string
+}
+
+func (e *proxy_envOnce) Get() string {
+ e.once.Do(e.init)
+ return e.val
+}
+
+func (e *proxy_envOnce) init() {
+ for _, n := range e.names {
+ e.val = os.Getenv(n)
+ if e.val != "" {
+ return
+ }
+ }
+}
+
+// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
+// with an optional username and password. See RFC 1928 and RFC 1929.
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
+ s := &proxy_socks5{
+ network: network,
+ addr: addr,
+ forward: forward,
+ }
+ if auth != nil {
+ s.user = auth.User
+ s.password = auth.Password
+ }
+
+ return s, nil
+}
+
+type proxy_socks5 struct {
+ user, password string
+ network, addr string
+ forward proxy_Dialer
+}
+
+const proxy_socks5Version = 5
+
+const (
+ proxy_socks5AuthNone = 0
+ proxy_socks5AuthPassword = 2
+)
+
+const proxy_socks5Connect = 1
+
+const (
+ proxy_socks5IP4 = 1
+ proxy_socks5Domain = 3
+ proxy_socks5IP6 = 4
+)
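+
+// For reference (RFC 1928), the CONNECT request assembled in connect below
+// follows the standard SOCKS5 layout; the constants above populate the CMD
+// and ATYP fields:
+//
+//	VER | CMD | RSV | ATYP | DST.ADDR | DST.PORT
+//	 1  |  1  | 00  |  1   | variable |    2      (bytes)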
+
+var proxy_socks5Errors = []string{
+ "",
+ "general failure",
+ "connection forbidden",
+ "network unreachable",
+ "host unreachable",
+ "connection refused",
+ "TTL expired",
+ "command not supported",
+ "address type not supported",
+}
+
+// Dial connects to the address addr on the given network via the SOCKS5 proxy.
+func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
+ switch network {
+ case "tcp", "tcp6", "tcp4":
+ default:
+ return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
+ }
+
+ conn, err := s.forward.Dial(s.network, s.addr)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.connect(conn, addr); err != nil {
+ conn.Close()
+ return nil, err
+ }
+ return conn, nil
+}
+
+// connect takes an existing connection to a socks5 proxy server,
+// and commands the server to extend that connection to target,
+// which must be a canonical address with a host and port.
+func (s *proxy_socks5) connect(conn net.Conn, target string) error {
+ host, portStr, err := net.SplitHostPort(target)
+ if err != nil {
+ return err
+ }
+
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return errors.New("proxy: failed to parse port number: " + portStr)
+ }
+ if port < 1 || port > 0xffff {
+ return errors.New("proxy: port number out of range: " + portStr)
+ }
+
+ // the size here is just an estimate
+ buf := make([]byte, 0, 6+len(host))
+
+ buf = append(buf, proxy_socks5Version)
+ if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
+ buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
+ } else {
+ buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
+ }
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ if buf[0] != 5 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
+ }
+ if buf[1] == 0xff {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
+ }
+
+ // See RFC 1929
+ if buf[1] == proxy_socks5AuthPassword {
+ buf = buf[:0]
+ buf = append(buf, 1 /* password protocol version */)
+ buf = append(buf, uint8(len(s.user)))
+ buf = append(buf, s.user...)
+ buf = append(buf, uint8(len(s.password)))
+ buf = append(buf, s.password...)
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if buf[1] != 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
+ }
+ }
+
+ buf = buf[:0]
+ buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
+
+ if ip := net.ParseIP(host); ip != nil {
+ if ip4 := ip.To4(); ip4 != nil {
+ buf = append(buf, proxy_socks5IP4)
+ ip = ip4
+ } else {
+ buf = append(buf, proxy_socks5IP6)
+ }
+ buf = append(buf, ip...)
+ } else {
+ if len(host) > 255 {
+ return errors.New("proxy: destination host name too long: " + host)
+ }
+ buf = append(buf, proxy_socks5Domain)
+ buf = append(buf, byte(len(host)))
+ buf = append(buf, host...)
+ }
+ buf = append(buf, byte(port>>8), byte(port))
+
+ if _, err := conn.Write(buf); err != nil {
+ return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ if _, err := io.ReadFull(conn, buf[:4]); err != nil {
+ return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ failure := "unknown error"
+ if int(buf[1]) < len(proxy_socks5Errors) {
+ failure = proxy_socks5Errors[buf[1]]
+ }
+
+ if len(failure) > 0 {
+ return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
+ }
+
+ bytesToDiscard := 0
+ switch buf[3] {
+ case proxy_socks5IP4:
+ bytesToDiscard = net.IPv4len
+ case proxy_socks5IP6:
+ bytesToDiscard = net.IPv6len
+ case proxy_socks5Domain:
+ _, err := io.ReadFull(conn, buf[:1])
+ if err != nil {
+ return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+ bytesToDiscard = int(buf[0])
+ default:
+ return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
+ }
+
+ if cap(buf) < bytesToDiscard {
+ buf = make([]byte, bytesToDiscard)
+ } else {
+ buf = buf[:bytesToDiscard]
+ }
+ if _, err := io.ReadFull(conn, buf); err != nil {
+ return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ // Also need to discard the port number
+ if _, err := io.ReadFull(conn, buf[:2]); err != nil {
+ return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix_test.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix_test.go
index 94d7578a3..bc9c77c20 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/iradix_test.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix_test.go
@@ -6,7 +6,7 @@ import (
"sort"
"testing"
- "github.com/hashicorp/uuid"
+ "github.com/hashicorp/go-uuid"
)
func CopyTree(t *Tree) *Tree {
@@ -55,7 +55,10 @@ func TestRadix_HugeTxn(t *testing.T) {
txn1 := r.Txn()
var expect []string
for i := 0; i < defaultModifiedCache*100; i++ {
- gen := uuid.GenerateUUID()
+ gen, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
txn1.Insert([]byte(gen), i)
expect = append(expect, gen)
}
@@ -85,7 +88,10 @@ func TestRadix(t *testing.T) {
var min, max string
inp := make(map[string]interface{})
for i := 0; i < 1000; i++ {
- gen := uuid.GenerateUUID()
+ gen, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
inp[gen] = i
if gen < min || i == 0 {
min = gen
diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go
index fadb88e5e..de6e5c17c 100644
--- a/vendor/github.com/lib/pq/conn.go
+++ b/vendor/github.com/lib/pq/conn.go
@@ -339,6 +339,15 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
if err != nil {
return nil, err
}
+
+ // cn.ssl and cn.startup panic on error. Make sure we don't leak cn.c.
+ panicking := true
+ defer func() {
+ if panicking {
+ cn.c.Close()
+ }
+ }()
+
cn.ssl(o)
cn.buf = bufio.NewReader(cn.c)
cn.startup(o)
@@ -347,6 +356,7 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
err = cn.c.SetDeadline(time.Time{})
}
+ panicking = false
return cn, err
}
diff --git a/vendor/github.com/lib/pq/conn_test.go b/vendor/github.com/lib/pq/conn_test.go
index 030a798c9..7c0f30eb7 100644
--- a/vendor/github.com/lib/pq/conn_test.go
+++ b/vendor/github.com/lib/pq/conn_test.go
@@ -28,7 +28,7 @@ func forceBinaryParameters() bool {
}
}
-func openTestConnConninfo(conninfo string) (*sql.DB, error) {
+func testConninfo(conninfo string) string {
defaultTo := func(envvar string, value string) {
if os.Getenv(envvar) == "" {
os.Setenv(envvar, value)
@@ -43,8 +43,11 @@ func openTestConnConninfo(conninfo string) (*sql.DB, error) {
!strings.HasPrefix(conninfo, "postgresql://") {
conninfo = conninfo + " binary_parameters=yes"
}
+ return conninfo
+}
- return sql.Open("postgres", conninfo)
+func openTestConnConninfo(conninfo string) (*sql.DB, error) {
+ return sql.Open("postgres", testConninfo(conninfo))
}
func openTestConn(t Fatalistic) *sql.DB {
@@ -637,6 +640,57 @@ func TestErrorDuringStartup(t *testing.T) {
}
}
+type testConn struct {
+ closed bool
+ net.Conn
+}
+
+func (c *testConn) Close() error {
+ c.closed = true
+ return c.Conn.Close()
+}
+
+type testDialer struct {
+ conns []*testConn
+}
+
+func (d *testDialer) Dial(ntw, addr string) (net.Conn, error) {
+ c, err := net.Dial(ntw, addr)
+ if err != nil {
+ return nil, err
+ }
+ tc := &testConn{Conn: c}
+ d.conns = append(d.conns, tc)
+ return tc, nil
+}
+
+func (d *testDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) {
+ c, err := net.DialTimeout(ntw, addr, timeout)
+ if err != nil {
+ return nil, err
+ }
+ tc := &testConn{Conn: c}
+ d.conns = append(d.conns, tc)
+ return tc, nil
+}
+
+func TestErrorDuringStartupClosesConn(t *testing.T) {
+ // Don't use the normal connection setup, this is intended to
+ // blow up in the startup packet from a non-existent user.
+ var d testDialer
+ c, err := DialOpen(&d, testConninfo("user=thisuserreallydoesntexist"))
+ if err == nil {
+ c.Close()
+ t.Fatal("expected dial error")
+ }
+ if len(d.conns) != 1 {
+ t.Fatalf("got len(d.conns) = %d, want = %d", len(d.conns), 1)
+ }
+ if !d.conns[0].closed {
+ t.Error("connection leaked")
+ }
+}
+
func TestBadConn(t *testing.T) {
var err error
diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go
index b4bb44cee..6928d9670 100644
--- a/vendor/github.com/lib/pq/error.go
+++ b/vendor/github.com/lib/pq/error.go
@@ -153,6 +153,7 @@ var errorCodeNames = map[ErrorCode]string{
"22004": "null_value_not_allowed",
"22002": "null_value_no_indicator_parameter",
"22003": "numeric_value_out_of_range",
+ "2200H": "sequence_generator_limit_exceeded",
"22026": "string_data_length_mismatch",
"22001": "string_data_right_truncation",
"22011": "substring_error",
diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go
index 412c6ac1e..304e081fe 100644
--- a/vendor/github.com/lib/pq/notify.go
+++ b/vendor/github.com/lib/pq/notify.go
@@ -637,7 +637,7 @@ func (l *Listener) disconnectCleanup() error {
// after the connection has been established.
func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error {
doneChan := make(chan error)
- go func() {
+ go func(notificationChan <-chan *Notification) {
for channel := range l.channels {
// If we got a response, return that error to our caller as it's
// going to be more descriptive than cn.Err().
@@ -658,7 +658,7 @@ func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notificatio
}
}
doneChan <- nil
- }()
+ }(notificationChan)
// Ignore notifications while synchronization is going on to avoid
// deadlocks. We have to send a nil notification over Notify anyway as
diff --git a/vendor/github.com/mailru/easyjson/.gitignore b/vendor/github.com/mailru/easyjson/.gitignore
new file mode 100644
index 000000000..db8c66edf
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/.gitignore
@@ -0,0 +1,4 @@
+.root
+*_easyjson.go
+*.iml
+.idea
diff --git a/vendor/github.com/mailru/easyjson/.travis.yml b/vendor/github.com/mailru/easyjson/.travis.yml
new file mode 100644
index 000000000..884f8bbdf
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+ - tip
+install:
+ - go get github.com/ugorji/go/codec
+ - go get github.com/pquerna/ffjson/fflib/v1
+ - go get github.com/json-iterator/go
+ - go get github.com/golang/lint/golint
diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE
new file mode 100644
index 000000000..fbff658f7
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/LICENSE
@@ -0,0 +1,7 @@
+Copyright (c) 2016 Mail.Ru Group
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mailru/easyjson/Makefile b/vendor/github.com/mailru/easyjson/Makefile
new file mode 100644
index 000000000..f877ab269
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/Makefile
@@ -0,0 +1,56 @@
+PKG=github.com/mailru/easyjson
+GOPATH:=$(PWD)/.root:$(GOPATH)
+export GOPATH
+
+all: test
+
+.root/src/$(PKG):
+ mkdir -p $@
+ for i in $$PWD/* ; do ln -s $$i $@/`basename $$i` ; done
+
+root: .root/src/$(PKG)
+
+clean:
+ rm -rf .root
+ rm -rf tests/*_easyjson.go
+
+build:
+ go build -i -o .root/bin/easyjson $(PKG)/easyjson
+
+generate: root build
+ .root/bin/easyjson -stubs \
+ .root/src/$(PKG)/tests/snake.go \
+ .root/src/$(PKG)/tests/data.go \
+ .root/src/$(PKG)/tests/omitempty.go \
+ .root/src/$(PKG)/tests/nothing.go \
+ .root/src/$(PKG)/tests/named_type.go
+
+ .root/bin/easyjson -all .root/src/$(PKG)/tests/data.go
+ .root/bin/easyjson -all .root/src/$(PKG)/tests/nothing.go
+ .root/bin/easyjson -all .root/src/$(PKG)/tests/errors.go
+ .root/bin/easyjson -snake_case .root/src/$(PKG)/tests/snake.go
+ .root/bin/easyjson -omit_empty .root/src/$(PKG)/tests/omitempty.go
+ .root/bin/easyjson -build_tags=use_easyjson .root/src/$(PKG)/benchmark/data.go
+ .root/bin/easyjson .root/src/$(PKG)/tests/nested_easy.go
+ .root/bin/easyjson .root/src/$(PKG)/tests/named_type.go
+
+test: generate root
+ go test \
+ $(PKG)/tests \
+ $(PKG)/jlexer \
+ $(PKG)/gen \
+ $(PKG)/buffer
+ go test -benchmem -tags use_easyjson -bench . $(PKG)/benchmark
+ golint -set_exit_status .root/src/$(PKG)/tests/*_easyjson.go
+
+bench-other: generate root
+ @go test -benchmem -bench . $(PKG)/benchmark
+ @go test -benchmem -tags use_ffjson -bench . $(PKG)/benchmark
+ @go test -benchmem -tags use_jsoniter -bench . $(PKG)/benchmark
+ @go test -benchmem -tags use_codec -bench . $(PKG)/benchmark
+
+bench-python:
+ benchmark/ujson.sh
+
+
+.PHONY: root clean generate test build
diff --git a/vendor/github.com/mailru/easyjson/README.md b/vendor/github.com/mailru/easyjson/README.md
new file mode 100644
index 000000000..9366e3f71
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/README.md
@@ -0,0 +1,331 @@
+# easyjson [![Build Status](https://travis-ci.org/mailru/easyjson.svg?branch=master)](https://travis-ci.org/mailru/easyjson) [![Go Report Card](https://goreportcard.com/badge/github.com/mailru/easyjson)](https://goreportcard.com/report/github.com/mailru/easyjson)
+
+Package easyjson provides a fast and easy way to marshal/unmarshal Go structs
+to/from JSON without the use of reflection. In performance tests, easyjson
+outperforms the standard `encoding/json` package by a factor of 4-5x, and other
+JSON encoding packages by a factor of 2-3x.
+
+easyjson aims to keep generated Go code simple enough so that it can be easily
+optimized or fixed. Another goal is to give users the ability to
+customize the generated code through options not available with the
+standard `encoding/json` package, such as generating "snake_case" names or
+enabling `omitempty` behavior by default.
+
+## Usage
+```sh
+# install
+go get -u github.com/mailru/easyjson/...
+
+# run
+easyjson -all <file>.go
+```
+
+The above will generate `<file>_easyjson.go` containing the appropriate marshaler and
+unmarshaler funcs for all structs contained in `<file>.go`.
+
+Please note that easyjson requires a full Go build environment and the `GOPATH`
+environment variable to be set. This is because easyjson code generation
+invokes `go run` on a temporary file (an approach to code generation borrowed
+from [ffjson](https://github.com/pquerna/ffjson)).
+
+## Options
+```txt
+Usage of easyjson:
+ -all
+ generate marshaler/unmarshalers for all structs in a file
+ -build_tags string
+ build tags to add to generated file
+ -leave_temps
+ do not delete temporary files
+ -no_std_marshalers
+ don't generate MarshalJSON/UnmarshalJSON funcs
+ -noformat
+ do not run 'gofmt -w' on output file
+ -omit_empty
+ omit empty fields by default
+ -output_filename string
+ specify the filename of the output
+ -pkg
+ process the whole package instead of just the given file
+ -snake_case
+ use snake_case names instead of CamelCase by default
+ -lower_camel_case
+ use lowerCamelCase instead of CamelCase by default
+ -stubs
+ only generate stubs for marshaler/unmarshaler funcs
+```
+
+Using `-all` will generate marshalers/unmarshalers for all Go structs in the
+file. If `-all` is not provided, then only those structs whose preceding
+comment starts with `easyjson:json` will have marshalers/unmarshalers
+generated. For example:
+
+```go
+//easyjson:json
+type A struct {}
+```
+
+Additional option notes:
+
+* `-snake_case` tells easyjson to generate snake\_case field names by default
+  (unless overridden by a field tag). The CamelCase to snake\_case conversion
+  algorithm should work in most cases (i.e., HTTPVersion will be converted to
+  "http_version").
+
+* `-build_tags` will add the specified build tags to generated Go sources.
+
+## Generated Marshaler/Unmarshaler Funcs
+
+For Go struct types, easyjson generates the funcs `MarshalEasyJSON` /
+`UnmarshalEasyJSON` for marshaling/unmarshaling JSON. In turn, these satisfy
+the `easyjson.Marshaler` and `easyjson.Unmarshaler` interfaces and, when used
+in conjunction with `easyjson.Marshal` / `easyjson.Unmarshal`, avoid unnecessary
+reflection / type assertions during marshaling/unmarshaling to/from JSON for Go
+structs.
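+
+A minimal usage sketch (assuming `A` is a struct for which the generated
+funcs already exist):
+
+```go
+a := A{}
+// Dispatches to the generated a.MarshalEasyJSON instead of reflection.
+data, err := easyjson.Marshal(&a)
+if err != nil {
+	// handle the error
+}
+
+var out A
+// Dispatches to the generated out.UnmarshalEasyJSON.
+err = easyjson.Unmarshal(data, &out)
+```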
+
+easyjson also generates `MarshalJSON` and `UnmarshalJSON` funcs for Go struct
+types compatible with the standard `json.Marshaler` and `json.Unmarshaler`
+interfaces. Please be aware that using the standard `json.Marshal` /
+`json.Unmarshal` for marshaling/unmarshaling will incur a significant
+performance penalty when compared to using `easyjson.Marshal` /
+`easyjson.Unmarshal`.
+
+Additionally, easyjson exposes utility funcs that use the generated
+`MarshalEasyJSON` / `UnmarshalEasyJSON` funcs for marshaling/unmarshaling to and from standard readers
+and writers. For example, easyjson provides `easyjson.MarshalToHTTPResponseWriter`
+which marshals to the standard `http.ResponseWriter`. Please see the [GoDoc
+listing](https://godoc.org/github.com/mailru/easyjson) for the full listing of
+utility funcs that are available.
+
+## Controlling easyjson Marshaling and Unmarshaling Behavior
+
+Go types can provide their own `MarshalEasyJSON` and `UnmarshalEasyJSON` funcs
+that satisfy the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces.
+These will be used by `easyjson.Marshal` and `easyjson.Unmarshal` when defined
+for a Go type.
+
+Go types can also satisfy the `easyjson.Optional` interface, which allows the
+type to define its own `omitempty` logic.
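+
+A sketch of custom `omitempty` logic (assuming `easyjson.Optional` is the
+single-method interface `IsDefined() bool`):
+
+```go
+type Money struct {
+	Cents int64
+	Set   bool
+}
+
+// IsDefined reports whether the value should be written under omitempty.
+func (m Money) IsDefined() bool { return m.Set }
+```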
+
+## Type Wrappers
+
+easyjson provides additional type wrappers defined in the `easyjson/opt`
+package. These wrap the standard Go primitives and in turn satisfy the
+easyjson interfaces.
+
+The `easyjson/opt` type wrappers are useful when you need to distinguish a
+missing value from a zero value, or to specify a default value. Type
+wrappers allow easyjson to avoid additional pointers and heap allocations and
+can significantly increase performance when used properly.
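+
+For instance (a sketch; the `opt.Int` wrapper and its `OInt` constructor are
+assumed to come from the `easyjson/opt` package):
+
+```go
+//easyjson:json
+type Config struct {
+	// Records whether "retries" was present in the JSON, without a pointer.
+	Retries opt.Int `json:"retries"`
+}
+
+cfg := Config{Retries: opt.OInt(3)}
+```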
+
+## Memory Pooling
+
+easyjson uses a buffer pool that allocates data in increasing chunks from 128
+to 32768 bytes. Chunks of 512 bytes and larger will be reused with the help of
+`sync.Pool`. The maximum size of a chunk is bounded to reduce redundant memory
+allocation and to allow larger reusable buffers.
+
+easyjson's custom allocation buffer pool is defined in the `easyjson/buffer`
+package, and the default pool behavior can be modified (if necessary)
+through a call to `buffer.Init()` prior to any marshaling or unmarshaling.
+Please see the [GoDoc listing](https://godoc.org/github.com/mailru/easyjson/buffer)
+for more information.
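+
+A configuration sketch (field names follow `buffer.PoolConfig`; the values
+are illustrative, not recommendations):
+
+```go
+import "github.com/mailru/easyjson/buffer"
+
+func init() {
+	// Must run before any marshaling or unmarshaling takes place.
+	buffer.Init(buffer.PoolConfig{
+		StartSize:  128,   // size of the first allocated chunk
+		PooledSize: 512,   // chunks >= this size are recycled via sync.Pool
+		MaxSize:    32768, // upper bound on chunk size
+	})
+}
+```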
+
+## Issues, Notes, and Limitations
+
+* easyjson is still early in its development. As such, there are likely to be
+ bugs and missing features when compared to `encoding/json`. In the case of a
+ missing feature or bug, please create a GitHub issue. Pull requests are
+ welcome!
+
+* Unlike `encoding/json`, object keys are case-sensitive. Case-insensitive
+ matching is not currently provided due to the significant performance hit
+ when doing case-insensitive key matching. In the future, case-insensitive
+ object key matching may be provided via an option to the generator.
+
+* easyjson makes use of `unsafe`, which simplifies the code and
+  provides significant performance benefits by allowing no-copy
+  conversion from `[]byte` to `string`. That said, `unsafe` is used
+  only when unmarshaling and parsing JSON, and any `unsafe`
+  operations / memory allocations are safely released by
+  easyjson. Set the build tag `easyjson_nounsafe` to compile it
+  without `unsafe`.
+
+* easyjson is compatible with Google App Engine. The `appengine` build
+  tag (set by App Engine's environment) will automatically disable the
+  use of `unsafe`, which is not allowed in App Engine's Standard
+  Environment. Note that use with App Engine is still experimental.
+
+* Floats are formatted using the default precision from Go's `strconv` package.
+ As such, easyjson will not correctly handle high precision floats when
+ marshaling/unmarshaling JSON. Note, however, that there are very few/limited
+ uses where this behavior is not sufficient for general use. That said, a
+ different package may be needed if precise marshaling/unmarshaling of high
+ precision floats to/from JSON is required.
+
+* While unmarshaling, the JSON parser does only the minimal amount of work
+  needed to skip over values it does not need (matching parentheses and
+  brackets only), and as such full validation is not done for the entire
+  JSON value being unmarshaled/parsed.
+
+* Currently there is no true streaming support for encoding/decoding as
+ typically for many uses/protocols the final, marshaled length of the JSON
+ needs to be known prior to sending the data. Currently this is not possible
+ with easyjson's architecture.
+
+## Benchmarks
+
+Most benchmarks were done using a
+[13kB example JSON](https://dev.twitter.com/rest/reference/get/search/tweets)
+(9kB after eliminating whitespace). This example is similar to real-world data,
+is well-structured, and contains a healthy variety of different types, making
+it ideal for JSON serialization benchmarks.
+
+Note:
+
+* For small request benchmarks, an 80 byte portion of the above example was
+ used.
+
+* For large request marshaling benchmarks, a struct containing 50 regular
+ samples was used, making a ~500kB output JSON.
+
+* Benchmarks show the results of easyjson's default behavior,
+ which makes use of `unsafe`.
+
+Benchmarks are available in the repository and can be run by invoking `make`.
+
+### easyjson vs. encoding/json
+
+easyjson is roughly 5-6 times faster than the standard `encoding/json` for
+unmarshaling, and 3-4 times faster for non-concurrent marshaling. Concurrent
+marshaling is 6-7x faster if marshaling to a writer.
+
+### easyjson vs. ffjson
+
+easyjson uses the same approach for JSON marshaling as
+[ffjson](https://github.com/pquerna/ffjson), but takes a significantly
+different approach to lexing and parsing JSON during unmarshaling. This means
+easyjson is roughly 2-3x faster for unmarshaling and 1.5-2x faster for
+non-concurrent marshaling.
+
+As of this writing, `ffjson` seems to have issues when used concurrently:
+specifically, large request pooling hurts its performance and causes
+scalability issues. These issues can likely be fixed, but for now they remain
+outstanding/known.
+
+easyjson and `ffjson` have similar performance for small requests; however,
+easyjson outperforms `ffjson` by roughly 2-5x for large requests when
+used with a writer.
+
+### easyjson vs. go/codec
+
+[go/codec](https://github.com/ugorji/go) provides
+compile-time helpers for JSON generation. In this case, the helpers do not
+work like marshalers, as they are encoding-independent.
+
+easyjson is generally 2x faster than `go/codec` for non-concurrent benchmarks
+and about 3x faster for concurrent encoding (without marshaling to a writer).
+
+In an attempt to measure marshaling performance of `go/codec` (as opposed to
+allocations/memcpy/writer interface invocations), a benchmark was done that
+resets the length of the output byte slice rather than setting the whole slice to nil.
+However, the optimization in this exact form may not be applicable in practice,
+since the memory is not freed between marshaling operations.
+
+### easyjson vs. the ujson Python module
+
+[ujson](https://github.com/esnme/ultrajson) uses C code for parsing, so it
+is interesting to see how plain Go compares to that. It is important to note
+that the resulting object is slower to access in Python, since the library
+parses the JSON into dictionaries.
+
+easyjson is slightly faster for unmarshaling and 2-3x faster than `ujson` for
+marshaling.
+
+### Benchmark Results
+
+`ffjson` results are from February 4th, 2016, using the latest `ffjson` and go1.6.
+`go/codec` results are from March 4th, 2016, using the latest `go/codec` and go1.6.
+
+#### Unmarshaling
+
+| lib | json size | MB/s | allocs/op | B/op |
+|:---------|:----------|-----:|----------:|------:|
+| standard | regular | 22 | 218 | 10229 |
+| standard | small | 9.7 | 14 | 720 |
+| | | | | |
+| easyjson | regular | 125 | 128 | 9794 |
+| easyjson | small | 67 | 3 | 128 |
+| | | | | |
+| ffjson | regular | 66 | 141 | 9985 |
+| ffjson | small | 17.6 | 10 | 488 |
+| | | | | |
+| codec | regular | 55 | 434 | 19299 |
+| codec | small | 29 | 7 | 336 |
+| | | | | |
+| ujson | regular | 103 | N/A | N/A |
+
+#### Marshaling, one goroutine.
+
+| lib | json size | MB/s | allocs/op | B/op |
+|:----------|:----------|-----:|----------:|------:|
+| standard | regular | 75 | 9 | 23256 |
+| standard | small | 32 | 3 | 328 |
+| standard | large | 80 | 17 | 1.2M |
+| | | | | |
+| easyjson | regular | 213 | 9 | 10260 |
+| easyjson* | regular | 263 | 8 | 742 |
+| easyjson | small | 125 | 1 | 128 |
+| easyjson | large | 212 | 33 | 490k |
+| easyjson* | large | 262 | 25 | 2879 |
+| | | | | |
+| ffjson | regular | 122 | 153 | 21340 |
+| ffjson** | regular | 146 | 152 | 4897 |
+| ffjson | small | 36 | 5 | 384 |
+| ffjson** | small | 64 | 4 | 128 |
+| ffjson | large | 134 | 7317 | 818k |
+| ffjson** | large | 125 | 7320 | 827k |
+| | | | | |
+| codec | regular | 80 | 17 | 33601 |
+| codec*** | regular | 108 | 9 | 1153 |
+| codec | small | 42 | 3 | 304 |
+| codec*** | small | 56 | 1 | 48 |
+| codec | large | 73 | 483 | 2.5M |
+| codec*** | large | 103 | 451 | 66007 |
+| | | | | |
+| ujson | regular | 92 | N/A | N/A |
+
+\* marshaling to a writer,
+\*\* using `ffjson.Pool()`,
+\*\*\* reusing output slice instead of resetting it to nil
+
+#### Marshaling, concurrent.
+
+| lib | json size | MB/s | allocs/op | B/op |
+|:----------|:----------|-----:|----------:|------:|
+| standard | regular | 252 | 9 | 23257 |
+| standard | small | 124 | 3 | 328 |
+| standard | large | 289 | 17 | 1.2M |
+| | | | | |
+| easyjson | regular | 792 | 9 | 10597 |
+| easyjson* | regular | 1748 | 8 | 779 |
+| easyjson | small | 333 | 1 | 128 |
+| easyjson | large | 718 | 36 | 548k |
+| easyjson* | large | 2134 | 25 | 4957 |
+| | | | | |
+| ffjson | regular | 301 | 153 | 21629 |
+| ffjson** | regular | 707 | 152 | 5148 |
+| ffjson | small | 62 | 5 | 384 |
+| ffjson** | small | 282 | 4 | 128 |
+| ffjson | large | 438 | 7330 | 1.0M |
+| ffjson** | large | 131 | 7319 | 820k |
+| | | | | |
+| codec | regular | 183 | 17 | 33603 |
+| codec*** | regular | 671 | 9 | 1157 |
+| codec | small | 147 | 3 | 304 |
+| codec*** | small | 299 | 1 | 48 |
+| codec | large | 190 | 483 | 2.5M |
+| codec*** | large | 752 | 451 | 77574 |
+
+\* marshaling to a writer,
+\*\* using `ffjson.Pool()`,
+\*\*\* reusing output slice instead of resetting it to nil
diff --git a/vendor/github.com/mailru/easyjson/benchmark/codec_test.go b/vendor/github.com/mailru/easyjson/benchmark/codec_test.go
new file mode 100644
index 000000000..5c77072ee
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/codec_test.go
@@ -0,0 +1,279 @@
+// +build use_codec
+
+package benchmark
+
+import (
+ "testing"
+
+ "github.com/ugorji/go/codec"
+)
+
+func BenchmarkCodec_Unmarshal_M(b *testing.B) {
+ var h codec.Handle = new(codec.JsonHandle)
+ dec := codec.NewDecoderBytes(nil, h)
+
+ b.SetBytes(int64(len(largeStructText)))
+ for i := 0; i < b.N; i++ {
+ var s LargeStruct
+ dec.ResetBytes(largeStructText)
+ if err := dec.Decode(&s); err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+func BenchmarkCodec_Unmarshal_S(b *testing.B) {
+ var h codec.Handle = new(codec.JsonHandle)
+ dec := codec.NewDecoderBytes(nil, h)
+
+ b.SetBytes(int64(len(smallStructText)))
+ for i := 0; i < b.N; i++ {
+ var s LargeStruct
+ dec.ResetBytes(smallStructText)
+ if err := dec.Decode(&s); err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+func BenchmarkCodec_Marshal_S(b *testing.B) {
+ var h codec.Handle = new(codec.JsonHandle)
+
+ var out []byte
+ enc := codec.NewEncoderBytes(&out, h)
+
+ var l int64
+ for i := 0; i < b.N; i++ {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&smallStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = nil
+ }
+
+ b.SetBytes(l)
+}
+
+func BenchmarkCodec_Marshal_M(b *testing.B) {
+ var h codec.Handle = new(codec.JsonHandle)
+
+ var out []byte
+ enc := codec.NewEncoderBytes(&out, h)
+
+ var l int64
+ for i := 0; i < b.N; i++ {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&largeStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = nil
+ }
+
+ b.SetBytes(l)
+}
+
+func BenchmarkCodec_Marshal_L(b *testing.B) {
+ var h codec.Handle = new(codec.JsonHandle)
+
+ var out []byte
+ enc := codec.NewEncoderBytes(&out, h)
+
+ var l int64
+ for i := 0; i < b.N; i++ {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&xlStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = nil
+ }
+
+ b.SetBytes(l)
+}
+
+func BenchmarkCodec_Marshal_S_Reuse(b *testing.B) {
+ var h codec.Handle = new(codec.JsonHandle)
+
+ var out []byte
+ enc := codec.NewEncoderBytes(&out, h)
+
+ var l int64
+ for i := 0; i < b.N; i++ {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&smallStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = out[:0]
+ }
+
+ b.SetBytes(l)
+}
+
+func BenchmarkCodec_Marshal_M_Reuse(b *testing.B) {
+ var h codec.Handle = new(codec.JsonHandle)
+
+ var out []byte
+ enc := codec.NewEncoderBytes(&out, h)
+
+ var l int64
+ for i := 0; i < b.N; i++ {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&largeStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = out[:0]
+ }
+
+ b.SetBytes(l)
+}
+
+func BenchmarkCodec_Marshal_L_Reuse(b *testing.B) {
+ var h codec.Handle = new(codec.JsonHandle)
+
+ var out []byte
+ enc := codec.NewEncoderBytes(&out, h)
+
+ var l int64
+ for i := 0; i < b.N; i++ {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&xlStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = out[:0]
+ }
+
+ b.SetBytes(l)
+}
+
+func BenchmarkCodec_Marshal_S_Parallel(b *testing.B) {
+ var l int64
+
+ b.RunParallel(func(pb *testing.PB) {
+ var out []byte
+
+ var h codec.Handle = new(codec.JsonHandle)
+ enc := codec.NewEncoderBytes(&out, h)
+
+ for pb.Next() {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&smallStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = nil
+ }
+ })
+
+ b.SetBytes(l)
+}
+
+func BenchmarkCodec_Marshal_M_Parallel(b *testing.B) {
+ var l int64
+
+ b.RunParallel(func(pb *testing.PB) {
+ var h codec.Handle = new(codec.JsonHandle)
+
+ var out []byte
+ enc := codec.NewEncoderBytes(&out, h)
+
+ for pb.Next() {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&largeStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = nil
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkCodec_Marshal_L_Parallel(b *testing.B) {
+ var l int64
+
+ b.RunParallel(func(pb *testing.PB) {
+ var h codec.Handle = new(codec.JsonHandle)
+
+ var out []byte
+ enc := codec.NewEncoderBytes(&out, h)
+
+ for pb.Next() {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&xlStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = nil
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkCodec_Marshal_S_Parallel_Reuse(b *testing.B) {
+ var l int64
+
+ b.RunParallel(func(pb *testing.PB) {
+ var out []byte
+
+ var h codec.Handle = new(codec.JsonHandle)
+ enc := codec.NewEncoderBytes(&out, h)
+
+ for pb.Next() {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&smallStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = out[:0]
+ }
+ })
+
+ b.SetBytes(l)
+}
+
+func BenchmarkCodec_Marshal_M_Parallel_Reuse(b *testing.B) {
+ var l int64
+
+ b.RunParallel(func(pb *testing.PB) {
+ var h codec.Handle = new(codec.JsonHandle)
+
+ var out []byte
+ enc := codec.NewEncoderBytes(&out, h)
+
+ for pb.Next() {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&largeStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = out[:0]
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkCodec_Marshal_L_Parallel_Reuse(b *testing.B) {
+ var l int64
+
+ b.RunParallel(func(pb *testing.PB) {
+ var h codec.Handle = new(codec.JsonHandle)
+
+ var out []byte
+ enc := codec.NewEncoderBytes(&out, h)
+
+ for pb.Next() {
+ enc.ResetBytes(&out)
+ if err := enc.Encode(&xlStructData); err != nil {
+ b.Error(err)
+ }
+ l = int64(len(out))
+ out = out[:0]
+ }
+ })
+ b.SetBytes(l)
+}
diff --git a/vendor/github.com/mailru/easyjson/benchmark/data.go b/vendor/github.com/mailru/easyjson/benchmark/data.go
new file mode 100644
index 000000000..71eb91a94
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/data.go
@@ -0,0 +1,148 @@
+// Package benchmark provides a simple benchmark for easyjson against default serialization and ffjson.
+// The data example is taken from https://dev.twitter.com/rest/reference/get/search/tweets
+package benchmark
+
+import (
+ "io/ioutil"
+)
+
+var largeStructText, _ = ioutil.ReadFile("example.json")
+var xlStructData XLStruct
+
+func init() {
+ for i := 0; i < 50; i++ {
+ xlStructData.Data = append(xlStructData.Data, largeStructData)
+ }
+}
+
+var smallStructText = []byte(`{"hashtags":[{"indices":[5, 10],"text":"some-text"}],"urls":[],"user_mentions":[]}`)
+var smallStructData = Entities{
+ Hashtags: []Hashtag{{Indices: []int{5, 10}, Text: "some-text"}},
+ Urls: []*string{},
+ UserMentions: []*string{},
+}
+
+type SearchMetadata struct {
+ CompletedIn float64 `json:"completed_in"`
+ Count int `json:"count"`
+ MaxID int64 `json:"max_id"`
+ MaxIDStr string `json:"max_id_str"`
+ NextResults string `json:"next_results"`
+ Query string `json:"query"`
+ RefreshURL string `json:"refresh_url"`
+ SinceID int64 `json:"since_id"`
+ SinceIDStr string `json:"since_id_str"`
+}
+
+type Hashtag struct {
+ Indices []int `json:"indices"`
+ Text string `json:"text"`
+}
+
+//easyjson:json
+type Entities struct {
+ Hashtags []Hashtag `json:"hashtags"`
+ Urls []*string `json:"urls"`
+ UserMentions []*string `json:"user_mentions"`
+}
+
+type UserEntityDescription struct {
+ Urls []*string `json:"urls"`
+}
+
+type URL struct {
+ ExpandedURL *string `json:"expanded_url"`
+ Indices []int `json:"indices"`
+ URL string `json:"url"`
+}
+
+type UserEntityURL struct {
+ Urls []URL `json:"urls"`
+}
+
+type UserEntities struct {
+ Description UserEntityDescription `json:"description"`
+ URL UserEntityURL `json:"url"`
+}
+
+type User struct {
+ ContributorsEnabled bool `json:"contributors_enabled"`
+ CreatedAt string `json:"created_at"`
+ DefaultProfile bool `json:"default_profile"`
+ DefaultProfileImage bool `json:"default_profile_image"`
+ Description string `json:"description"`
+ Entities UserEntities `json:"entities"`
+ FavouritesCount int `json:"favourites_count"`
+ FollowRequestSent *string `json:"follow_request_sent"`
+ FollowersCount int `json:"followers_count"`
+ Following *string `json:"following"`
+ FriendsCount int `json:"friends_count"`
+ GeoEnabled bool `json:"geo_enabled"`
+ ID int `json:"id"`
+ IDStr string `json:"id_str"`
+ IsTranslator bool `json:"is_translator"`
+ Lang string `json:"lang"`
+ ListedCount int `json:"listed_count"`
+ Location string `json:"location"`
+ Name string `json:"name"`
+ Notifications *string `json:"notifications"`
+ ProfileBackgroundColor string `json:"profile_background_color"`
+ ProfileBackgroundImageURL string `json:"profile_background_image_url"`
+ ProfileBackgroundImageURLHTTPS string `json:"profile_background_image_url_https"`
+ ProfileBackgroundTile bool `json:"profile_background_tile"`
+ ProfileImageURL string `json:"profile_image_url"`
+ ProfileImageURLHTTPS string `json:"profile_image_url_https"`
+ ProfileLinkColor string `json:"profile_link_color"`
+ ProfileSidebarBorderColor string `json:"profile_sidebar_border_color"`
+ ProfileSidebarFillColor string `json:"profile_sidebar_fill_color"`
+ ProfileTextColor string `json:"profile_text_color"`
+ ProfileUseBackgroundImage bool `json:"profile_use_background_image"`
+ Protected bool `json:"protected"`
+ ScreenName string `json:"screen_name"`
+ ShowAllInlineMedia bool `json:"show_all_inline_media"`
+ StatusesCount int `json:"statuses_count"`
+ TimeZone string `json:"time_zone"`
+ URL *string `json:"url"`
+ UtcOffset int `json:"utc_offset"`
+ Verified bool `json:"verified"`
+}
+
+type StatusMetadata struct {
+ IsoLanguageCode string `json:"iso_language_code"`
+ ResultType string `json:"result_type"`
+}
+
+type Status struct {
+ Contributors *string `json:"contributors"`
+ Coordinates *string `json:"coordinates"`
+ CreatedAt string `json:"created_at"`
+ Entities Entities `json:"entities"`
+ Favorited bool `json:"favorited"`
+ Geo *string `json:"geo"`
+ ID int64 `json:"id"`
+ IDStr string `json:"id_str"`
+ InReplyToScreenName *string `json:"in_reply_to_screen_name"`
+ InReplyToStatusID *string `json:"in_reply_to_status_id"`
+ InReplyToStatusIDStr *string `json:"in_reply_to_status_id_str"`
+ InReplyToUserID *string `json:"in_reply_to_user_id"`
+ InReplyToUserIDStr *string `json:"in_reply_to_user_id_str"`
+ Metadata StatusMetadata `json:"metadata"`
+ Place *string `json:"place"`
+ RetweetCount int `json:"retweet_count"`
+ Retweeted bool `json:"retweeted"`
+ Source string `json:"source"`
+ Text string `json:"text"`
+ Truncated bool `json:"truncated"`
+ User User `json:"user"`
+}
+
+//easyjson:json
+type LargeStruct struct {
+ SearchMetadata SearchMetadata `json:"search_metadata"`
+ Statuses []Status `json:"statuses"`
+}
+
+//easyjson:json
+type XLStruct struct {
+ Data []LargeStruct
+}
diff --git a/vendor/github.com/mailru/easyjson/benchmark/data_codec.go b/vendor/github.com/mailru/easyjson/benchmark/data_codec.go
new file mode 100644
index 000000000..d2d83fac6
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/data_codec.go
@@ -0,0 +1,6914 @@
+//+build use_codec
+//+build !easyjson_nounsafe
+//+build !appengine
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package benchmark
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "unsafe"
+
+ codec1978 "github.com/ugorji/go/codec"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF89225 = 1
+ codecSelferC_RAW9225 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray9225 = 10
+ codecSelferValueTypeMap9225 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey9225 = 2
+ codecSelfer_containerMapValue9225 = 3
+ codecSelfer_containerMapEnd9225 = 4
+ codecSelfer_containerArrayElem9225 = 6
+ codecSelfer_containerArrayEnd9225 = 7
+)
+
+var (
+ codecSelferBitsize9225 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr9225 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelferUnsafeString9225 struct {
+ Data uintptr
+ Len int
+}
+
+type codecSelfer9225 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 unsafe.Pointer
+ _ = v0
+ }
+}
+
+func (x *SearchMetadata) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [9]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(9)
+ } else {
+ yynn2 = 9
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeFloat64(float64(x.CompletedIn))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("completed_in"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeFloat64(float64(x.CompletedIn))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Count))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("count"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeInt(int64(x.Count))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("max_id"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeInt(int64(x.MaxID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.MaxIDStr))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("max_id_str"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.MaxIDStr))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.NextResults))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("next_results"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.NextResults))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym19 := z.EncBinary()
+ _ = yym19
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Query))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("query"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym20 := z.EncBinary()
+ _ = yym20
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Query))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.RefreshURL))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("refresh_url"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.RefreshURL))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeInt(int64(x.SinceID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("since_id"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeInt(int64(x.SinceID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.SinceIDStr))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("since_id_str"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym29 := z.EncBinary()
+ _ = yym29
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.SinceIDStr))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
+func (x *SearchMetadata) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
+func (x *SearchMetadata) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "completed_in":
+ if r.TryDecodeAsNil() {
+ x.CompletedIn = 0
+ } else {
+ yyv4 := &x.CompletedIn
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*float64)(yyv4)) = float64(r.DecodeFloat(false))
+ }
+ }
+ case "count":
+ if r.TryDecodeAsNil() {
+ x.Count = 0
+ } else {
+ yyv6 := &x.Count
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*int)(yyv6)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ case "max_id":
+ if r.TryDecodeAsNil() {
+ x.MaxID = 0
+ } else {
+ yyv8 := &x.MaxID
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*int)(yyv8)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ case "max_id_str":
+ if r.TryDecodeAsNil() {
+ x.MaxIDStr = ""
+ } else {
+ yyv10 := &x.MaxIDStr
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*string)(yyv10)) = r.DecodeString()
+ }
+ }
+ case "next_results":
+ if r.TryDecodeAsNil() {
+ x.NextResults = ""
+ } else {
+ yyv12 := &x.NextResults
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ *((*string)(yyv12)) = r.DecodeString()
+ }
+ }
+ case "query":
+ if r.TryDecodeAsNil() {
+ x.Query = ""
+ } else {
+ yyv14 := &x.Query
+ yym15 := z.DecBinary()
+ _ = yym15
+ if false {
+ } else {
+ *((*string)(yyv14)) = r.DecodeString()
+ }
+ }
+ case "refresh_url":
+ if r.TryDecodeAsNil() {
+ x.RefreshURL = ""
+ } else {
+ yyv16 := &x.RefreshURL
+ yym17 := z.DecBinary()
+ _ = yym17
+ if false {
+ } else {
+ *((*string)(yyv16)) = r.DecodeString()
+ }
+ }
+ case "since_id":
+ if r.TryDecodeAsNil() {
+ x.SinceID = 0
+ } else {
+ yyv18 := &x.SinceID
+ yym19 := z.DecBinary()
+ _ = yym19
+ if false {
+ } else {
+ *((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ case "since_id_str":
+ if r.TryDecodeAsNil() {
+ x.SinceIDStr = ""
+ } else {
+ yyv20 := &x.SinceIDStr
+ yym21 := z.DecBinary()
+ _ = yym21
+ if false {
+ } else {
+ *((*string)(yyv20)) = r.DecodeString()
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
+func (x *SearchMetadata) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj22 int
+ var yyb22 bool
+ var yyhl22 bool = l >= 0
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.CompletedIn = 0
+ } else {
+ yyv23 := &x.CompletedIn
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ *((*float64)(yyv23)) = float64(r.DecodeFloat(false))
+ }
+ }
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Count = 0
+ } else {
+ yyv25 := &x.Count
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ *((*int)(yyv25)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.MaxID = 0
+ } else {
+ yyv27 := &x.MaxID
+ yym28 := z.DecBinary()
+ _ = yym28
+ if false {
+ } else {
+ *((*int)(yyv27)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.MaxIDStr = ""
+ } else {
+ yyv29 := &x.MaxIDStr
+ yym30 := z.DecBinary()
+ _ = yym30
+ if false {
+ } else {
+ *((*string)(yyv29)) = r.DecodeString()
+ }
+ }
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.NextResults = ""
+ } else {
+ yyv31 := &x.NextResults
+ yym32 := z.DecBinary()
+ _ = yym32
+ if false {
+ } else {
+ *((*string)(yyv31)) = r.DecodeString()
+ }
+ }
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Query = ""
+ } else {
+ yyv33 := &x.Query
+ yym34 := z.DecBinary()
+ _ = yym34
+ if false {
+ } else {
+ *((*string)(yyv33)) = r.DecodeString()
+ }
+ }
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.RefreshURL = ""
+ } else {
+ yyv35 := &x.RefreshURL
+ yym36 := z.DecBinary()
+ _ = yym36
+ if false {
+ } else {
+ *((*string)(yyv35)) = r.DecodeString()
+ }
+ }
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.SinceID = 0
+ } else {
+ yyv37 := &x.SinceID
+ yym38 := z.DecBinary()
+ _ = yym38
+ if false {
+ } else {
+ *((*int)(yyv37)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.SinceIDStr = ""
+ } else {
+ yyv39 := &x.SinceIDStr
+ yym40 := z.DecBinary()
+ _ = yym40
+ if false {
+ } else {
+ *((*string)(yyv39)) = r.DecodeString()
+ }
+ }
+ for {
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj22-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
+func (x *Hashtag) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Indices == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ z.F.EncSliceIntV(x.Indices, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("indices"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Indices == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.EncSliceIntV(x.Indices, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Text))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("text"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Text))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
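+// CodecDecodeSelf inspects the incoming container type and dispatches to
+// the map- or array-shaped decoder; any other container type panics, since
+// a struct can only be decoded from a map or an array.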
+func (x *Hashtag) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
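+// codecDecodeSelfFromMap decodes field-by-field from a map container. Keys
+// are read into a reusable scratch buffer and reinterpreted as a string
+// via an unsafe, zero-copy header so the switch below can match field
+// names without allocating; that string is only valid until the buffer is
+// next reused.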
+func (x *Hashtag) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "indices":
+ if r.TryDecodeAsNil() {
+ x.Indices = nil
+ } else {
+ yyv4 := &x.Indices
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ z.F.DecSliceIntX(yyv4, false, d)
+ }
+ }
+ case "text":
+ if r.TryDecodeAsNil() {
+ x.Text = ""
+ } else {
+ yyv6 := &x.Text
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*string)(yyv6)) = r.DecodeString()
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
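+// codecDecodeSelfFromArray decodes fields positionally from an array
+// container: yyj8 tracks the element index, yyb8 signals exhaustion
+// (either the declared length l or a stream break), and the trailing loop
+// drains any extra elements via DecStructFieldNotFound.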
+func (x *Hashtag) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Indices = nil
+ } else {
+ yyv9 := &x.Indices
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.DecSliceIntX(yyv9, false, d)
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Text = ""
+ } else {
+ yyv11 := &x.Text
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *((*string)(yyv11)) = r.DecodeString()
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
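+// Entities encodes its three slice fields through generated helpers
+// (encSliceHashtag, encSlicePtrtostring) rather than reflection; nil
+// slices are written as explicit nils so a decode round-trip can
+// distinguish nil from empty.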
+func (x *Entities) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Hashtags == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceHashtag(([]Hashtag)(x.Hashtags), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("hashtags"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Hashtags == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceHashtag(([]Hashtag)(x.Hashtags), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Urls == nil {
+ r.EncodeNil()
+ } else {
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.encSlicePtrtostring(([]*string)(x.Urls), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("urls"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Urls == nil {
+ r.EncodeNil()
+ } else {
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.encSlicePtrtostring(([]*string)(x.Urls), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.UserMentions == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSlicePtrtostring(([]*string)(x.UserMentions), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("user_mentions"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.UserMentions == nil {
+ r.EncodeNil()
+ } else {
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ h.encSlicePtrtostring(([]*string)(x.UserMentions), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
+func (x *Entities) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
+func (x *Entities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "hashtags":
+ if r.TryDecodeAsNil() {
+ x.Hashtags = nil
+ } else {
+ yyv4 := &x.Hashtags
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceHashtag((*[]Hashtag)(yyv4), d)
+ }
+ }
+ case "urls":
+ if r.TryDecodeAsNil() {
+ x.Urls = nil
+ } else {
+ yyv6 := &x.Urls
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ h.decSlicePtrtostring((*[]*string)(yyv6), d)
+ }
+ }
+ case "user_mentions":
+ if r.TryDecodeAsNil() {
+ x.UserMentions = nil
+ } else {
+ yyv8 := &x.UserMentions
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.decSlicePtrtostring((*[]*string)(yyv8), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
+func (x *Entities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Hashtags = nil
+ } else {
+ yyv11 := &x.Hashtags
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ h.decSliceHashtag((*[]Hashtag)(yyv11), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Urls = nil
+ } else {
+ yyv13 := &x.Urls
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ h.decSlicePtrtostring((*[]*string)(yyv13), d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.UserMentions = nil
+ } else {
+ yyv15 := &x.UserMentions
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ h.decSlicePtrtostring((*[]*string)(yyv15), d)
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
+func (x *UserEntityDescription) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Urls == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSlicePtrtostring(([]*string)(x.Urls), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("urls"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Urls == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSlicePtrtostring(([]*string)(x.Urls), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
+func (x *UserEntityDescription) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
+func (x *UserEntityDescription) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "urls":
+ if r.TryDecodeAsNil() {
+ x.Urls = nil
+ } else {
+ yyv4 := &x.Urls
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSlicePtrtostring((*[]*string)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
+func (x *UserEntityDescription) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Urls = nil
+ } else {
+ yyv7 := &x.Urls
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSlicePtrtostring((*[]*string)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
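+// URL carries its optional field as *string: on encode, a nil ExpandedURL
+// is written as an explicit nil, otherwise the pointer is dereferenced and
+// written as a plain string; the decoders below mirror this by allocating
+// with new(string) only when a non-nil value arrives.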
+func (x *URL) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [3]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn2 = 3
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.ExpandedURL == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.ExpandedURL
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy4))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("expanded_url"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.ExpandedURL == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.ExpandedURL
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy6))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Indices == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ z.F.EncSliceIntV(x.Indices, false, e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("indices"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Indices == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ z.F.EncSliceIntV(x.Indices, false, e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.URL))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("url"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.URL))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
+func (x *URL) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
+func (x *URL) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "expanded_url":
+ if r.TryDecodeAsNil() {
+ if x.ExpandedURL != nil {
+ x.ExpandedURL = nil
+ }
+ } else {
+ if x.ExpandedURL == nil {
+ x.ExpandedURL = new(string)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*string)(x.ExpandedURL)) = r.DecodeString()
+ }
+ }
+ case "indices":
+ if r.TryDecodeAsNil() {
+ x.Indices = nil
+ } else {
+ yyv6 := &x.Indices
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ z.F.DecSliceIntX(yyv6, false, d)
+ }
+ }
+ case "url":
+ if r.TryDecodeAsNil() {
+ x.URL = ""
+ } else {
+ yyv8 := &x.URL
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*string)(yyv8)) = r.DecodeString()
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
+func (x *URL) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj10 int
+ var yyb10 bool
+ var yyhl10 bool = l >= 0
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.ExpandedURL != nil {
+ x.ExpandedURL = nil
+ }
+ } else {
+ if x.ExpandedURL == nil {
+ x.ExpandedURL = new(string)
+ }
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *((*string)(x.ExpandedURL)) = r.DecodeString()
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Indices = nil
+ } else {
+ yyv13 := &x.Indices
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ z.F.DecSliceIntX(yyv13, false, d)
+ }
+ }
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.URL = ""
+ } else {
+ yyv15 := &x.URL
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*string)(yyv15)) = r.DecodeString()
+ }
+ }
+ for {
+ yyj10++
+ if yyhl10 {
+ yyb10 = yyj10 > l
+ } else {
+ yyb10 = r.CheckBreak()
+ }
+ if yyb10 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj10-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
+func (x *UserEntityURL) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Urls == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceURL(([]URL)(x.Urls), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("urls"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Urls == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceURL(([]URL)(x.Urls), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
+func (x *UserEntityURL) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
+func (x *UserEntityURL) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "urls":
+ if r.TryDecodeAsNil() {
+ x.Urls = nil
+ } else {
+ yyv4 := &x.Urls
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceURL((*[]URL)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
+func (x *UserEntityURL) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Urls = nil
+ } else {
+ yyv7 := &x.Urls
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceURL((*[]URL)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
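+// UserEntities nests other Selfer types: rather than inlining their
+// fields, the encoder takes the address of Description and URL and
+// recurses into their generated CodecEncodeSelf methods, so each nested
+// struct controls its own map/array representation.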
+func (x *UserEntities) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yy4 := &x.Description
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("description"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yy6 := &x.Description
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yy9 := &x.URL
+ yy9.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("url"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yy11 := &x.URL
+ yy11.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
+func (x *UserEntities) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
+func (x *UserEntities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "description":
+ if r.TryDecodeAsNil() {
+ x.Description = UserEntityDescription{}
+ } else {
+ yyv4 := &x.Description
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "url":
+ if r.TryDecodeAsNil() {
+ x.URL = UserEntityURL{}
+ } else {
+ yyv5 := &x.URL
+ yyv5.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
+func (x *UserEntities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Description = UserEntityDescription{}
+ } else {
+ yyv7 := &x.Description
+ yyv7.CodecDecodeSelf(d)
+ }
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.URL = UserEntityURL{}
+ } else {
+ yyv8 := &x.URL
+ yyv8.CodecDecodeSelf(d)
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
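+// User is the largest type in this file (39 fields). The same pattern
+// repeats per field: array mode writes values positionally, map mode
+// writes a snake_case key (matching the Twitter JSON field names) before
+// each value, and nullable *string fields such as FollowRequestSent,
+// Following, Notifications and URL encode as nil when unset.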
+func (x *User) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [39]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(39)
+ } else {
+ yynn2 = 39
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ContributorsEnabled))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("contributors_enabled"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ContributorsEnabled))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.CreatedAt))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("created_at"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.CreatedAt))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeBool(bool(x.DefaultProfile))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("default_profile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym11 := z.EncBinary()
+ _ = yym11
+ if false {
+ } else {
+ r.EncodeBool(bool(x.DefaultProfile))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else {
+ r.EncodeBool(bool(x.DefaultProfileImage))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("default_profile_image"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeBool(bool(x.DefaultProfileImage))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym16 := z.EncBinary()
+ _ = yym16
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Description))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("description"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym17 := z.EncBinary()
+ _ = yym17
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Description))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yy19 := &x.Entities
+ yy19.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("entities"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yy21 := &x.Entities
+ yy21.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym24 := z.EncBinary()
+ _ = yym24
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FavouritesCount))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("favourites_count"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym25 := z.EncBinary()
+ _ = yym25
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FavouritesCount))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.FollowRequestSent == nil {
+ r.EncodeNil()
+ } else {
+ yy27 := *x.FollowRequestSent
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy27))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("follow_request_sent"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.FollowRequestSent == nil {
+ r.EncodeNil()
+ } else {
+ yy29 := *x.FollowRequestSent
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy29))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FollowersCount))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("followers_count"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym33 := z.EncBinary()
+ _ = yym33
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FollowersCount))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Following == nil {
+ r.EncodeNil()
+ } else {
+ yy35 := *x.Following
+ yym36 := z.EncBinary()
+ _ = yym36
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy35))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("following"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Following == nil {
+ r.EncodeNil()
+ } else {
+ yy37 := *x.Following
+ yym38 := z.EncBinary()
+ _ = yym38
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy37))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym40 := z.EncBinary()
+ _ = yym40
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FriendsCount))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("friends_count"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym41 := z.EncBinary()
+ _ = yym41
+ if false {
+ } else {
+ r.EncodeInt(int64(x.FriendsCount))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym43 := z.EncBinary()
+ _ = yym43
+ if false {
+ } else {
+ r.EncodeBool(bool(x.GeoEnabled))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("geo_enabled"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym44 := z.EncBinary()
+ _ = yym44
+ if false {
+ } else {
+ r.EncodeBool(bool(x.GeoEnabled))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym46 := z.EncBinary()
+ _ = yym46
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("id"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym47 := z.EncBinary()
+ _ = yym47
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym49 := z.EncBinary()
+ _ = yym49
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.IDStr))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("id_str"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym50 := z.EncBinary()
+ _ = yym50
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.IDStr))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym52 := z.EncBinary()
+ _ = yym52
+ if false {
+ } else {
+ r.EncodeBool(bool(x.IsTranslator))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("is_translator"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym53 := z.EncBinary()
+ _ = yym53
+ if false {
+ } else {
+ r.EncodeBool(bool(x.IsTranslator))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym55 := z.EncBinary()
+ _ = yym55
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Lang))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("lang"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym56 := z.EncBinary()
+ _ = yym56
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Lang))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym58 := z.EncBinary()
+ _ = yym58
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ListedCount))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("listed_count"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym59 := z.EncBinary()
+ _ = yym59
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ListedCount))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym61 := z.EncBinary()
+ _ = yym61
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Location))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("location"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym62 := z.EncBinary()
+ _ = yym62
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Location))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym64 := z.EncBinary()
+ _ = yym64
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Name))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym65 := z.EncBinary()
+ _ = yym65
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Name))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Notifications == nil {
+ r.EncodeNil()
+ } else {
+ yy67 := *x.Notifications
+ yym68 := z.EncBinary()
+ _ = yym68
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy67))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("notifications"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Notifications == nil {
+ r.EncodeNil()
+ } else {
+ yy69 := *x.Notifications
+ yym70 := z.EncBinary()
+ _ = yym70
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy69))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym72 := z.EncBinary()
+ _ = yym72
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundColor))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("profile_background_color"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym73 := z.EncBinary()
+ _ = yym73
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundColor))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym75 := z.EncBinary()
+ _ = yym75
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundImageURL))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("profile_background_image_url"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym76 := z.EncBinary()
+ _ = yym76
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundImageURL))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym78 := z.EncBinary()
+ _ = yym78
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundImageURLHTTPS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("profile_background_image_url_https"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym79 := z.EncBinary()
+ _ = yym79
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundImageURLHTTPS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym81 := z.EncBinary()
+ _ = yym81
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ProfileBackgroundTile))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("profile_background_tile"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym82 := z.EncBinary()
+ _ = yym82
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ProfileBackgroundTile))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym84 := z.EncBinary()
+ _ = yym84
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileImageURL))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("profile_image_url"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym85 := z.EncBinary()
+ _ = yym85
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileImageURL))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym87 := z.EncBinary()
+ _ = yym87
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileImageURLHTTPS))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("profile_image_url_https"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym88 := z.EncBinary()
+ _ = yym88
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileImageURLHTTPS))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym90 := z.EncBinary()
+ _ = yym90
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileLinkColor))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("profile_link_color"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym91 := z.EncBinary()
+ _ = yym91
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileLinkColor))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym93 := z.EncBinary()
+ _ = yym93
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileSidebarBorderColor))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("profile_sidebar_border_color"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym94 := z.EncBinary()
+ _ = yym94
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileSidebarBorderColor))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym96 := z.EncBinary()
+ _ = yym96
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileSidebarFillColor))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("profile_sidebar_fill_color"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym97 := z.EncBinary()
+ _ = yym97
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileSidebarFillColor))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym99 := z.EncBinary()
+ _ = yym99
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileTextColor))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("profile_text_color"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym100 := z.EncBinary()
+ _ = yym100
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ProfileTextColor))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym102 := z.EncBinary()
+ _ = yym102
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ProfileUseBackgroundImage))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("profile_use_background_image"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym103 := z.EncBinary()
+ _ = yym103
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ProfileUseBackgroundImage))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym105 := z.EncBinary()
+ _ = yym105
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Protected))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("protected"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym106 := z.EncBinary()
+ _ = yym106
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Protected))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym108 := z.EncBinary()
+ _ = yym108
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ScreenName))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("screen_name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym109 := z.EncBinary()
+ _ = yym109
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ScreenName))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym111 := z.EncBinary()
+ _ = yym111
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ShowAllInlineMedia))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("show_all_inline_media"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym112 := z.EncBinary()
+ _ = yym112
+ if false {
+ } else {
+ r.EncodeBool(bool(x.ShowAllInlineMedia))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym114 := z.EncBinary()
+ _ = yym114
+ if false {
+ } else {
+ r.EncodeInt(int64(x.StatusesCount))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("statuses_count"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym115 := z.EncBinary()
+ _ = yym115
+ if false {
+ } else {
+ r.EncodeInt(int64(x.StatusesCount))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym117 := z.EncBinary()
+ _ = yym117
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.TimeZone))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("time_zone"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym118 := z.EncBinary()
+ _ = yym118
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.TimeZone))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.URL == nil {
+ r.EncodeNil()
+ } else {
+ yy120 := *x.URL
+ yym121 := z.EncBinary()
+ _ = yym121
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy120))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("url"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.URL == nil {
+ r.EncodeNil()
+ } else {
+ yy122 := *x.URL
+ yym123 := z.EncBinary()
+ _ = yym123
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy122))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym125 := z.EncBinary()
+ _ = yym125
+ if false {
+ } else {
+ r.EncodeInt(int64(x.UtcOffset))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("utc_offset"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym126 := z.EncBinary()
+ _ = yym126
+ if false {
+ } else {
+ r.EncodeInt(int64(x.UtcOffset))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym128 := z.EncBinary()
+ _ = yym128
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Verified))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("verified"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym129 := z.EncBinary()
+ _ = yym129
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Verified))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
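+// Sketch of how these generated methods are reached (hypothetical caller,
+// not part of this vendored file; assumes the standard ugorji/go API):
+//
+//	var user User
+//	dec := codec.NewDecoderBytes(data, &codec.JsonHandle{})
+//	if err := dec.Decode(&user); err != nil {
+//		// handle error
+//	}
+//
+// Decode detects that *User implements codec.Selfer and calls
+// CodecDecodeSelf, which routes to the map or array decoder below.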
+func (x *User) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
+func (x *User) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "contributors_enabled":
+ if r.TryDecodeAsNil() {
+ x.ContributorsEnabled = false
+ } else {
+ yyv4 := &x.ContributorsEnabled
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*bool)(yyv4)) = r.DecodeBool()
+ }
+ }
+ case "created_at":
+ if r.TryDecodeAsNil() {
+ x.CreatedAt = ""
+ } else {
+ yyv6 := &x.CreatedAt
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*string)(yyv6)) = r.DecodeString()
+ }
+ }
+ case "default_profile":
+ if r.TryDecodeAsNil() {
+ x.DefaultProfile = false
+ } else {
+ yyv8 := &x.DefaultProfile
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*bool)(yyv8)) = r.DecodeBool()
+ }
+ }
+ case "default_profile_image":
+ if r.TryDecodeAsNil() {
+ x.DefaultProfileImage = false
+ } else {
+ yyv10 := &x.DefaultProfileImage
+ yym11 := z.DecBinary()
+ _ = yym11
+ if false {
+ } else {
+ *((*bool)(yyv10)) = r.DecodeBool()
+ }
+ }
+ case "description":
+ if r.TryDecodeAsNil() {
+ x.Description = ""
+ } else {
+ yyv12 := &x.Description
+ yym13 := z.DecBinary()
+ _ = yym13
+ if false {
+ } else {
+ *((*string)(yyv12)) = r.DecodeString()
+ }
+ }
+ case "entities":
+ if r.TryDecodeAsNil() {
+ x.Entities = UserEntities{}
+ } else {
+ yyv14 := &x.Entities
+ yyv14.CodecDecodeSelf(d)
+ }
+ case "favourites_count":
+ if r.TryDecodeAsNil() {
+ x.FavouritesCount = 0
+ } else {
+ yyv15 := &x.FavouritesCount
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*int)(yyv15)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ case "follow_request_sent":
+ if r.TryDecodeAsNil() {
+ if x.FollowRequestSent != nil {
+ x.FollowRequestSent = nil
+ }
+ } else {
+ if x.FollowRequestSent == nil {
+ x.FollowRequestSent = new(string)
+ }
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ *((*string)(x.FollowRequestSent)) = r.DecodeString()
+ }
+ }
+ case "followers_count":
+ if r.TryDecodeAsNil() {
+ x.FollowersCount = 0
+ } else {
+ yyv19 := &x.FollowersCount
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ *((*int)(yyv19)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ case "following":
+ if r.TryDecodeAsNil() {
+ if x.Following != nil {
+ x.Following = nil
+ }
+ } else {
+ if x.Following == nil {
+ x.Following = new(string)
+ }
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ *((*string)(x.Following)) = r.DecodeString()
+ }
+ }
+ case "friends_count":
+ if r.TryDecodeAsNil() {
+ x.FriendsCount = 0
+ } else {
+ yyv23 := &x.FriendsCount
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ *((*int)(yyv23)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ case "geo_enabled":
+ if r.TryDecodeAsNil() {
+ x.GeoEnabled = false
+ } else {
+ yyv25 := &x.GeoEnabled
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ *((*bool)(yyv25)) = r.DecodeBool()
+ }
+ }
+ case "id":
+ if r.TryDecodeAsNil() {
+ x.ID = 0
+ } else {
+ yyv27 := &x.ID
+ yym28 := z.DecBinary()
+ _ = yym28
+ if false {
+ } else {
+ *((*int)(yyv27)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ case "id_str":
+ if r.TryDecodeAsNil() {
+ x.IDStr = ""
+ } else {
+ yyv29 := &x.IDStr
+ yym30 := z.DecBinary()
+ _ = yym30
+ if false {
+ } else {
+ *((*string)(yyv29)) = r.DecodeString()
+ }
+ }
+ case "is_translator":
+ if r.TryDecodeAsNil() {
+ x.IsTranslator = false
+ } else {
+ yyv31 := &x.IsTranslator
+ yym32 := z.DecBinary()
+ _ = yym32
+ if false {
+ } else {
+ *((*bool)(yyv31)) = r.DecodeBool()
+ }
+ }
+ case "lang":
+ if r.TryDecodeAsNil() {
+ x.Lang = ""
+ } else {
+ yyv33 := &x.Lang
+ yym34 := z.DecBinary()
+ _ = yym34
+ if false {
+ } else {
+ *((*string)(yyv33)) = r.DecodeString()
+ }
+ }
+ case "listed_count":
+ if r.TryDecodeAsNil() {
+ x.ListedCount = 0
+ } else {
+ yyv35 := &x.ListedCount
+ yym36 := z.DecBinary()
+ _ = yym36
+ if false {
+ } else {
+ *((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ case "location":
+ if r.TryDecodeAsNil() {
+ x.Location = ""
+ } else {
+ yyv37 := &x.Location
+ yym38 := z.DecBinary()
+ _ = yym38
+ if false {
+ } else {
+ *((*string)(yyv37)) = r.DecodeString()
+ }
+ }
+ case "name":
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ yyv39 := &x.Name
+ yym40 := z.DecBinary()
+ _ = yym40
+ if false {
+ } else {
+ *((*string)(yyv39)) = r.DecodeString()
+ }
+ }
+ case "notifications":
+ if r.TryDecodeAsNil() {
+ if x.Notifications != nil {
+ x.Notifications = nil
+ }
+ } else {
+ if x.Notifications == nil {
+ x.Notifications = new(string)
+ }
+ yym42 := z.DecBinary()
+ _ = yym42
+ if false {
+ } else {
+ *((*string)(x.Notifications)) = r.DecodeString()
+ }
+ }
+ case "profile_background_color":
+ if r.TryDecodeAsNil() {
+ x.ProfileBackgroundColor = ""
+ } else {
+ yyv43 := &x.ProfileBackgroundColor
+ yym44 := z.DecBinary()
+ _ = yym44
+ if false {
+ } else {
+ *((*string)(yyv43)) = r.DecodeString()
+ }
+ }
+ case "profile_background_image_url":
+ if r.TryDecodeAsNil() {
+ x.ProfileBackgroundImageURL = ""
+ } else {
+ yyv45 := &x.ProfileBackgroundImageURL
+ yym46 := z.DecBinary()
+ _ = yym46
+ if false {
+ } else {
+ *((*string)(yyv45)) = r.DecodeString()
+ }
+ }
+ case "profile_background_image_url_https":
+ if r.TryDecodeAsNil() {
+ x.ProfileBackgroundImageURLHTTPS = ""
+ } else {
+ yyv47 := &x.ProfileBackgroundImageURLHTTPS
+ yym48 := z.DecBinary()
+ _ = yym48
+ if false {
+ } else {
+ *((*string)(yyv47)) = r.DecodeString()
+ }
+ }
+ case "profile_background_tile":
+ if r.TryDecodeAsNil() {
+ x.ProfileBackgroundTile = false
+ } else {
+ yyv49 := &x.ProfileBackgroundTile
+ yym50 := z.DecBinary()
+ _ = yym50
+ if false {
+ } else {
+ *((*bool)(yyv49)) = r.DecodeBool()
+ }
+ }
+ case "profile_image_url":
+ if r.TryDecodeAsNil() {
+ x.ProfileImageURL = ""
+ } else {
+ yyv51 := &x.ProfileImageURL
+ yym52 := z.DecBinary()
+ _ = yym52
+ if false {
+ } else {
+ *((*string)(yyv51)) = r.DecodeString()
+ }
+ }
+ case "profile_image_url_https":
+ if r.TryDecodeAsNil() {
+ x.ProfileImageURLHTTPS = ""
+ } else {
+ yyv53 := &x.ProfileImageURLHTTPS
+ yym54 := z.DecBinary()
+ _ = yym54
+ if false {
+ } else {
+ *((*string)(yyv53)) = r.DecodeString()
+ }
+ }
+ case "profile_link_color":
+ if r.TryDecodeAsNil() {
+ x.ProfileLinkColor = ""
+ } else {
+ yyv55 := &x.ProfileLinkColor
+ yym56 := z.DecBinary()
+ _ = yym56
+ if false {
+ } else {
+ *((*string)(yyv55)) = r.DecodeString()
+ }
+ }
+ case "profile_sidebar_border_color":
+ if r.TryDecodeAsNil() {
+ x.ProfileSidebarBorderColor = ""
+ } else {
+ yyv57 := &x.ProfileSidebarBorderColor
+ yym58 := z.DecBinary()
+ _ = yym58
+ if false {
+ } else {
+ *((*string)(yyv57)) = r.DecodeString()
+ }
+ }
+ case "profile_sidebar_fill_color":
+ if r.TryDecodeAsNil() {
+ x.ProfileSidebarFillColor = ""
+ } else {
+ yyv59 := &x.ProfileSidebarFillColor
+ yym60 := z.DecBinary()
+ _ = yym60
+ if false {
+ } else {
+ *((*string)(yyv59)) = r.DecodeString()
+ }
+ }
+ case "profile_text_color":
+ if r.TryDecodeAsNil() {
+ x.ProfileTextColor = ""
+ } else {
+ yyv61 := &x.ProfileTextColor
+ yym62 := z.DecBinary()
+ _ = yym62
+ if false {
+ } else {
+ *((*string)(yyv61)) = r.DecodeString()
+ }
+ }
+ case "profile_use_background_image":
+ if r.TryDecodeAsNil() {
+ x.ProfileUseBackgroundImage = false
+ } else {
+ yyv63 := &x.ProfileUseBackgroundImage
+ yym64 := z.DecBinary()
+ _ = yym64
+ if false {
+ } else {
+ *((*bool)(yyv63)) = r.DecodeBool()
+ }
+ }
+ case "protected":
+ if r.TryDecodeAsNil() {
+ x.Protected = false
+ } else {
+ yyv65 := &x.Protected
+ yym66 := z.DecBinary()
+ _ = yym66
+ if false {
+ } else {
+ *((*bool)(yyv65)) = r.DecodeBool()
+ }
+ }
+ case "screen_name":
+ if r.TryDecodeAsNil() {
+ x.ScreenName = ""
+ } else {
+ yyv67 := &x.ScreenName
+ yym68 := z.DecBinary()
+ _ = yym68
+ if false {
+ } else {
+ *((*string)(yyv67)) = r.DecodeString()
+ }
+ }
+ case "show_all_inline_media":
+ if r.TryDecodeAsNil() {
+ x.ShowAllInlineMedia = false
+ } else {
+ yyv69 := &x.ShowAllInlineMedia
+ yym70 := z.DecBinary()
+ _ = yym70
+ if false {
+ } else {
+ *((*bool)(yyv69)) = r.DecodeBool()
+ }
+ }
+ case "statuses_count":
+ if r.TryDecodeAsNil() {
+ x.StatusesCount = 0
+ } else {
+ yyv71 := &x.StatusesCount
+ yym72 := z.DecBinary()
+ _ = yym72
+ if false {
+ } else {
+ *((*int)(yyv71)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ case "time_zone":
+ if r.TryDecodeAsNil() {
+ x.TimeZone = ""
+ } else {
+ yyv73 := &x.TimeZone
+ yym74 := z.DecBinary()
+ _ = yym74
+ if false {
+ } else {
+ *((*string)(yyv73)) = r.DecodeString()
+ }
+ }
+ case "url":
+ if r.TryDecodeAsNil() {
+ if x.URL != nil {
+ x.URL = nil
+ }
+ } else {
+ if x.URL == nil {
+ x.URL = new(string)
+ }
+ yym76 := z.DecBinary()
+ _ = yym76
+ if false {
+ } else {
+ *((*string)(x.URL)) = r.DecodeString()
+ }
+ }
+ case "utc_offset":
+ if r.TryDecodeAsNil() {
+ x.UtcOffset = 0
+ } else {
+ yyv77 := &x.UtcOffset
+ yym78 := z.DecBinary()
+ _ = yym78
+ if false {
+ } else {
+ *((*int)(yyv77)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ case "verified":
+ if r.TryDecodeAsNil() {
+ x.Verified = false
+ } else {
+ yyv79 := &x.Verified
+ yym80 := z.DecBinary()
+ _ = yym80
+ if false {
+ } else {
+ *((*bool)(yyv79)) = r.DecodeBool()
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
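+// codecDecodeSelfFromArray decodes a User from an array container, consuming
+// elements positionally in field order. A negative l means the length is not
+// known up front, so r.CheckBreak() is polled before each element; any extra
+// trailing elements are drained through DecStructFieldNotFound.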
+func (x *User) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj81 int
+ var yyb81 bool
+ var yyhl81 bool = l >= 0
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ContributorsEnabled = false
+ } else {
+ yyv82 := &x.ContributorsEnabled
+ yym83 := z.DecBinary()
+ _ = yym83
+ if false {
+ } else {
+ *((*bool)(yyv82)) = r.DecodeBool()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.CreatedAt = ""
+ } else {
+ yyv84 := &x.CreatedAt
+ yym85 := z.DecBinary()
+ _ = yym85
+ if false {
+ } else {
+ *((*string)(yyv84)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.DefaultProfile = false
+ } else {
+ yyv86 := &x.DefaultProfile
+ yym87 := z.DecBinary()
+ _ = yym87
+ if false {
+ } else {
+ *((*bool)(yyv86)) = r.DecodeBool()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.DefaultProfileImage = false
+ } else {
+ yyv88 := &x.DefaultProfileImage
+ yym89 := z.DecBinary()
+ _ = yym89
+ if false {
+ } else {
+ *((*bool)(yyv88)) = r.DecodeBool()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Description = ""
+ } else {
+ yyv90 := &x.Description
+ yym91 := z.DecBinary()
+ _ = yym91
+ if false {
+ } else {
+ *((*string)(yyv90)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Entities = UserEntities{}
+ } else {
+ yyv92 := &x.Entities
+ yyv92.CodecDecodeSelf(d)
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.FavouritesCount = 0
+ } else {
+ yyv93 := &x.FavouritesCount
+ yym94 := z.DecBinary()
+ _ = yym94
+ if false {
+ } else {
+ *((*int)(yyv93)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.FollowRequestSent != nil {
+ x.FollowRequestSent = nil
+ }
+ } else {
+ if x.FollowRequestSent == nil {
+ x.FollowRequestSent = new(string)
+ }
+ yym96 := z.DecBinary()
+ _ = yym96
+ if false {
+ } else {
+ *((*string)(x.FollowRequestSent)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.FollowersCount = 0
+ } else {
+ yyv97 := &x.FollowersCount
+ yym98 := z.DecBinary()
+ _ = yym98
+ if false {
+ } else {
+ *((*int)(yyv97)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.Following != nil {
+ x.Following = nil
+ }
+ } else {
+ if x.Following == nil {
+ x.Following = new(string)
+ }
+ yym100 := z.DecBinary()
+ _ = yym100
+ if false {
+ } else {
+ *((*string)(x.Following)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.FriendsCount = 0
+ } else {
+ yyv101 := &x.FriendsCount
+ yym102 := z.DecBinary()
+ _ = yym102
+ if false {
+ } else {
+ *((*int)(yyv101)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.GeoEnabled = false
+ } else {
+ yyv103 := &x.GeoEnabled
+ yym104 := z.DecBinary()
+ _ = yym104
+ if false {
+ } else {
+ *((*bool)(yyv103)) = r.DecodeBool()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ID = 0
+ } else {
+ yyv105 := &x.ID
+ yym106 := z.DecBinary()
+ _ = yym106
+ if false {
+ } else {
+ *((*int)(yyv105)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.IDStr = ""
+ } else {
+ yyv107 := &x.IDStr
+ yym108 := z.DecBinary()
+ _ = yym108
+ if false {
+ } else {
+ *((*string)(yyv107)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.IsTranslator = false
+ } else {
+ yyv109 := &x.IsTranslator
+ yym110 := z.DecBinary()
+ _ = yym110
+ if false {
+ } else {
+ *((*bool)(yyv109)) = r.DecodeBool()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Lang = ""
+ } else {
+ yyv111 := &x.Lang
+ yym112 := z.DecBinary()
+ _ = yym112
+ if false {
+ } else {
+ *((*string)(yyv111)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ListedCount = 0
+ } else {
+ yyv113 := &x.ListedCount
+ yym114 := z.DecBinary()
+ _ = yym114
+ if false {
+ } else {
+ *((*int)(yyv113)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Location = ""
+ } else {
+ yyv115 := &x.Location
+ yym116 := z.DecBinary()
+ _ = yym116
+ if false {
+ } else {
+ *((*string)(yyv115)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Name = ""
+ } else {
+ yyv117 := &x.Name
+ yym118 := z.DecBinary()
+ _ = yym118
+ if false {
+ } else {
+ *((*string)(yyv117)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.Notifications != nil {
+ x.Notifications = nil
+ }
+ } else {
+ if x.Notifications == nil {
+ x.Notifications = new(string)
+ }
+ yym120 := z.DecBinary()
+ _ = yym120
+ if false {
+ } else {
+ *((*string)(x.Notifications)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ProfileBackgroundColor = ""
+ } else {
+ yyv121 := &x.ProfileBackgroundColor
+ yym122 := z.DecBinary()
+ _ = yym122
+ if false {
+ } else {
+ *((*string)(yyv121)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ProfileBackgroundImageURL = ""
+ } else {
+ yyv123 := &x.ProfileBackgroundImageURL
+ yym124 := z.DecBinary()
+ _ = yym124
+ if false {
+ } else {
+ *((*string)(yyv123)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ProfileBackgroundImageURLHTTPS = ""
+ } else {
+ yyv125 := &x.ProfileBackgroundImageURLHTTPS
+ yym126 := z.DecBinary()
+ _ = yym126
+ if false {
+ } else {
+ *((*string)(yyv125)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ProfileBackgroundTile = false
+ } else {
+ yyv127 := &x.ProfileBackgroundTile
+ yym128 := z.DecBinary()
+ _ = yym128
+ if false {
+ } else {
+ *((*bool)(yyv127)) = r.DecodeBool()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ProfileImageURL = ""
+ } else {
+ yyv129 := &x.ProfileImageURL
+ yym130 := z.DecBinary()
+ _ = yym130
+ if false {
+ } else {
+ *((*string)(yyv129)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ProfileImageURLHTTPS = ""
+ } else {
+ yyv131 := &x.ProfileImageURLHTTPS
+ yym132 := z.DecBinary()
+ _ = yym132
+ if false {
+ } else {
+ *((*string)(yyv131)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ProfileLinkColor = ""
+ } else {
+ yyv133 := &x.ProfileLinkColor
+ yym134 := z.DecBinary()
+ _ = yym134
+ if false {
+ } else {
+ *((*string)(yyv133)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ProfileSidebarBorderColor = ""
+ } else {
+ yyv135 := &x.ProfileSidebarBorderColor
+ yym136 := z.DecBinary()
+ _ = yym136
+ if false {
+ } else {
+ *((*string)(yyv135)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ProfileSidebarFillColor = ""
+ } else {
+ yyv137 := &x.ProfileSidebarFillColor
+ yym138 := z.DecBinary()
+ _ = yym138
+ if false {
+ } else {
+ *((*string)(yyv137)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ProfileTextColor = ""
+ } else {
+ yyv139 := &x.ProfileTextColor
+ yym140 := z.DecBinary()
+ _ = yym140
+ if false {
+ } else {
+ *((*string)(yyv139)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ProfileUseBackgroundImage = false
+ } else {
+ yyv141 := &x.ProfileUseBackgroundImage
+ yym142 := z.DecBinary()
+ _ = yym142
+ if false {
+ } else {
+ *((*bool)(yyv141)) = r.DecodeBool()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Protected = false
+ } else {
+ yyv143 := &x.Protected
+ yym144 := z.DecBinary()
+ _ = yym144
+ if false {
+ } else {
+ *((*bool)(yyv143)) = r.DecodeBool()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ScreenName = ""
+ } else {
+ yyv145 := &x.ScreenName
+ yym146 := z.DecBinary()
+ _ = yym146
+ if false {
+ } else {
+ *((*string)(yyv145)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ShowAllInlineMedia = false
+ } else {
+ yyv147 := &x.ShowAllInlineMedia
+ yym148 := z.DecBinary()
+ _ = yym148
+ if false {
+ } else {
+ *((*bool)(yyv147)) = r.DecodeBool()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.StatusesCount = 0
+ } else {
+ yyv149 := &x.StatusesCount
+ yym150 := z.DecBinary()
+ _ = yym150
+ if false {
+ } else {
+ *((*int)(yyv149)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.TimeZone = ""
+ } else {
+ yyv151 := &x.TimeZone
+ yym152 := z.DecBinary()
+ _ = yym152
+ if false {
+ } else {
+ *((*string)(yyv151)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.URL != nil {
+ x.URL = nil
+ }
+ } else {
+ if x.URL == nil {
+ x.URL = new(string)
+ }
+ yym154 := z.DecBinary()
+ _ = yym154
+ if false {
+ } else {
+ *((*string)(x.URL)) = r.DecodeString()
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.UtcOffset = 0
+ } else {
+ yyv155 := &x.UtcOffset
+ yym156 := z.DecBinary()
+ _ = yym156
+ if false {
+ } else {
+ *((*int)(yyv155)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Verified = false
+ } else {
+ yyv157 := &x.Verified
+ yym158 := z.DecBinary()
+ _ = yym158
+ if false {
+ } else {
+ *((*bool)(yyv157)) = r.DecodeBool()
+ }
+ }
+ for {
+ yyj81++
+ if yyhl81 {
+ yyb81 = yyj81 > l
+ } else {
+ yyb81 = r.CheckBreak()
+ }
+ if yyb81 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj81-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
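+// CodecEncodeSelf encodes a StatusMetadata either as a fixed two-element
+// array (when the handle's StructToArray option is set) or as a map keyed by
+// "iso_language_code" and "result_type".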
+func (x *StatusMetadata) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.IsoLanguageCode))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("iso_language_code"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.IsoLanguageCode))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ResultType))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("result_type"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym8 := z.EncBinary()
+ _ = yym8
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.ResultType))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
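+// CodecDecodeSelf dispatches on the incoming container type: maps and arrays
+// are routed to the dedicated decoders below, and anything else panics, since
+// a struct can only be decoded from a map or an array.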
+func (x *StatusMetadata) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
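+// codecDecodeSelfFromMap mirrors the User map decoder above for the two
+// StatusMetadata fields.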
+func (x *StatusMetadata) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "iso_language_code":
+ if r.TryDecodeAsNil() {
+ x.IsoLanguageCode = ""
+ } else {
+ yyv4 := &x.IsoLanguageCode
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*string)(yyv4)) = r.DecodeString()
+ }
+ }
+ case "result_type":
+ if r.TryDecodeAsNil() {
+ x.ResultType = ""
+ } else {
+ yyv6 := &x.ResultType
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*string)(yyv6)) = r.DecodeString()
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
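+// codecDecodeSelfFromArray consumes iso_language_code and result_type
+// positionally, then drains any unexpected trailing elements.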
+func (x *StatusMetadata) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj8 int
+ var yyb8 bool
+ var yyhl8 bool = l >= 0
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.IsoLanguageCode = ""
+ } else {
+ yyv9 := &x.IsoLanguageCode
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ *((*string)(yyv9)) = r.DecodeString()
+ }
+ }
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ResultType = ""
+ } else {
+ yyv11 := &x.ResultType
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *((*string)(yyv11)) = r.DecodeString()
+ }
+ }
+ for {
+ yyj8++
+ if yyhl8 {
+ yyb8 = yyj8 > l
+ } else {
+ yyb8 = r.CheckBreak()
+ }
+ if yyb8 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj8-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
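+// CodecEncodeSelf encodes the 21 Status fields, emitting nil for unset
+// optional pointer fields (Contributors, Coordinates, Geo, the in_reply_to_*
+// group, and Place) and delegating Entities, Metadata, and User to their own
+// generated encoders.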
+func (x *Status) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [21]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(21)
+ } else {
+ yynn2 = 21
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Contributors == nil {
+ r.EncodeNil()
+ } else {
+ yy4 := *x.Contributors
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy4))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("contributors"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Contributors == nil {
+ r.EncodeNil()
+ } else {
+ yy6 := *x.Contributors
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy6))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Coordinates == nil {
+ r.EncodeNil()
+ } else {
+ yy9 := *x.Coordinates
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy9))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("coordinates"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Coordinates == nil {
+ r.EncodeNil()
+ } else {
+ yy11 := *x.Coordinates
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy11))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym14 := z.EncBinary()
+ _ = yym14
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.CreatedAt))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("created_at"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym15 := z.EncBinary()
+ _ = yym15
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.CreatedAt))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yy17 := &x.Entities
+ yy17.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("entities"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yy19 := &x.Entities
+ yy19.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym22 := z.EncBinary()
+ _ = yym22
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Favorited))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("favorited"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym23 := z.EncBinary()
+ _ = yym23
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Favorited))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Geo == nil {
+ r.EncodeNil()
+ } else {
+ yy25 := *x.Geo
+ yym26 := z.EncBinary()
+ _ = yym26
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy25))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("geo"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Geo == nil {
+ r.EncodeNil()
+ } else {
+ yy27 := *x.Geo
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy27))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym30 := z.EncBinary()
+ _ = yym30
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ID))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("id"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else {
+ r.EncodeInt(int64(x.ID))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym33 := z.EncBinary()
+ _ = yym33
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.IDStr))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("id_str"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym34 := z.EncBinary()
+ _ = yym34
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.IDStr))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.InReplyToScreenName == nil {
+ r.EncodeNil()
+ } else {
+ yy36 := *x.InReplyToScreenName
+ yym37 := z.EncBinary()
+ _ = yym37
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy36))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("in_reply_to_screen_name"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.InReplyToScreenName == nil {
+ r.EncodeNil()
+ } else {
+ yy38 := *x.InReplyToScreenName
+ yym39 := z.EncBinary()
+ _ = yym39
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy38))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.InReplyToStatusID == nil {
+ r.EncodeNil()
+ } else {
+ yy41 := *x.InReplyToStatusID
+ yym42 := z.EncBinary()
+ _ = yym42
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy41))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("in_reply_to_status_id"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.InReplyToStatusID == nil {
+ r.EncodeNil()
+ } else {
+ yy43 := *x.InReplyToStatusID
+ yym44 := z.EncBinary()
+ _ = yym44
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy43))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.InReplyToStatusIDStr == nil {
+ r.EncodeNil()
+ } else {
+ yy46 := *x.InReplyToStatusIDStr
+ yym47 := z.EncBinary()
+ _ = yym47
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy46))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("in_reply_to_status_id_str"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.InReplyToStatusIDStr == nil {
+ r.EncodeNil()
+ } else {
+ yy48 := *x.InReplyToStatusIDStr
+ yym49 := z.EncBinary()
+ _ = yym49
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy48))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.InReplyToUserID == nil {
+ r.EncodeNil()
+ } else {
+ yy51 := *x.InReplyToUserID
+ yym52 := z.EncBinary()
+ _ = yym52
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy51))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("in_reply_to_user_id"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.InReplyToUserID == nil {
+ r.EncodeNil()
+ } else {
+ yy53 := *x.InReplyToUserID
+ yym54 := z.EncBinary()
+ _ = yym54
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy53))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.InReplyToUserIDStr == nil {
+ r.EncodeNil()
+ } else {
+ yy56 := *x.InReplyToUserIDStr
+ yym57 := z.EncBinary()
+ _ = yym57
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy56))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("in_reply_to_user_id_str"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.InReplyToUserIDStr == nil {
+ r.EncodeNil()
+ } else {
+ yy58 := *x.InReplyToUserIDStr
+ yym59 := z.EncBinary()
+ _ = yym59
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy58))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yy61 := &x.Metadata
+ yy61.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yy63 := &x.Metadata
+ yy63.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Place == nil {
+ r.EncodeNil()
+ } else {
+ yy66 := *x.Place
+ yym67 := z.EncBinary()
+ _ = yym67
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy66))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("place"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Place == nil {
+ r.EncodeNil()
+ } else {
+ yy68 := *x.Place
+ yym69 := z.EncBinary()
+ _ = yym69
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy68))
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym71 := z.EncBinary()
+ _ = yym71
+ if false {
+ } else {
+ r.EncodeInt(int64(x.RetweetCount))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("retweet_count"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym72 := z.EncBinary()
+ _ = yym72
+ if false {
+ } else {
+ r.EncodeInt(int64(x.RetweetCount))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym74 := z.EncBinary()
+ _ = yym74
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Retweeted))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("retweeted"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym75 := z.EncBinary()
+ _ = yym75
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Retweeted))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym77 := z.EncBinary()
+ _ = yym77
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Source))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("source"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym78 := z.EncBinary()
+ _ = yym78
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Source))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym80 := z.EncBinary()
+ _ = yym80
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Text))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("text"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym81 := z.EncBinary()
+ _ = yym81
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(x.Text))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yym83 := z.EncBinary()
+ _ = yym83
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Truncated))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("truncated"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yym84 := z.EncBinary()
+ _ = yym84
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Truncated))
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yy86 := &x.User
+ yy86.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("user"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yy88 := &x.User
+ yy88.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
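+// CodecDecodeSelf performs the same map-or-array container dispatch as the
+// other generated types.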
+func (x *Status) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
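+// codecDecodeSelfFromMap decodes Status fields by JSON key; nil values clear
+// the optional pointer fields, and non-nil values allocate them on demand
+// before decoding into them.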
+func (x *Status) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "contributors":
+ if r.TryDecodeAsNil() {
+ if x.Contributors != nil {
+ x.Contributors = nil
+ }
+ } else {
+ if x.Contributors == nil {
+ x.Contributors = new(string)
+ }
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*string)(x.Contributors)) = r.DecodeString()
+ }
+ }
+ case "coordinates":
+ if r.TryDecodeAsNil() {
+ if x.Coordinates != nil {
+ x.Coordinates = nil
+ }
+ } else {
+ if x.Coordinates == nil {
+ x.Coordinates = new(string)
+ }
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*string)(x.Coordinates)) = r.DecodeString()
+ }
+ }
+ case "created_at":
+ if r.TryDecodeAsNil() {
+ x.CreatedAt = ""
+ } else {
+ yyv8 := &x.CreatedAt
+ yym9 := z.DecBinary()
+ _ = yym9
+ if false {
+ } else {
+ *((*string)(yyv8)) = r.DecodeString()
+ }
+ }
+ case "entities":
+ if r.TryDecodeAsNil() {
+ x.Entities = Entities{}
+ } else {
+ yyv10 := &x.Entities
+ yyv10.CodecDecodeSelf(d)
+ }
+ case "favorited":
+ if r.TryDecodeAsNil() {
+ x.Favorited = false
+ } else {
+ yyv11 := &x.Favorited
+ yym12 := z.DecBinary()
+ _ = yym12
+ if false {
+ } else {
+ *((*bool)(yyv11)) = r.DecodeBool()
+ }
+ }
+ case "geo":
+ if r.TryDecodeAsNil() {
+ if x.Geo != nil {
+ x.Geo = nil
+ }
+ } else {
+ if x.Geo == nil {
+ x.Geo = new(string)
+ }
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else {
+ *((*string)(x.Geo)) = r.DecodeString()
+ }
+ }
+ case "id":
+ if r.TryDecodeAsNil() {
+ x.ID = 0
+ } else {
+ yyv15 := &x.ID
+ yym16 := z.DecBinary()
+ _ = yym16
+ if false {
+ } else {
+ *((*int64)(yyv15)) = int64(r.DecodeInt(64))
+ }
+ }
+ case "id_str":
+ if r.TryDecodeAsNil() {
+ x.IDStr = ""
+ } else {
+ yyv17 := &x.IDStr
+ yym18 := z.DecBinary()
+ _ = yym18
+ if false {
+ } else {
+ *((*string)(yyv17)) = r.DecodeString()
+ }
+ }
+ case "in_reply_to_screen_name":
+ if r.TryDecodeAsNil() {
+ if x.InReplyToScreenName != nil {
+ x.InReplyToScreenName = nil
+ }
+ } else {
+ if x.InReplyToScreenName == nil {
+ x.InReplyToScreenName = new(string)
+ }
+ yym20 := z.DecBinary()
+ _ = yym20
+ if false {
+ } else {
+ *((*string)(x.InReplyToScreenName)) = r.DecodeString()
+ }
+ }
+ case "in_reply_to_status_id":
+ if r.TryDecodeAsNil() {
+ if x.InReplyToStatusID != nil {
+ x.InReplyToStatusID = nil
+ }
+ } else {
+ if x.InReplyToStatusID == nil {
+ x.InReplyToStatusID = new(string)
+ }
+ yym22 := z.DecBinary()
+ _ = yym22
+ if false {
+ } else {
+ *((*string)(x.InReplyToStatusID)) = r.DecodeString()
+ }
+ }
+ case "in_reply_to_status_id_str":
+ if r.TryDecodeAsNil() {
+ if x.InReplyToStatusIDStr != nil {
+ x.InReplyToStatusIDStr = nil
+ }
+ } else {
+ if x.InReplyToStatusIDStr == nil {
+ x.InReplyToStatusIDStr = new(string)
+ }
+ yym24 := z.DecBinary()
+ _ = yym24
+ if false {
+ } else {
+ *((*string)(x.InReplyToStatusIDStr)) = r.DecodeString()
+ }
+ }
+ case "in_reply_to_user_id":
+ if r.TryDecodeAsNil() {
+ if x.InReplyToUserID != nil {
+ x.InReplyToUserID = nil
+ }
+ } else {
+ if x.InReplyToUserID == nil {
+ x.InReplyToUserID = new(string)
+ }
+ yym26 := z.DecBinary()
+ _ = yym26
+ if false {
+ } else {
+ *((*string)(x.InReplyToUserID)) = r.DecodeString()
+ }
+ }
+ case "in_reply_to_user_id_str":
+ if r.TryDecodeAsNil() {
+ if x.InReplyToUserIDStr != nil {
+ x.InReplyToUserIDStr = nil
+ }
+ } else {
+ if x.InReplyToUserIDStr == nil {
+ x.InReplyToUserIDStr = new(string)
+ }
+ yym28 := z.DecBinary()
+ _ = yym28
+ if false {
+ } else {
+ *((*string)(x.InReplyToUserIDStr)) = r.DecodeString()
+ }
+ }
+ case "metadata":
+ if r.TryDecodeAsNil() {
+ x.Metadata = StatusMetadata{}
+ } else {
+ yyv29 := &x.Metadata
+ yyv29.CodecDecodeSelf(d)
+ }
+ case "place":
+ if r.TryDecodeAsNil() {
+ if x.Place != nil {
+ x.Place = nil
+ }
+ } else {
+ if x.Place == nil {
+ x.Place = new(string)
+ }
+ yym31 := z.DecBinary()
+ _ = yym31
+ if false {
+ } else {
+ *((*string)(x.Place)) = r.DecodeString()
+ }
+ }
+ case "retweet_count":
+ if r.TryDecodeAsNil() {
+ x.RetweetCount = 0
+ } else {
+ yyv32 := &x.RetweetCount
+ yym33 := z.DecBinary()
+ _ = yym33
+ if false {
+ } else {
+ *((*int)(yyv32)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ case "retweeted":
+ if r.TryDecodeAsNil() {
+ x.Retweeted = false
+ } else {
+ yyv34 := &x.Retweeted
+ yym35 := z.DecBinary()
+ _ = yym35
+ if false {
+ } else {
+ *((*bool)(yyv34)) = r.DecodeBool()
+ }
+ }
+ case "source":
+ if r.TryDecodeAsNil() {
+ x.Source = ""
+ } else {
+ yyv36 := &x.Source
+ yym37 := z.DecBinary()
+ _ = yym37
+ if false {
+ } else {
+ *((*string)(yyv36)) = r.DecodeString()
+ }
+ }
+ case "text":
+ if r.TryDecodeAsNil() {
+ x.Text = ""
+ } else {
+ yyv38 := &x.Text
+ yym39 := z.DecBinary()
+ _ = yym39
+ if false {
+ } else {
+ *((*string)(yyv38)) = r.DecodeString()
+ }
+ }
+ case "truncated":
+ if r.TryDecodeAsNil() {
+ x.Truncated = false
+ } else {
+ yyv40 := &x.Truncated
+ yym41 := z.DecBinary()
+ _ = yym41
+ if false {
+ } else {
+ *((*bool)(yyv40)) = r.DecodeBool()
+ }
+ }
+ case "user":
+ if r.TryDecodeAsNil() {
+ x.User = User{}
+ } else {
+ yyv42 := &x.User
+ yyv42.CodecDecodeSelf(d)
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
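+// codecDecodeSelfFromArray decodes the 21 Status fields positionally, with
+// the usual CheckBreak polling when the array length is unknown.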
+func (x *Status) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj43 int
+ var yyb43 bool
+ var yyhl43 bool = l >= 0
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.Contributors != nil {
+ x.Contributors = nil
+ }
+ } else {
+ if x.Contributors == nil {
+ x.Contributors = new(string)
+ }
+ yym45 := z.DecBinary()
+ _ = yym45
+ if false {
+ } else {
+ *((*string)(x.Contributors)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.Coordinates != nil {
+ x.Coordinates = nil
+ }
+ } else {
+ if x.Coordinates == nil {
+ x.Coordinates = new(string)
+ }
+ yym47 := z.DecBinary()
+ _ = yym47
+ if false {
+ } else {
+ *((*string)(x.Coordinates)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.CreatedAt = ""
+ } else {
+ yyv48 := &x.CreatedAt
+ yym49 := z.DecBinary()
+ _ = yym49
+ if false {
+ } else {
+ *((*string)(yyv48)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Entities = Entities{}
+ } else {
+ yyv50 := &x.Entities
+ yyv50.CodecDecodeSelf(d)
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Favorited = false
+ } else {
+ yyv51 := &x.Favorited
+ yym52 := z.DecBinary()
+ _ = yym52
+ if false {
+ } else {
+ *((*bool)(yyv51)) = r.DecodeBool()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.Geo != nil {
+ x.Geo = nil
+ }
+ } else {
+ if x.Geo == nil {
+ x.Geo = new(string)
+ }
+ yym54 := z.DecBinary()
+ _ = yym54
+ if false {
+ } else {
+ *((*string)(x.Geo)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.ID = 0
+ } else {
+ yyv55 := &x.ID
+ yym56 := z.DecBinary()
+ _ = yym56
+ if false {
+ } else {
+ *((*int64)(yyv55)) = int64(r.DecodeInt(64))
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.IDStr = ""
+ } else {
+ yyv57 := &x.IDStr
+ yym58 := z.DecBinary()
+ _ = yym58
+ if false {
+ } else {
+ *((*string)(yyv57)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.InReplyToScreenName != nil {
+ x.InReplyToScreenName = nil
+ }
+ } else {
+ if x.InReplyToScreenName == nil {
+ x.InReplyToScreenName = new(string)
+ }
+ yym60 := z.DecBinary()
+ _ = yym60
+ if false {
+ } else {
+ *((*string)(x.InReplyToScreenName)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.InReplyToStatusID != nil {
+ x.InReplyToStatusID = nil
+ }
+ } else {
+ if x.InReplyToStatusID == nil {
+ x.InReplyToStatusID = new(string)
+ }
+ yym62 := z.DecBinary()
+ _ = yym62
+ if false {
+ } else {
+ *((*string)(x.InReplyToStatusID)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.InReplyToStatusIDStr != nil {
+ x.InReplyToStatusIDStr = nil
+ }
+ } else {
+ if x.InReplyToStatusIDStr == nil {
+ x.InReplyToStatusIDStr = new(string)
+ }
+ yym64 := z.DecBinary()
+ _ = yym64
+ if false {
+ } else {
+ *((*string)(x.InReplyToStatusIDStr)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.InReplyToUserID != nil {
+ x.InReplyToUserID = nil
+ }
+ } else {
+ if x.InReplyToUserID == nil {
+ x.InReplyToUserID = new(string)
+ }
+ yym66 := z.DecBinary()
+ _ = yym66
+ if false {
+ } else {
+ *((*string)(x.InReplyToUserID)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.InReplyToUserIDStr != nil {
+ x.InReplyToUserIDStr = nil
+ }
+ } else {
+ if x.InReplyToUserIDStr == nil {
+ x.InReplyToUserIDStr = new(string)
+ }
+ yym68 := z.DecBinary()
+ _ = yym68
+ if false {
+ } else {
+ *((*string)(x.InReplyToUserIDStr)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Metadata = StatusMetadata{}
+ } else {
+ yyv69 := &x.Metadata
+ yyv69.CodecDecodeSelf(d)
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ if x.Place != nil {
+ x.Place = nil
+ }
+ } else {
+ if x.Place == nil {
+ x.Place = new(string)
+ }
+ yym71 := z.DecBinary()
+ _ = yym71
+ if false {
+ } else {
+ *((*string)(x.Place)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.RetweetCount = 0
+ } else {
+ yyv72 := &x.RetweetCount
+ yym73 := z.DecBinary()
+ _ = yym73
+ if false {
+ } else {
+ *((*int)(yyv72)) = int(r.DecodeInt(codecSelferBitsize9225))
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Retweeted = false
+ } else {
+ yyv74 := &x.Retweeted
+ yym75 := z.DecBinary()
+ _ = yym75
+ if false {
+ } else {
+ *((*bool)(yyv74)) = r.DecodeBool()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Source = ""
+ } else {
+ yyv76 := &x.Source
+ yym77 := z.DecBinary()
+ _ = yym77
+ if false {
+ } else {
+ *((*string)(yyv76)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Text = ""
+ } else {
+ yyv78 := &x.Text
+ yym79 := z.DecBinary()
+ _ = yym79
+ if false {
+ } else {
+ *((*string)(yyv78)) = r.DecodeString()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Truncated = false
+ } else {
+ yyv80 := &x.Truncated
+ yym81 := z.DecBinary()
+ _ = yym81
+ if false {
+ } else {
+ *((*bool)(yyv80)) = r.DecodeBool()
+ }
+ }
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.User = User{}
+ } else {
+ yyv82 := &x.User
+ yyv82.CodecDecodeSelf(d)
+ }
+ for {
+ yyj43++
+ if yyhl43 {
+ yyb43 = yyj43 > l
+ } else {
+ yyb43 = r.CheckBreak()
+ }
+ if yyb43 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj43-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
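+// CodecEncodeSelf encodes a LargeStruct either as a two-element array or as a
+// map keyed by "search_metadata" and "statuses", depending on the handle's
+// StructToArray setting. The empty "if false" branches are generated
+// placeholders that keep the extension/fallback chains uniform.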
+func (x *LargeStruct) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [2]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(2)
+ } else {
+ yynn2 = 2
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yy4 := &x.SearchMetadata
+ yy4.CodecEncodeSelf(e)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("search_metadata"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ yy6 := &x.SearchMetadata
+ yy6.CodecEncodeSelf(e)
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Statuses == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ h.encSliceStatus(([]Status)(x.Statuses), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("statuses"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Statuses == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.encSliceStatus(([]Status)(x.Statuses), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
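+// CodecDecodeSelf inspects the container type in the stream and dispatches to
+// the map- or array-shaped decoder; any other container type panics with the
+// generated map-or-array error.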
+func (x *LargeStruct) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
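+// codecDecodeSelfFromMap decodes fields by key name, reusing a scratch buffer
+// for keys; unknown keys are reported via DecStructFieldNotFound and skipped.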
+func (x *LargeStruct) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "search_metadata":
+ if r.TryDecodeAsNil() {
+ x.SearchMetadata = SearchMetadata{}
+ } else {
+ yyv4 := &x.SearchMetadata
+ yyv4.CodecDecodeSelf(d)
+ }
+ case "statuses":
+ if r.TryDecodeAsNil() {
+ x.Statuses = nil
+ } else {
+ yyv5 := &x.Statuses
+ yym6 := z.DecBinary()
+ _ = yym6
+ if false {
+ } else {
+ h.decSliceStatus((*[]Status)(yyv5), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
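+// codecDecodeSelfFromArray decodes fields positionally; any surplus elements
+// beyond the known fields are consumed via DecStructFieldNotFound.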
+func (x *LargeStruct) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj7 int
+ var yyb7 bool
+ var yyhl7 bool = l >= 0
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.SearchMetadata = SearchMetadata{}
+ } else {
+ yyv8 := &x.SearchMetadata
+ yyv8.CodecDecodeSelf(d)
+ }
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Statuses = nil
+ } else {
+ yyv9 := &x.Statuses
+ yym10 := z.DecBinary()
+ _ = yym10
+ if false {
+ } else {
+ h.decSliceStatus((*[]Status)(yyv9), d)
+ }
+ }
+ for {
+ yyj7++
+ if yyhl7 {
+ yyb7 = yyj7 > l
+ } else {
+ yyb7 = r.CheckBreak()
+ }
+ if yyb7 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj7-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
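+// XLStruct follows the same generated encode/decode shape as LargeStruct,
+// with a single "Data" field holding a []LargeStruct.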
+func (x *XLStruct) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep2 := !z.EncBinary()
+ yy2arr2 := z.EncBasicHandle().StructToArray
+ var yyq2 [1]bool
+ _, _, _ = yysep2, yyq2, yy2arr2
+ const yyr2 bool = false
+ var yynn2 int
+ if yyr2 || yy2arr2 {
+ r.EncodeArrayStart(1)
+ } else {
+ yynn2 = 1
+ for _, b := range yyq2 {
+ if b {
+ yynn2++
+ }
+ }
+ r.EncodeMapStart(yynn2)
+ yynn2 = 0
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym4 := z.EncBinary()
+ _ = yym4
+ if false {
+ } else {
+ h.encSliceLargeStruct(([]LargeStruct)(x.Data), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey9225)
+ r.EncodeString(codecSelferC_UTF89225, string("Data"))
+ z.EncSendContainerState(codecSelfer_containerMapValue9225)
+ if x.Data == nil {
+ r.EncodeNil()
+ } else {
+ yym5 := z.EncBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.encSliceLargeStruct(([]LargeStruct)(x.Data), e)
+ }
+ }
+ }
+ if yyr2 || yy2arr2 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd9225)
+ }
+ }
+ }
+}
+
+func (x *XLStruct) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym1 := z.DecBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct2 := r.ContainerType()
+ if yyct2 == codecSelferValueTypeMap9225 {
+ yyl2 := r.ReadMapStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+ } else {
+ x.codecDecodeSelfFromMap(yyl2, d)
+ }
+ } else if yyct2 == codecSelferValueTypeArray9225 {
+ yyl2 := r.ReadArrayStart()
+ if yyl2 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ } else {
+ x.codecDecodeSelfFromArray(yyl2, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225)
+ }
+ }
+}
+
+func (x *XLStruct) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys3Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys3Slc
+ var yyhl3 bool = l >= 0
+ for yyj3 := 0; ; yyj3++ {
+ if yyhl3 {
+ if yyj3 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey9225)
+ yys3Slc = r.DecodeBytes(yys3Slc, true, true)
+ yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)}
+ yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr))
+ z.DecSendContainerState(codecSelfer_containerMapValue9225)
+ switch yys3 {
+ case "Data":
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv4 := &x.Data
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ h.decSliceLargeStruct((*[]LargeStruct)(yyv4), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys3)
+ } // end switch yys3
+ } // end for yyj3
+ z.DecSendContainerState(codecSelfer_containerMapEnd9225)
+}
+
+func (x *XLStruct) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj6 int
+ var yyb6 bool
+ var yyhl6 bool = l >= 0
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ if r.TryDecodeAsNil() {
+ x.Data = nil
+ } else {
+ yyv7 := &x.Data
+ yym8 := z.DecBinary()
+ _ = yym8
+ if false {
+ } else {
+ h.decSliceLargeStruct((*[]LargeStruct)(yyv7), d)
+ }
+ }
+ for {
+ yyj6++
+ if yyhl6 {
+ yyb6 = yyj6 > l
+ } else {
+ yyb6 = r.CheckBreak()
+ }
+ if yyb6 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem9225)
+ z.DecStructFieldNotFound(yyj6-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
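+// encSliceHashtag writes v as an array container, encoding each element with
+// its own CodecEncodeSelf; the same shape is generated for every slice helper
+// below.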
+func (x codecSelfer9225) encSliceHashtag(v []Hashtag, e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
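+// decSliceHashtag decodes into *v in place where possible: for length-prefixed
+// input it grows or truncates the slice (DecInferLen bounds preallocation; the
+// 40 here appears to be a per-element size hint), and for break-terminated
+// input it appends elements until CheckBreak fires.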
+func (x codecSelfer9225) decSliceHashtag(v *[]Hashtag, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Hashtag{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Hashtag, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Hashtag, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Hashtag{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Hashtag{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Hashtag{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Hashtag{}) // var yyz1 Hashtag
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Hashtag{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Hashtag{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer9225) encSlicePtrtostring(v []*string, e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ if yyv1 == nil {
+ r.EncodeNil()
+ } else {
+ yy2 := *yyv1
+ yym3 := z.EncBinary()
+ _ = yym3
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF89225, string(yy2))
+ }
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
+func (x codecSelfer9225) decSlicePtrtostring(v *[]*string, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []*string{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]*string, yyrl1)
+ }
+ } else {
+ yyv1 = make([]*string, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ if yyv1[yyj1] != nil {
+ *yyv1[yyj1] = ""
+ }
+ } else {
+ if yyv1[yyj1] == nil {
+ yyv1[yyj1] = new(string)
+ }
+ yyw2 := yyv1[yyj1]
+ yym3 := z.DecBinary()
+ _ = yym3
+ if false {
+ } else {
+ *((*string)(yyw2)) = r.DecodeString()
+ }
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, nil)
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ if yyv1[yyj1] != nil {
+ *yyv1[yyj1] = ""
+ }
+ } else {
+ if yyv1[yyj1] == nil {
+ yyv1[yyj1] = new(string)
+ }
+ yyw4 := yyv1[yyj1]
+ yym5 := z.DecBinary()
+ _ = yym5
+ if false {
+ } else {
+ *((*string)(yyw4)) = r.DecodeString()
+ }
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, nil) // var yyz1 *string
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ if yyv1[yyj1] != nil {
+ *yyv1[yyj1] = ""
+ }
+ } else {
+ if yyv1[yyj1] == nil {
+ yyv1[yyj1] = new(string)
+ }
+ yyw6 := yyv1[yyj1]
+ yym7 := z.DecBinary()
+ _ = yym7
+ if false {
+ } else {
+ *((*string)(yyw6)) = r.DecodeString()
+ }
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []*string{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer9225) encSliceURL(v []URL, e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
+func (x codecSelfer9225) decSliceURL(v *[]URL, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []URL{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]URL, yyrl1)
+ }
+ } else {
+ yyv1 = make([]URL, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = URL{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, URL{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = URL{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, URL{}) // var yyz1 URL
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = URL{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []URL{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer9225) encSliceStatus(v []Status, e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
+func (x codecSelfer9225) decSliceStatus(v *[]Status, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []Status{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 752)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]Status, yyrl1)
+ }
+ } else {
+ yyv1 = make([]Status, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Status{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, Status{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Status{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, Status{}) // var yyz1 Status
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = Status{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []Status{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
+
+func (x codecSelfer9225) encSliceLargeStruct(v []LargeStruct, e *codec1978.Encoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeArrayStart(len(v))
+ for _, yyv1 := range v {
+ z.EncSendContainerState(codecSelfer_containerArrayElem9225)
+ yy2 := &yyv1
+ yy2.CodecEncodeSelf(e)
+ }
+ z.EncSendContainerState(codecSelfer_containerArrayEnd9225)
+}
+
+func (x codecSelfer9225) decSliceLargeStruct(v *[]LargeStruct, d *codec1978.Decoder) {
+ var h codecSelfer9225
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+
+ yyv1 := *v
+ yyh1, yyl1 := z.DecSliceHelperStart()
+ var yyc1 bool
+ _ = yyc1
+ if yyl1 == 0 {
+ if yyv1 == nil {
+ yyv1 = []LargeStruct{}
+ yyc1 = true
+ } else if len(yyv1) != 0 {
+ yyv1 = yyv1[:0]
+ yyc1 = true
+ }
+ } else if yyl1 > 0 {
+ var yyrr1, yyrl1 int
+ var yyrt1 bool
+ _, _ = yyrl1, yyrt1
+ yyrr1 = yyl1 // len(yyv1)
+ if yyl1 > cap(yyv1) {
+
+ yyrg1 := len(yyv1) > 0
+ yyv21 := yyv1
+ yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 136)
+ if yyrt1 {
+ if yyrl1 <= cap(yyv1) {
+ yyv1 = yyv1[:yyrl1]
+ } else {
+ yyv1 = make([]LargeStruct, yyrl1)
+ }
+ } else {
+ yyv1 = make([]LargeStruct, yyrl1)
+ }
+ yyc1 = true
+ yyrr1 = len(yyv1)
+ if yyrg1 {
+ copy(yyv1, yyv21)
+ }
+ } else if yyl1 != len(yyv1) {
+ yyv1 = yyv1[:yyl1]
+ yyc1 = true
+ }
+ yyj1 := 0
+ for ; yyj1 < yyrr1; yyj1++ {
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LargeStruct{}
+ } else {
+ yyv2 := &yyv1[yyj1]
+ yyv2.CodecDecodeSelf(d)
+ }
+
+ }
+ if yyrt1 {
+ for ; yyj1 < yyl1; yyj1++ {
+ yyv1 = append(yyv1, LargeStruct{})
+ yyh1.ElemContainerState(yyj1)
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LargeStruct{}
+ } else {
+ yyv3 := &yyv1[yyj1]
+ yyv3.CodecDecodeSelf(d)
+ }
+
+ }
+ }
+
+ } else {
+ yyj1 := 0
+ for ; !r.CheckBreak(); yyj1++ {
+
+ if yyj1 >= len(yyv1) {
+ yyv1 = append(yyv1, LargeStruct{}) // var yyz1 LargeStruct
+ yyc1 = true
+ }
+ yyh1.ElemContainerState(yyj1)
+ if yyj1 < len(yyv1) {
+ if r.TryDecodeAsNil() {
+ yyv1[yyj1] = LargeStruct{}
+ } else {
+ yyv4 := &yyv1[yyj1]
+ yyv4.CodecDecodeSelf(d)
+ }
+
+ } else {
+ z.DecSwallow()
+ }
+
+ }
+ if yyj1 < len(yyv1) {
+ yyv1 = yyv1[:yyj1]
+ yyc1 = true
+ } else if yyj1 == 0 && yyv1 == nil {
+ yyv1 = []LargeStruct{}
+ yyc1 = true
+ }
+ }
+ yyh1.End()
+ if yyc1 {
+ *v = yyv1
+ }
+}
diff --git a/vendor/github.com/mailru/easyjson/benchmark/data_ffjson.go b/vendor/github.com/mailru/easyjson/benchmark/data_ffjson.go
new file mode 100644
index 000000000..9f000d3ad
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/data_ffjson.go
@@ -0,0 +1,6723 @@
+// +build use_ffjson
+
+// DO NOT EDIT!
+// Code generated by ffjson <https://github.com/pquerna/ffjson>
+// source: .root/src/github.com/mailru/easyjson/benchmark/data.go
+// DO NOT EDIT!
+
+package benchmark
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ fflib "github.com/pquerna/ffjson/fflib/v1"
+)
+
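+// MarshalJSON allocates a buffer and delegates to MarshalJSONBuf, which writes
+// the JSON encoding of Entities directly to the buffer without reflection.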
+func (mj *Entities) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+func (mj *Entities) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"hashtags":`)
+ if mj.Hashtags != nil {
+ buf.WriteString(`[`)
+ for i, v := range mj.Hashtags {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+
+ {
+
+ err = v.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteString(`,"urls":`)
+ if mj.Urls != nil {
+ buf.WriteString(`[`)
+ for i, v := range mj.Urls {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ if v != nil {
+ fflib.WriteJsonString(buf, string(*v))
+ } else {
+ buf.WriteString(`null`)
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteString(`,"user_mentions":`)
+ if mj.UserMentions != nil {
+ buf.WriteString(`[`)
+ for i, v := range mj.UserMentions {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ if v != nil {
+ fflib.WriteJsonString(buf, string(*v))
+ } else {
+ buf.WriteString(`null`)
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffj_t_Entitiesbase = iota
+ ffj_t_Entitiesno_such_key
+
+ ffj_t_Entities_Hashtags
+
+ ffj_t_Entities_Urls
+
+ ffj_t_Entities_UserMentions
+)
+
+var ffj_key_Entities_Hashtags = []byte("hashtags")
+
+var ffj_key_Entities_Urls = []byte("urls")
+
+var ffj_key_Entities_UserMentions = []byte("user_mentions")
+
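+// UnmarshalJSON wraps the generated lexer-driven parser below.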
+func (uj *Entities) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
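+// UnmarshalJSONFFLexer is a goto-based state machine: keys are matched first
+// by exact bytes on their leading character, then by case-insensitive folds,
+// and unknown keys are skipped via SkipField.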
+func (uj *Entities) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error = nil
+ currentKey := ffj_t_Entitiesbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffj_t_Entitiesno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'h':
+
+ if bytes.Equal(ffj_key_Entities_Hashtags, kn) {
+ currentKey = ffj_t_Entities_Hashtags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'u':
+
+ if bytes.Equal(ffj_key_Entities_Urls, kn) {
+ currentKey = ffj_t_Entities_Urls
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_Entities_UserMentions, kn) {
+ currentKey = ffj_t_Entities_UserMentions
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Entities_UserMentions, kn) {
+ currentKey = ffj_t_Entities_UserMentions
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Entities_Urls, kn) {
+ currentKey = ffj_t_Entities_Urls
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Entities_Hashtags, kn) {
+ currentKey = ffj_t_Entities_Hashtags
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffj_t_Entitiesno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_Entities_Hashtags:
+ goto handle_Hashtags
+
+ case ffj_t_Entities_Urls:
+ goto handle_Urls
+
+ case ffj_t_Entities_UserMentions:
+ goto handle_UserMentions
+
+ case ffj_t_Entitiesno_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Hashtags:
+
+ /* handler: uj.Hashtags type=[]benchmark.Hashtag kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ uj.Hashtags = nil
+ } else {
+
+ uj.Hashtags = make([]Hashtag, 0)
+
+ wantVal := true
+
+ for {
+
+ var tmp_uj__Hashtags Hashtag
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmp_uj__Hashtags type=benchmark.Hashtag kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+
+ err = tmp_uj__Hashtags.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ uj.Hashtags = append(uj.Hashtags, tmp_uj__Hashtags)
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Urls:
+
+ /* handler: uj.Urls type=[]*string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ uj.Urls = nil
+ } else {
+
+ uj.Urls = make([]*string, 0)
+
+ wantVal := true
+
+ for {
+
+ var tmp_uj__Urls *string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmp_uj__Urls type=*string kind=ptr quoted=false*/
+
+ {
+
+ if tok == fflib.FFTok_null {
+ tmp_uj__Urls = nil
+ } else {
+ if tmp_uj__Urls == nil {
+ tmp_uj__Urls = new(string)
+ }
+
+ /* handler: tmp_uj__Urls type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ tmp_uj__Urls = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(string(outBuf))
+ tmp_uj__Urls = &tval
+
+ }
+ }
+
+ }
+ }
+
+ uj.Urls = append(uj.Urls, tmp_uj__Urls)
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UserMentions:
+
+ /* handler: uj.UserMentions type=[]*string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ uj.UserMentions = nil
+ } else {
+
+ uj.UserMentions = make([]*string, 0)
+
+ wantVal := true
+
+ for {
+
+ var tmp_uj__UserMentions *string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmp_uj__UserMentions type=*string kind=ptr quoted=false*/
+
+ {
+
+ if tok == fflib.FFTok_null {
+ tmp_uj__UserMentions = nil
+ } else {
+ if tmp_uj__UserMentions == nil {
+ tmp_uj__UserMentions = new(string)
+ }
+
+ /* handler: tmp_uj__UserMentions type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ tmp_uj__UserMentions = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(string(outBuf))
+ tmp_uj__UserMentions = &tval
+
+ }
+ }
+
+ }
+ }
+
+ uj.UserMentions = append(uj.UserMentions, tmp_uj__UserMentions)
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
+
+func (mj *Hashtag) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+func (mj *Hashtag) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"indices":`)
+ if mj.Indices != nil {
+ buf.WriteString(`[`)
+ for i, v := range mj.Indices {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.FormatBits2(buf, uint64(v), 10, v < 0)
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteString(`,"text":`)
+ fflib.WriteJsonString(buf, string(mj.Text))
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffj_t_Hashtagbase = iota
+ ffj_t_Hashtagno_such_key
+
+ ffj_t_Hashtag_Indices
+
+ ffj_t_Hashtag_Text
+)
+
+var ffj_key_Hashtag_Indices = []byte("indices")
+
+var ffj_key_Hashtag_Text = []byte("text")
+
+func (uj *Hashtag) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+func (uj *Hashtag) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error = nil
+ currentKey := ffj_t_Hashtagbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffj_t_Hashtagno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'i':
+
+ if bytes.Equal(ffj_key_Hashtag_Indices, kn) {
+ currentKey = ffj_t_Hashtag_Indices
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 't':
+
+ if bytes.Equal(ffj_key_Hashtag_Text, kn) {
+ currentKey = ffj_t_Hashtag_Text
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_Hashtag_Text, kn) {
+ currentKey = ffj_t_Hashtag_Text
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Hashtag_Indices, kn) {
+ currentKey = ffj_t_Hashtag_Indices
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffj_t_Hashtagno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_Hashtag_Indices:
+ goto handle_Indices
+
+ case ffj_t_Hashtag_Text:
+ goto handle_Text
+
+ case ffj_t_Hashtagno_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Indices:
+
+ /* handler: uj.Indices type=[]int kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ uj.Indices = nil
+ } else {
+
+ uj.Indices = make([]int, 0)
+
+ wantVal := true
+
+ for {
+
+ var tmp_uj__Indices int
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmp_uj__Indices type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ tmp_uj__Indices = int(tval)
+
+ }
+ }
+
+ uj.Indices = append(uj.Indices, tmp_uj__Indices)
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Text:
+
+ /* handler: uj.Text type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.Text = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
+
+func (mj *LargeStruct) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+func (mj *LargeStruct) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"search_metadata":`)
+
+ {
+
+ err = mj.SearchMetadata.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ buf.WriteString(`,"statuses":`)
+ if mj.Statuses != nil {
+ buf.WriteString(`[`)
+ for i, v := range mj.Statuses {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+
+ {
+
+ err = v.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffj_t_LargeStructbase = iota
+ ffj_t_LargeStructno_such_key
+
+ ffj_t_LargeStruct_SearchMetadata
+
+ ffj_t_LargeStruct_Statuses
+)
+
+var ffj_key_LargeStruct_SearchMetadata = []byte("search_metadata")
+
+var ffj_key_LargeStruct_Statuses = []byte("statuses")
+
+func (uj *LargeStruct) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+func (uj *LargeStruct) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error = nil
+ currentKey := ffj_t_LargeStructbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffj_t_LargeStructno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 's':
+
+ if bytes.Equal(ffj_key_LargeStruct_SearchMetadata, kn) {
+ currentKey = ffj_t_LargeStruct_SearchMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_LargeStruct_Statuses, kn) {
+ currentKey = ffj_t_LargeStruct_Statuses
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffj_key_LargeStruct_Statuses, kn) {
+ currentKey = ffj_t_LargeStruct_Statuses
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_LargeStruct_SearchMetadata, kn) {
+ currentKey = ffj_t_LargeStruct_SearchMetadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffj_t_LargeStructno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_LargeStruct_SearchMetadata:
+ goto handle_SearchMetadata
+
+ case ffj_t_LargeStruct_Statuses:
+ goto handle_Statuses
+
+ case ffj_t_LargeStructno_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_SearchMetadata:
+
+ /* handler: uj.SearchMetadata type=benchmark.SearchMetadata kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+
+ err = uj.SearchMetadata.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Statuses:
+
+ /* handler: uj.Statuses type=[]benchmark.Status kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ uj.Statuses = nil
+ } else {
+
+ uj.Statuses = make([]Status, 0)
+
+ wantVal := true
+
+ for {
+
+ var tmp_uj__Statuses Status
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmp_uj__Statuses type=benchmark.Status kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+
+ err = tmp_uj__Statuses.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ uj.Statuses = append(uj.Statuses, tmp_uj__Statuses)
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
+
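+// SearchMetadata marshals as a fixed sequence of scalar fields, so the
+// generated writer emits each key/value pair inline with no nil checks.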
+func (mj *SearchMetadata) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+func (mj *SearchMetadata) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"completed_in":`)
+ fflib.AppendFloat(buf, float64(mj.CompletedIn), 'g', -1, 64)
+ buf.WriteString(`,"count":`)
+ fflib.FormatBits2(buf, uint64(mj.Count), 10, mj.Count < 0)
+ buf.WriteString(`,"max_id":`)
+ fflib.FormatBits2(buf, uint64(mj.MaxID), 10, mj.MaxID < 0)
+ buf.WriteString(`,"max_id_str":`)
+ fflib.WriteJsonString(buf, string(mj.MaxIDStr))
+ buf.WriteString(`,"next_results":`)
+ fflib.WriteJsonString(buf, string(mj.NextResults))
+ buf.WriteString(`,"query":`)
+ fflib.WriteJsonString(buf, string(mj.Query))
+ buf.WriteString(`,"refresh_url":`)
+ fflib.WriteJsonString(buf, string(mj.RefreshURL))
+ buf.WriteString(`,"since_id":`)
+ fflib.FormatBits2(buf, uint64(mj.SinceID), 10, mj.SinceID < 0)
+ buf.WriteString(`,"since_id_str":`)
+ fflib.WriteJsonString(buf, string(mj.SinceIDStr))
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffj_t_SearchMetadatabase = iota
+ ffj_t_SearchMetadatano_such_key
+
+ ffj_t_SearchMetadata_CompletedIn
+
+ ffj_t_SearchMetadata_Count
+
+ ffj_t_SearchMetadata_MaxID
+
+ ffj_t_SearchMetadata_MaxIDStr
+
+ ffj_t_SearchMetadata_NextResults
+
+ ffj_t_SearchMetadata_Query
+
+ ffj_t_SearchMetadata_RefreshURL
+
+ ffj_t_SearchMetadata_SinceID
+
+ ffj_t_SearchMetadata_SinceIDStr
+)
+
+var ffj_key_SearchMetadata_CompletedIn = []byte("completed_in")
+
+var ffj_key_SearchMetadata_Count = []byte("count")
+
+var ffj_key_SearchMetadata_MaxID = []byte("max_id")
+
+var ffj_key_SearchMetadata_MaxIDStr = []byte("max_id_str")
+
+var ffj_key_SearchMetadata_NextResults = []byte("next_results")
+
+var ffj_key_SearchMetadata_Query = []byte("query")
+
+var ffj_key_SearchMetadata_RefreshURL = []byte("refresh_url")
+
+var ffj_key_SearchMetadata_SinceID = []byte("since_id")
+
+var ffj_key_SearchMetadata_SinceIDStr = []byte("since_id_str")
+
+func (uj *SearchMetadata) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+func (uj *SearchMetadata) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error = nil
+ currentKey := ffj_t_SearchMetadatabase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffj_t_SearchMetadatano_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffj_key_SearchMetadata_CompletedIn, kn) {
+ currentKey = ffj_t_SearchMetadata_CompletedIn
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_SearchMetadata_Count, kn) {
+ currentKey = ffj_t_SearchMetadata_Count
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'm':
+
+ if bytes.Equal(ffj_key_SearchMetadata_MaxID, kn) {
+ currentKey = ffj_t_SearchMetadata_MaxID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_SearchMetadata_MaxIDStr, kn) {
+ currentKey = ffj_t_SearchMetadata_MaxIDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'n':
+
+ if bytes.Equal(ffj_key_SearchMetadata_NextResults, kn) {
+ currentKey = ffj_t_SearchMetadata_NextResults
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'q':
+
+ if bytes.Equal(ffj_key_SearchMetadata_Query, kn) {
+ currentKey = ffj_t_SearchMetadata_Query
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'r':
+
+ if bytes.Equal(ffj_key_SearchMetadata_RefreshURL, kn) {
+ currentKey = ffj_t_SearchMetadata_RefreshURL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 's':
+
+ if bytes.Equal(ffj_key_SearchMetadata_SinceID, kn) {
+ currentKey = ffj_t_SearchMetadata_SinceID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_SearchMetadata_SinceIDStr, kn) {
+ currentKey = ffj_t_SearchMetadata_SinceIDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffj_key_SearchMetadata_SinceIDStr, kn) {
+ currentKey = ffj_t_SearchMetadata_SinceIDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_SearchMetadata_SinceID, kn) {
+ currentKey = ffj_t_SearchMetadata_SinceID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_SearchMetadata_RefreshURL, kn) {
+ currentKey = ffj_t_SearchMetadata_RefreshURL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_SearchMetadata_Query, kn) {
+ currentKey = ffj_t_SearchMetadata_Query
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_SearchMetadata_NextResults, kn) {
+ currentKey = ffj_t_SearchMetadata_NextResults
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_SearchMetadata_MaxIDStr, kn) {
+ currentKey = ffj_t_SearchMetadata_MaxIDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_SearchMetadata_MaxID, kn) {
+ currentKey = ffj_t_SearchMetadata_MaxID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_SearchMetadata_Count, kn) {
+ currentKey = ffj_t_SearchMetadata_Count
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_SearchMetadata_CompletedIn, kn) {
+ currentKey = ffj_t_SearchMetadata_CompletedIn
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffj_t_SearchMetadatano_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_SearchMetadata_CompletedIn:
+ goto handle_CompletedIn
+
+ case ffj_t_SearchMetadata_Count:
+ goto handle_Count
+
+ case ffj_t_SearchMetadata_MaxID:
+ goto handle_MaxID
+
+ case ffj_t_SearchMetadata_MaxIDStr:
+ goto handle_MaxIDStr
+
+ case ffj_t_SearchMetadata_NextResults:
+ goto handle_NextResults
+
+ case ffj_t_SearchMetadata_Query:
+ goto handle_Query
+
+ case ffj_t_SearchMetadata_RefreshURL:
+ goto handle_RefreshURL
+
+ case ffj_t_SearchMetadata_SinceID:
+ goto handle_SinceID
+
+ case ffj_t_SearchMetadata_SinceIDStr:
+ goto handle_SinceIDStr
+
+ case ffj_t_SearchMetadatano_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_CompletedIn:
+
+ /* handler: uj.CompletedIn type=float64 kind=float64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.CompletedIn = float64(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Count:
+
+ /* handler: uj.Count type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.Count = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MaxID:
+
+ /* handler: uj.MaxID type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.MaxID = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_MaxIDStr:
+
+ /* handler: uj.MaxIDStr type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.MaxIDStr = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_NextResults:
+
+ /* handler: uj.NextResults type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.NextResults = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Query:
+
+ /* handler: uj.Query type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.Query = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_RefreshURL:
+
+ /* handler: uj.RefreshURL type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.RefreshURL = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_SinceID:
+
+ /* handler: uj.SinceID type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.SinceID = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_SinceIDStr:
+
+ /* handler: uj.SinceIDStr type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.SinceIDStr = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
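+ // Shared error exits: wantedvalue and wrongtokenerror report the offending
+ // token, while tokerror surfaces errors raised by the lexer itself.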
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
+
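+ // MarshalJSON implements json.Marshaler for Status; a nil receiver
+ // encodes as JSON null.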
+func (mj *Status) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
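+
+ // MarshalJSONBuf streams the JSON encoding of mj into buf, writing fields
+ // in a fixed order and emitting null for nil pointer fields.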
+func (mj *Status) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ if mj.Contributors != nil {
+ buf.WriteString(`{"contributors":`)
+ fflib.WriteJsonString(buf, string(*mj.Contributors))
+ } else {
+ buf.WriteString(`{"contributors":null`)
+ }
+ if mj.Coordinates != nil {
+ buf.WriteString(`,"coordinates":`)
+ fflib.WriteJsonString(buf, string(*mj.Coordinates))
+ } else {
+ buf.WriteString(`,"coordinates":null`)
+ }
+ buf.WriteString(`,"created_at":`)
+ fflib.WriteJsonString(buf, string(mj.CreatedAt))
+ buf.WriteString(`,"entities":`)
+
+ {
+
+ err = mj.Entities.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ if mj.Favorited {
+ buf.WriteString(`,"favorited":true`)
+ } else {
+ buf.WriteString(`,"favorited":false`)
+ }
+ if mj.Geo != nil {
+ buf.WriteString(`,"geo":`)
+ fflib.WriteJsonString(buf, string(*mj.Geo))
+ } else {
+ buf.WriteString(`,"geo":null`)
+ }
+ buf.WriteString(`,"id":`)
+ fflib.FormatBits2(buf, uint64(mj.ID), 10, mj.ID < 0)
+ buf.WriteString(`,"id_str":`)
+ fflib.WriteJsonString(buf, string(mj.IDStr))
+ if mj.InReplyToScreenName != nil {
+ buf.WriteString(`,"in_reply_to_screen_name":`)
+ fflib.WriteJsonString(buf, string(*mj.InReplyToScreenName))
+ } else {
+ buf.WriteString(`,"in_reply_to_screen_name":null`)
+ }
+ if mj.InReplyToStatusID != nil {
+ buf.WriteString(`,"in_reply_to_status_id":`)
+ fflib.WriteJsonString(buf, string(*mj.InReplyToStatusID))
+ } else {
+ buf.WriteString(`,"in_reply_to_status_id":null`)
+ }
+ if mj.InReplyToStatusIDStr != nil {
+ buf.WriteString(`,"in_reply_to_status_id_str":`)
+ fflib.WriteJsonString(buf, string(*mj.InReplyToStatusIDStr))
+ } else {
+ buf.WriteString(`,"in_reply_to_status_id_str":null`)
+ }
+ if mj.InReplyToUserID != nil {
+ buf.WriteString(`,"in_reply_to_user_id":`)
+ fflib.WriteJsonString(buf, string(*mj.InReplyToUserID))
+ } else {
+ buf.WriteString(`,"in_reply_to_user_id":null`)
+ }
+ if mj.InReplyToUserIDStr != nil {
+ buf.WriteString(`,"in_reply_to_user_id_str":`)
+ fflib.WriteJsonString(buf, string(*mj.InReplyToUserIDStr))
+ } else {
+ buf.WriteString(`,"in_reply_to_user_id_str":null`)
+ }
+ buf.WriteString(`,"metadata":`)
+
+ {
+
+ err = mj.Metadata.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ if mj.Place != nil {
+ buf.WriteString(`,"place":`)
+ fflib.WriteJsonString(buf, string(*mj.Place))
+ } else {
+ buf.WriteString(`,"place":null`)
+ }
+ buf.WriteString(`,"retweet_count":`)
+ fflib.FormatBits2(buf, uint64(mj.RetweetCount), 10, mj.RetweetCount < 0)
+ if mj.Retweeted {
+ buf.WriteString(`,"retweeted":true`)
+ } else {
+ buf.WriteString(`,"retweeted":false`)
+ }
+ buf.WriteString(`,"source":`)
+ fflib.WriteJsonString(buf, string(mj.Source))
+ buf.WriteString(`,"text":`)
+ fflib.WriteJsonString(buf, string(mj.Text))
+ if mj.Truncated {
+ buf.WriteString(`,"truncated":true`)
+ } else {
+ buf.WriteString(`,"truncated":false`)
+ }
+ buf.WriteString(`,"user":`)
+
+ {
+
+ err = mj.User.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
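+ // Key identifiers for Status fields: the scanner maps each JSON key to one
+ // of these constants, with ffj_t_Statusno_such_key marking unknown keys.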
+const (
+ ffj_t_Statusbase = iota
+ ffj_t_Statusno_such_key
+
+ ffj_t_Status_Contributors
+
+ ffj_t_Status_Coordinates
+
+ ffj_t_Status_CreatedAt
+
+ ffj_t_Status_Entities
+
+ ffj_t_Status_Favorited
+
+ ffj_t_Status_Geo
+
+ ffj_t_Status_ID
+
+ ffj_t_Status_IDStr
+
+ ffj_t_Status_InReplyToScreenName
+
+ ffj_t_Status_InReplyToStatusID
+
+ ffj_t_Status_InReplyToStatusIDStr
+
+ ffj_t_Status_InReplyToUserID
+
+ ffj_t_Status_InReplyToUserIDStr
+
+ ffj_t_Status_Metadata
+
+ ffj_t_Status_Place
+
+ ffj_t_Status_RetweetCount
+
+ ffj_t_Status_Retweeted
+
+ ffj_t_Status_Source
+
+ ffj_t_Status_Text
+
+ ffj_t_Status_Truncated
+
+ ffj_t_Status_User
+)
+
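+ // JSON key names as package-level byte slices, so key comparison during
+ // parsing does not allocate.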
+var ffj_key_Status_Contributors = []byte("contributors")
+
+var ffj_key_Status_Coordinates = []byte("coordinates")
+
+var ffj_key_Status_CreatedAt = []byte("created_at")
+
+var ffj_key_Status_Entities = []byte("entities")
+
+var ffj_key_Status_Favorited = []byte("favorited")
+
+var ffj_key_Status_Geo = []byte("geo")
+
+var ffj_key_Status_ID = []byte("id")
+
+var ffj_key_Status_IDStr = []byte("id_str")
+
+var ffj_key_Status_InReplyToScreenName = []byte("in_reply_to_screen_name")
+
+var ffj_key_Status_InReplyToStatusID = []byte("in_reply_to_status_id")
+
+var ffj_key_Status_InReplyToStatusIDStr = []byte("in_reply_to_status_id_str")
+
+var ffj_key_Status_InReplyToUserID = []byte("in_reply_to_user_id")
+
+var ffj_key_Status_InReplyToUserIDStr = []byte("in_reply_to_user_id_str")
+
+var ffj_key_Status_Metadata = []byte("metadata")
+
+var ffj_key_Status_Place = []byte("place")
+
+var ffj_key_Status_RetweetCount = []byte("retweet_count")
+
+var ffj_key_Status_Retweeted = []byte("retweeted")
+
+var ffj_key_Status_Source = []byte("source")
+
+var ffj_key_Status_Text = []byte("text")
+
+var ffj_key_Status_Truncated = []byte("truncated")
+
+var ffj_key_Status_User = []byte("user")
+
+func (uj *Status) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
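+ // UnmarshalJSONFFLexer is a token-driven state machine: map_start ->
+ // want_key -> want_colon -> want_value -> after_value, with a goto label
+ // handling each known field's value.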
+func (uj *Status) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffj_t_Statusbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // JSON object ended; exit the parse loop.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) == 0 {
+ // Empty key (""): treat as unknown.
+ currentKey = ffj_t_Statusno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
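+ // Dispatch on the first byte of the key before full byte-wise comparison.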
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffj_key_Status_Contributors, kn) {
+ currentKey = ffj_t_Status_Contributors
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_Status_Coordinates, kn) {
+ currentKey = ffj_t_Status_Coordinates
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_Status_CreatedAt, kn) {
+ currentKey = ffj_t_Status_CreatedAt
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'e':
+
+ if bytes.Equal(ffj_key_Status_Entities, kn) {
+ currentKey = ffj_t_Status_Entities
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'f':
+
+ if bytes.Equal(ffj_key_Status_Favorited, kn) {
+ currentKey = ffj_t_Status_Favorited
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'g':
+
+ if bytes.Equal(ffj_key_Status_Geo, kn) {
+ currentKey = ffj_t_Status_Geo
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffj_key_Status_ID, kn) {
+ currentKey = ffj_t_Status_ID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_Status_IDStr, kn) {
+ currentKey = ffj_t_Status_IDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_Status_InReplyToScreenName, kn) {
+ currentKey = ffj_t_Status_InReplyToScreenName
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_Status_InReplyToStatusID, kn) {
+ currentKey = ffj_t_Status_InReplyToStatusID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_Status_InReplyToStatusIDStr, kn) {
+ currentKey = ffj_t_Status_InReplyToStatusIDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_Status_InReplyToUserID, kn) {
+ currentKey = ffj_t_Status_InReplyToUserID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_Status_InReplyToUserIDStr, kn) {
+ currentKey = ffj_t_Status_InReplyToUserIDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'm':
+
+ if bytes.Equal(ffj_key_Status_Metadata, kn) {
+ currentKey = ffj_t_Status_Metadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'p':
+
+ if bytes.Equal(ffj_key_Status_Place, kn) {
+ currentKey = ffj_t_Status_Place
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'r':
+
+ if bytes.Equal(ffj_key_Status_RetweetCount, kn) {
+ currentKey = ffj_t_Status_RetweetCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_Status_Retweeted, kn) {
+ currentKey = ffj_t_Status_Retweeted
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 's':
+
+ if bytes.Equal(ffj_key_Status_Source, kn) {
+ currentKey = ffj_t_Status_Source
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 't':
+
+ if bytes.Equal(ffj_key_Status_Text, kn) {
+ currentKey = ffj_t_Status_Text
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_Status_Truncated, kn) {
+ currentKey = ffj_t_Status_Truncated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'u':
+
+ if bytes.Equal(ffj_key_Status_User, kn) {
+ currentKey = ffj_t_Status_User
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
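+ // No exact match: fall back to case-insensitive key comparisons, tried in
+ // reverse declaration order.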
+ if fflib.EqualFoldRight(ffj_key_Status_User, kn) {
+ currentKey = ffj_t_Status_User
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_Status_Truncated, kn) {
+ currentKey = ffj_t_Status_Truncated
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_Status_Text, kn) {
+ currentKey = ffj_t_Status_Text
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Status_Source, kn) {
+ currentKey = ffj_t_Status_Source
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_Status_Retweeted, kn) {
+ currentKey = ffj_t_Status_Retweeted
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_Status_RetweetCount, kn) {
+ currentKey = ffj_t_Status_RetweetCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_Status_Place, kn) {
+ currentKey = ffj_t_Status_Place
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_Status_Metadata, kn) {
+ currentKey = ffj_t_Status_Metadata
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Status_InReplyToUserIDStr, kn) {
+ currentKey = ffj_t_Status_InReplyToUserIDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Status_InReplyToUserID, kn) {
+ currentKey = ffj_t_Status_InReplyToUserID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Status_InReplyToStatusIDStr, kn) {
+ currentKey = ffj_t_Status_InReplyToStatusIDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Status_InReplyToStatusID, kn) {
+ currentKey = ffj_t_Status_InReplyToStatusID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Status_InReplyToScreenName, kn) {
+ currentKey = ffj_t_Status_InReplyToScreenName
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Status_IDStr, kn) {
+ currentKey = ffj_t_Status_IDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_Status_ID, kn) {
+ currentKey = ffj_t_Status_ID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_Status_Geo, kn) {
+ currentKey = ffj_t_Status_Geo
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_Status_Favorited, kn) {
+ currentKey = ffj_t_Status_Favorited
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Status_Entities, kn) {
+ currentKey = ffj_t_Status_Entities
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_Status_CreatedAt, kn) {
+ currentKey = ffj_t_Status_CreatedAt
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Status_Coordinates, kn) {
+ currentKey = ffj_t_Status_Coordinates
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_Status_Contributors, kn) {
+ currentKey = ffj_t_Status_Contributors
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffj_t_Statusno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_Status_Contributors:
+ goto handle_Contributors
+
+ case ffj_t_Status_Coordinates:
+ goto handle_Coordinates
+
+ case ffj_t_Status_CreatedAt:
+ goto handle_CreatedAt
+
+ case ffj_t_Status_Entities:
+ goto handle_Entities
+
+ case ffj_t_Status_Favorited:
+ goto handle_Favorited
+
+ case ffj_t_Status_Geo:
+ goto handle_Geo
+
+ case ffj_t_Status_ID:
+ goto handle_ID
+
+ case ffj_t_Status_IDStr:
+ goto handle_IDStr
+
+ case ffj_t_Status_InReplyToScreenName:
+ goto handle_InReplyToScreenName
+
+ case ffj_t_Status_InReplyToStatusID:
+ goto handle_InReplyToStatusID
+
+ case ffj_t_Status_InReplyToStatusIDStr:
+ goto handle_InReplyToStatusIDStr
+
+ case ffj_t_Status_InReplyToUserID:
+ goto handle_InReplyToUserID
+
+ case ffj_t_Status_InReplyToUserIDStr:
+ goto handle_InReplyToUserIDStr
+
+ case ffj_t_Status_Metadata:
+ goto handle_Metadata
+
+ case ffj_t_Status_Place:
+ goto handle_Place
+
+ case ffj_t_Status_RetweetCount:
+ goto handle_RetweetCount
+
+ case ffj_t_Status_Retweeted:
+ goto handle_Retweeted
+
+ case ffj_t_Status_Source:
+ goto handle_Source
+
+ case ffj_t_Status_Text:
+ goto handle_Text
+
+ case ffj_t_Status_Truncated:
+ goto handle_Truncated
+
+ case ffj_t_Status_User:
+ goto handle_User
+
+ case ffj_t_Statusno_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Contributors:
+
+ /* handler: uj.Contributors type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.Contributors = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(outBuf)
+ uj.Contributors = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Coordinates:
+
+ /* handler: uj.Coordinates type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.Coordinates = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(outBuf)
+ uj.Coordinates = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CreatedAt:
+
+ /* handler: uj.CreatedAt type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.CreatedAt = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Entities:
+
+ /* handler: uj.Entities type=benchmark.Entities kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+
+ err = uj.Entities.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Favorited:
+
+ /* handler: uj.Favorited type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Equal(tmpb, []byte("true")) {
+
+ uj.Favorited = true
+
+ } else if bytes.Equal(tmpb, []byte("false")) {
+
+ uj.Favorited = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Geo:
+
+ /* handler: uj.Geo type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.Geo = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(outBuf)
+ uj.Geo = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ID:
+
+ /* handler: uj.ID type=int64 kind=int64 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.ID = int64(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_IDStr:
+
+ /* handler: uj.IDStr type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.IDStr = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_InReplyToScreenName:
+
+ /* handler: uj.InReplyToScreenName type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.InReplyToScreenName = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(outBuf)
+ uj.InReplyToScreenName = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_InReplyToStatusID:
+
+ /* handler: uj.InReplyToStatusID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.InReplyToStatusID = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(outBuf)
+ uj.InReplyToStatusID = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_InReplyToStatusIDStr:
+
+ /* handler: uj.InReplyToStatusIDStr type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.InReplyToStatusIDStr = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(outBuf)
+ uj.InReplyToStatusIDStr = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_InReplyToUserID:
+
+ /* handler: uj.InReplyToUserID type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.InReplyToUserID = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(outBuf)
+ uj.InReplyToUserID = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_InReplyToUserIDStr:
+
+ /* handler: uj.InReplyToUserIDStr type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.InReplyToUserIDStr = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(outBuf)
+ uj.InReplyToUserIDStr = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Metadata:
+
+ /* handler: uj.Metadata type=benchmark.StatusMetadata kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+
+ err = uj.Metadata.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Place:
+
+ /* handler: uj.Place type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.Place = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(outBuf)
+ uj.Place = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_RetweetCount:
+
+ /* handler: uj.RetweetCount type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.RetweetCount = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Retweeted:
+
+ /* handler: uj.Retweeted type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Equal(tmpb, []byte("true")) {
+
+ uj.Retweeted = true
+
+ } else if bytes.Equal(tmpb, []byte("false")) {
+
+ uj.Retweeted = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Source:
+
+ /* handler: uj.Source type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.Source = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Text:
+
+ /* handler: uj.Text type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.Text = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Truncated:
+
+ /* handler: uj.Truncated type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Equal(tmpb, []byte("true")) {
+
+ uj.Truncated = true
+
+ } else if bytes.Equal(tmpb, []byte("false")) {
+
+ uj.Truncated = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_User:
+
+ /* handler: uj.User type=benchmark.User kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+
+ err = uj.User.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
+
+func (mj *StatusMetadata) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
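+
+ // MarshalJSONBuf for StatusMetadata writes its two string fields in a fixed
+ // order; the struct has no optional members to branch on.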
+func (mj *StatusMetadata) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"iso_language_code":`)
+ fflib.WriteJsonString(buf, string(mj.IsoLanguageCode))
+ buf.WriteString(`,"result_type":`)
+ fflib.WriteJsonString(buf, string(mj.ResultType))
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffj_t_StatusMetadatabase = iota
+ ffj_t_StatusMetadatano_such_key
+
+ ffj_t_StatusMetadata_IsoLanguageCode
+
+ ffj_t_StatusMetadata_ResultType
+)
+
+var ffj_key_StatusMetadata_IsoLanguageCode = []byte("iso_language_code")
+
+var ffj_key_StatusMetadata_ResultType = []byte("result_type")
+
+func (uj *StatusMetadata) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+func (uj *StatusMetadata) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffj_t_StatusMetadatabase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // JSON object ended; exit the parse loop.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) == 0 {
+ // Empty key (""): treat as unknown.
+ currentKey = ffj_t_StatusMetadatano_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'i':
+
+ if bytes.Equal(ffj_key_StatusMetadata_IsoLanguageCode, kn) {
+ currentKey = ffj_t_StatusMetadata_IsoLanguageCode
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'r':
+
+ if bytes.Equal(ffj_key_StatusMetadata_ResultType, kn) {
+ currentKey = ffj_t_StatusMetadata_ResultType
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffj_key_StatusMetadata_ResultType, kn) {
+ currentKey = ffj_t_StatusMetadata_ResultType
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_StatusMetadata_IsoLanguageCode, kn) {
+ currentKey = ffj_t_StatusMetadata_IsoLanguageCode
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffj_t_StatusMetadatano_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_StatusMetadata_IsoLanguageCode:
+ goto handle_IsoLanguageCode
+
+ case ffj_t_StatusMetadata_ResultType:
+ goto handle_ResultType
+
+ case ffj_t_StatusMetadatano_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_IsoLanguageCode:
+
+ /* handler: uj.IsoLanguageCode type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.IsoLanguageCode = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ResultType:
+
+ /* handler: uj.ResultType type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.ResultType = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
+
+func (mj *URL) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+func (mj *URL) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ if mj.ExpandedURL != nil {
+ buf.WriteString(`{"expanded_url":`)
+ fflib.WriteJsonString(buf, string(*mj.ExpandedURL))
+ } else {
+ buf.WriteString(`{"expanded_url":null`)
+ }
+ buf.WriteString(`,"indices":`)
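+ // A nil slice encodes as JSON null; a non-nil empty slice encodes as [].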
+ if mj.Indices != nil {
+ buf.WriteString(`[`)
+ for i, v := range mj.Indices {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.FormatBits2(buf, uint64(v), 10, v < 0)
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteString(`,"url":`)
+ fflib.WriteJsonString(buf, string(mj.URL))
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffj_t_URLbase = iota
+ ffj_t_URLno_such_key
+
+ ffj_t_URL_ExpandedURL
+
+ ffj_t_URL_Indices
+
+ ffj_t_URL_URL
+)
+
+var ffj_key_URL_ExpandedURL = []byte("expanded_url")
+
+var ffj_key_URL_Indices = []byte("indices")
+
+var ffj_key_URL_URL = []byte("url")
+
+func (uj *URL) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+func (uj *URL) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffj_t_URLbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // JSON object ended; exit the parse loop.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) == 0 {
+ // Empty key (""): treat as unknown.
+ currentKey = ffj_t_URLno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'e':
+
+ if bytes.Equal(ffj_key_URL_ExpandedURL, kn) {
+ currentKey = ffj_t_URL_ExpandedURL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffj_key_URL_Indices, kn) {
+ currentKey = ffj_t_URL_Indices
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'u':
+
+ if bytes.Equal(ffj_key_URL_URL, kn) {
+ currentKey = ffj_t_URL_URL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_URL_URL, kn) {
+ currentKey = ffj_t_URL_URL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_URL_Indices, kn) {
+ currentKey = ffj_t_URL_Indices
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_URL_ExpandedURL, kn) {
+ currentKey = ffj_t_URL_ExpandedURL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffj_t_URLno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_URL_ExpandedURL:
+ goto handle_ExpandedURL
+
+ case ffj_t_URL_Indices:
+ goto handle_Indices
+
+ case ffj_t_URL_URL:
+ goto handle_URL
+
+ case ffj_t_URLno_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ExpandedURL:
+
+ /* handler: uj.ExpandedURL type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.ExpandedURL = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(outBuf)
+ uj.ExpandedURL = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Indices:
+
+ /* handler: uj.Indices type=[]int kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ uj.Indices = nil
+ } else {
+
+ uj.Indices = make([]int, 0)
+
+ wantVal := true
+
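+ // Consume tokens until the closing bracket; wantVal flags a comma
+ // that appears before any value has been read.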
+ for {
+
+ var tmp_uj__Indices int
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmp_uj__Indices type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ tmp_uj__Indices = int(tval)
+
+ }
+ }
+
+ uj.Indices = append(uj.Indices, tmp_uj__Indices)
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_URL:
+
+ /* handler: uj.URL type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.URL = string(outBuf)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
+
+func (mj *User) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+func (mj *User) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ if mj.ContributorsEnabled {
+ buf.WriteString(`{"contributors_enabled":true`)
+ } else {
+ buf.WriteString(`{"contributors_enabled":false`)
+ }
+ buf.WriteString(`,"created_at":`)
+ fflib.WriteJsonString(buf, string(mj.CreatedAt))
+ if mj.DefaultProfile {
+ buf.WriteString(`,"default_profile":true`)
+ } else {
+ buf.WriteString(`,"default_profile":false`)
+ }
+ if mj.DefaultProfileImage {
+ buf.WriteString(`,"default_profile_image":true`)
+ } else {
+ buf.WriteString(`,"default_profile_image":false`)
+ }
+ buf.WriteString(`,"description":`)
+ fflib.WriteJsonString(buf, string(mj.Description))
+ buf.WriteString(`,"entities":`)
+
+ {
+
+ err = mj.Entities.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ buf.WriteString(`,"favourites_count":`)
+ fflib.FormatBits2(buf, uint64(mj.FavouritesCount), 10, mj.FavouritesCount < 0)
+ if mj.FollowRequestSent != nil {
+ buf.WriteString(`,"follow_request_sent":`)
+ fflib.WriteJsonString(buf, string(*mj.FollowRequestSent))
+ } else {
+ buf.WriteString(`,"follow_request_sent":null`)
+ }
+ buf.WriteString(`,"followers_count":`)
+ fflib.FormatBits2(buf, uint64(mj.FollowersCount), 10, mj.FollowersCount < 0)
+ if mj.Following != nil {
+ buf.WriteString(`,"following":`)
+ fflib.WriteJsonString(buf, string(*mj.Following))
+ } else {
+ buf.WriteString(`,"following":null`)
+ }
+ buf.WriteString(`,"friends_count":`)
+ fflib.FormatBits2(buf, uint64(mj.FriendsCount), 10, mj.FriendsCount < 0)
+ if mj.GeoEnabled {
+ buf.WriteString(`,"geo_enabled":true`)
+ } else {
+ buf.WriteString(`,"geo_enabled":false`)
+ }
+ buf.WriteString(`,"id":`)
+ fflib.FormatBits2(buf, uint64(mj.ID), 10, mj.ID < 0)
+ buf.WriteString(`,"id_str":`)
+ fflib.WriteJsonString(buf, string(mj.IDStr))
+ if mj.IsTranslator {
+ buf.WriteString(`,"is_translator":true`)
+ } else {
+ buf.WriteString(`,"is_translator":false`)
+ }
+ buf.WriteString(`,"lang":`)
+ fflib.WriteJsonString(buf, string(mj.Lang))
+ buf.WriteString(`,"listed_count":`)
+ fflib.FormatBits2(buf, uint64(mj.ListedCount), 10, mj.ListedCount < 0)
+ buf.WriteString(`,"location":`)
+ fflib.WriteJsonString(buf, string(mj.Location))
+ buf.WriteString(`,"name":`)
+ fflib.WriteJsonString(buf, string(mj.Name))
+ if mj.Notifications != nil {
+ buf.WriteString(`,"notifications":`)
+ fflib.WriteJsonString(buf, string(*mj.Notifications))
+ } else {
+ buf.WriteString(`,"notifications":null`)
+ }
+ buf.WriteString(`,"profile_background_color":`)
+ fflib.WriteJsonString(buf, string(mj.ProfileBackgroundColor))
+ buf.WriteString(`,"profile_background_image_url":`)
+ fflib.WriteJsonString(buf, string(mj.ProfileBackgroundImageURL))
+ buf.WriteString(`,"profile_background_image_url_https":`)
+ fflib.WriteJsonString(buf, string(mj.ProfileBackgroundImageURLHTTPS))
+ if mj.ProfileBackgroundTile {
+ buf.WriteString(`,"profile_background_tile":true`)
+ } else {
+ buf.WriteString(`,"profile_background_tile":false`)
+ }
+ buf.WriteString(`,"profile_image_url":`)
+ fflib.WriteJsonString(buf, string(mj.ProfileImageURL))
+ buf.WriteString(`,"profile_image_url_https":`)
+ fflib.WriteJsonString(buf, string(mj.ProfileImageURLHTTPS))
+ buf.WriteString(`,"profile_link_color":`)
+ fflib.WriteJsonString(buf, string(mj.ProfileLinkColor))
+ buf.WriteString(`,"profile_sidebar_border_color":`)
+ fflib.WriteJsonString(buf, string(mj.ProfileSidebarBorderColor))
+ buf.WriteString(`,"profile_sidebar_fill_color":`)
+ fflib.WriteJsonString(buf, string(mj.ProfileSidebarFillColor))
+ buf.WriteString(`,"profile_text_color":`)
+ fflib.WriteJsonString(buf, string(mj.ProfileTextColor))
+ if mj.ProfileUseBackgroundImage {
+ buf.WriteString(`,"profile_use_background_image":true`)
+ } else {
+ buf.WriteString(`,"profile_use_background_image":false`)
+ }
+ if mj.Protected {
+ buf.WriteString(`,"protected":true`)
+ } else {
+ buf.WriteString(`,"protected":false`)
+ }
+ buf.WriteString(`,"screen_name":`)
+ fflib.WriteJsonString(buf, string(mj.ScreenName))
+ if mj.ShowAllInlineMedia {
+ buf.WriteString(`,"show_all_inline_media":true`)
+ } else {
+ buf.WriteString(`,"show_all_inline_media":false`)
+ }
+ buf.WriteString(`,"statuses_count":`)
+ fflib.FormatBits2(buf, uint64(mj.StatusesCount), 10, mj.StatusesCount < 0)
+ buf.WriteString(`,"time_zone":`)
+ fflib.WriteJsonString(buf, string(mj.TimeZone))
+ if mj.URL != nil {
+ buf.WriteString(`,"url":`)
+ fflib.WriteJsonString(buf, string(*mj.URL))
+ } else {
+ buf.WriteString(`,"url":null`)
+ }
+ buf.WriteString(`,"utc_offset":`)
+ fflib.FormatBits2(buf, uint64(mj.UtcOffset), 10, mj.UtcOffset < 0)
+ if mj.Verified {
+ buf.WriteString(`,"verified":true`)
+ } else {
+ buf.WriteString(`,"verified":false`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffj_t_Userbase = iota
+ ffj_t_Userno_such_key
+
+ ffj_t_User_ContributorsEnabled
+
+ ffj_t_User_CreatedAt
+
+ ffj_t_User_DefaultProfile
+
+ ffj_t_User_DefaultProfileImage
+
+ ffj_t_User_Description
+
+ ffj_t_User_Entities
+
+ ffj_t_User_FavouritesCount
+
+ ffj_t_User_FollowRequestSent
+
+ ffj_t_User_FollowersCount
+
+ ffj_t_User_Following
+
+ ffj_t_User_FriendsCount
+
+ ffj_t_User_GeoEnabled
+
+ ffj_t_User_ID
+
+ ffj_t_User_IDStr
+
+ ffj_t_User_IsTranslator
+
+ ffj_t_User_Lang
+
+ ffj_t_User_ListedCount
+
+ ffj_t_User_Location
+
+ ffj_t_User_Name
+
+ ffj_t_User_Notifications
+
+ ffj_t_User_ProfileBackgroundColor
+
+ ffj_t_User_ProfileBackgroundImageURL
+
+ ffj_t_User_ProfileBackgroundImageURLHTTPS
+
+ ffj_t_User_ProfileBackgroundTile
+
+ ffj_t_User_ProfileImageURL
+
+ ffj_t_User_ProfileImageURLHTTPS
+
+ ffj_t_User_ProfileLinkColor
+
+ ffj_t_User_ProfileSidebarBorderColor
+
+ ffj_t_User_ProfileSidebarFillColor
+
+ ffj_t_User_ProfileTextColor
+
+ ffj_t_User_ProfileUseBackgroundImage
+
+ ffj_t_User_Protected
+
+ ffj_t_User_ScreenName
+
+ ffj_t_User_ShowAllInlineMedia
+
+ ffj_t_User_StatusesCount
+
+ ffj_t_User_TimeZone
+
+ ffj_t_User_URL
+
+ ffj_t_User_UtcOffset
+
+ ffj_t_User_Verified
+)
+
+var ffj_key_User_ContributorsEnabled = []byte("contributors_enabled")
+
+var ffj_key_User_CreatedAt = []byte("created_at")
+
+var ffj_key_User_DefaultProfile = []byte("default_profile")
+
+var ffj_key_User_DefaultProfileImage = []byte("default_profile_image")
+
+var ffj_key_User_Description = []byte("description")
+
+var ffj_key_User_Entities = []byte("entities")
+
+var ffj_key_User_FavouritesCount = []byte("favourites_count")
+
+var ffj_key_User_FollowRequestSent = []byte("follow_request_sent")
+
+var ffj_key_User_FollowersCount = []byte("followers_count")
+
+var ffj_key_User_Following = []byte("following")
+
+var ffj_key_User_FriendsCount = []byte("friends_count")
+
+var ffj_key_User_GeoEnabled = []byte("geo_enabled")
+
+var ffj_key_User_ID = []byte("id")
+
+var ffj_key_User_IDStr = []byte("id_str")
+
+var ffj_key_User_IsTranslator = []byte("is_translator")
+
+var ffj_key_User_Lang = []byte("lang")
+
+var ffj_key_User_ListedCount = []byte("listed_count")
+
+var ffj_key_User_Location = []byte("location")
+
+var ffj_key_User_Name = []byte("name")
+
+var ffj_key_User_Notifications = []byte("notifications")
+
+var ffj_key_User_ProfileBackgroundColor = []byte("profile_background_color")
+
+var ffj_key_User_ProfileBackgroundImageURL = []byte("profile_background_image_url")
+
+var ffj_key_User_ProfileBackgroundImageURLHTTPS = []byte("profile_background_image_url_https")
+
+var ffj_key_User_ProfileBackgroundTile = []byte("profile_background_tile")
+
+var ffj_key_User_ProfileImageURL = []byte("profile_image_url")
+
+var ffj_key_User_ProfileImageURLHTTPS = []byte("profile_image_url_https")
+
+var ffj_key_User_ProfileLinkColor = []byte("profile_link_color")
+
+var ffj_key_User_ProfileSidebarBorderColor = []byte("profile_sidebar_border_color")
+
+var ffj_key_User_ProfileSidebarFillColor = []byte("profile_sidebar_fill_color")
+
+var ffj_key_User_ProfileTextColor = []byte("profile_text_color")
+
+var ffj_key_User_ProfileUseBackgroundImage = []byte("profile_use_background_image")
+
+var ffj_key_User_Protected = []byte("protected")
+
+var ffj_key_User_ScreenName = []byte("screen_name")
+
+var ffj_key_User_ShowAllInlineMedia = []byte("show_all_inline_media")
+
+var ffj_key_User_StatusesCount = []byte("statuses_count")
+
+var ffj_key_User_TimeZone = []byte("time_zone")
+
+var ffj_key_User_URL = []byte("url")
+
+var ffj_key_User_UtcOffset = []byte("utc_offset")
+
+var ffj_key_User_Verified = []byte("verified")
+
+func (uj *User) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+func (uj *User) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffj_t_Userbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // JSON object ended; exit the parse loop.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) == 0 {
+ // Empty key (""): treat as unknown.
+ currentKey = ffj_t_Userno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'c':
+
+ if bytes.Equal(ffj_key_User_ContributorsEnabled, kn) {
+ currentKey = ffj_t_User_ContributorsEnabled
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_CreatedAt, kn) {
+ currentKey = ffj_t_User_CreatedAt
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'd':
+
+ if bytes.Equal(ffj_key_User_DefaultProfile, kn) {
+ currentKey = ffj_t_User_DefaultProfile
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_DefaultProfileImage, kn) {
+ currentKey = ffj_t_User_DefaultProfileImage
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_Description, kn) {
+ currentKey = ffj_t_User_Description
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'e':
+
+ if bytes.Equal(ffj_key_User_Entities, kn) {
+ currentKey = ffj_t_User_Entities
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'f':
+
+ if bytes.Equal(ffj_key_User_FavouritesCount, kn) {
+ currentKey = ffj_t_User_FavouritesCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_FollowRequestSent, kn) {
+ currentKey = ffj_t_User_FollowRequestSent
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_FollowersCount, kn) {
+ currentKey = ffj_t_User_FollowersCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_Following, kn) {
+ currentKey = ffj_t_User_Following
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_FriendsCount, kn) {
+ currentKey = ffj_t_User_FriendsCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'g':
+
+ if bytes.Equal(ffj_key_User_GeoEnabled, kn) {
+ currentKey = ffj_t_User_GeoEnabled
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'i':
+
+ if bytes.Equal(ffj_key_User_ID, kn) {
+ currentKey = ffj_t_User_ID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_IDStr, kn) {
+ currentKey = ffj_t_User_IDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_IsTranslator, kn) {
+ currentKey = ffj_t_User_IsTranslator
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'l':
+
+ if bytes.Equal(ffj_key_User_Lang, kn) {
+ currentKey = ffj_t_User_Lang
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ListedCount, kn) {
+ currentKey = ffj_t_User_ListedCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_Location, kn) {
+ currentKey = ffj_t_User_Location
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'n':
+
+ if bytes.Equal(ffj_key_User_Name, kn) {
+ currentKey = ffj_t_User_Name
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_Notifications, kn) {
+ currentKey = ffj_t_User_Notifications
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'p':
+
+ if bytes.Equal(ffj_key_User_ProfileBackgroundColor, kn) {
+ currentKey = ffj_t_User_ProfileBackgroundColor
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ProfileBackgroundImageURL, kn) {
+ currentKey = ffj_t_User_ProfileBackgroundImageURL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ProfileBackgroundImageURLHTTPS, kn) {
+ currentKey = ffj_t_User_ProfileBackgroundImageURLHTTPS
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ProfileBackgroundTile, kn) {
+ currentKey = ffj_t_User_ProfileBackgroundTile
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ProfileImageURL, kn) {
+ currentKey = ffj_t_User_ProfileImageURL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ProfileImageURLHTTPS, kn) {
+ currentKey = ffj_t_User_ProfileImageURLHTTPS
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ProfileLinkColor, kn) {
+ currentKey = ffj_t_User_ProfileLinkColor
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ProfileSidebarBorderColor, kn) {
+ currentKey = ffj_t_User_ProfileSidebarBorderColor
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ProfileSidebarFillColor, kn) {
+ currentKey = ffj_t_User_ProfileSidebarFillColor
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ProfileTextColor, kn) {
+ currentKey = ffj_t_User_ProfileTextColor
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ProfileUseBackgroundImage, kn) {
+ currentKey = ffj_t_User_ProfileUseBackgroundImage
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_Protected, kn) {
+ currentKey = ffj_t_User_Protected
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 's':
+
+ if bytes.Equal(ffj_key_User_ScreenName, kn) {
+ currentKey = ffj_t_User_ScreenName
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_ShowAllInlineMedia, kn) {
+ currentKey = ffj_t_User_ShowAllInlineMedia
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_StatusesCount, kn) {
+ currentKey = ffj_t_User_StatusesCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 't':
+
+ if bytes.Equal(ffj_key_User_TimeZone, kn) {
+ currentKey = ffj_t_User_TimeZone
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'u':
+
+ if bytes.Equal(ffj_key_User_URL, kn) {
+ currentKey = ffj_t_User_URL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffj_key_User_UtcOffset, kn) {
+ currentKey = ffj_t_User_UtcOffset
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'v':
+
+ if bytes.Equal(ffj_key_User_Verified, kn) {
+ currentKey = ffj_t_User_Verified
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
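+ // No exact byte match for the key: fall back to the case-insensitive
+ // comparisons below, which are generated in reverse key order.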
+ if fflib.SimpleLetterEqualFold(ffj_key_User_Verified, kn) {
+ currentKey = ffj_t_User_Verified
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_UtcOffset, kn) {
+ currentKey = ffj_t_User_UtcOffset
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_User_URL, kn) {
+ currentKey = ffj_t_User_URL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_User_TimeZone, kn) {
+ currentKey = ffj_t_User_TimeZone
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_StatusesCount, kn) {
+ currentKey = ffj_t_User_StatusesCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ShowAllInlineMedia, kn) {
+ currentKey = ffj_t_User_ShowAllInlineMedia
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ScreenName, kn) {
+ currentKey = ffj_t_User_ScreenName
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_User_Protected, kn) {
+ currentKey = ffj_t_User_Protected
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ProfileUseBackgroundImage, kn) {
+ currentKey = ffj_t_User_ProfileUseBackgroundImage
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_User_ProfileTextColor, kn) {
+ currentKey = ffj_t_User_ProfileTextColor
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ProfileSidebarFillColor, kn) {
+ currentKey = ffj_t_User_ProfileSidebarFillColor
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ProfileSidebarBorderColor, kn) {
+ currentKey = ffj_t_User_ProfileSidebarBorderColor
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ProfileLinkColor, kn) {
+ currentKey = ffj_t_User_ProfileLinkColor
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ProfileImageURLHTTPS, kn) {
+ currentKey = ffj_t_User_ProfileImageURLHTTPS
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_User_ProfileImageURL, kn) {
+ currentKey = ffj_t_User_ProfileImageURL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ProfileBackgroundTile, kn) {
+ currentKey = ffj_t_User_ProfileBackgroundTile
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ProfileBackgroundImageURLHTTPS, kn) {
+ currentKey = ffj_t_User_ProfileBackgroundImageURLHTTPS
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ProfileBackgroundImageURL, kn) {
+ currentKey = ffj_t_User_ProfileBackgroundImageURL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ProfileBackgroundColor, kn) {
+ currentKey = ffj_t_User_ProfileBackgroundColor
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_Notifications, kn) {
+ currentKey = ffj_t_User_Notifications
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_User_Name, kn) {
+ currentKey = ffj_t_User_Name
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_User_Location, kn) {
+ currentKey = ffj_t_User_Location
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ListedCount, kn) {
+ currentKey = ffj_t_User_ListedCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_User_Lang, kn) {
+ currentKey = ffj_t_User_Lang
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_IsTranslator, kn) {
+ currentKey = ffj_t_User_IsTranslator
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_IDStr, kn) {
+ currentKey = ffj_t_User_IDStr
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_User_ID, kn) {
+ currentKey = ffj_t_User_ID
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_User_GeoEnabled, kn) {
+ currentKey = ffj_t_User_GeoEnabled
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_FriendsCount, kn) {
+ currentKey = ffj_t_User_FriendsCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_User_Following, kn) {
+ currentKey = ffj_t_User_Following
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_FollowersCount, kn) {
+ currentKey = ffj_t_User_FollowersCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_FollowRequestSent, kn) {
+ currentKey = ffj_t_User_FollowRequestSent
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_FavouritesCount, kn) {
+ currentKey = ffj_t_User_FavouritesCount
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_Entities, kn) {
+ currentKey = ffj_t_User_Entities
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_Description, kn) {
+ currentKey = ffj_t_User_Description
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_User_DefaultProfileImage, kn) {
+ currentKey = ffj_t_User_DefaultProfileImage
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_User_DefaultProfile, kn) {
+ currentKey = ffj_t_User_DefaultProfile
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.AsciiEqualFold(ffj_key_User_CreatedAt, kn) {
+ currentKey = ffj_t_User_CreatedAt
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_User_ContributorsEnabled, kn) {
+ currentKey = ffj_t_User_ContributorsEnabled
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
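+ // Unknown key: record the sentinel so the upcoming value is skipped.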
+ currentKey = ffj_t_Userno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_User_ContributorsEnabled:
+ goto handle_ContributorsEnabled
+
+ case ffj_t_User_CreatedAt:
+ goto handle_CreatedAt
+
+ case ffj_t_User_DefaultProfile:
+ goto handle_DefaultProfile
+
+ case ffj_t_User_DefaultProfileImage:
+ goto handle_DefaultProfileImage
+
+ case ffj_t_User_Description:
+ goto handle_Description
+
+ case ffj_t_User_Entities:
+ goto handle_Entities
+
+ case ffj_t_User_FavouritesCount:
+ goto handle_FavouritesCount
+
+ case ffj_t_User_FollowRequestSent:
+ goto handle_FollowRequestSent
+
+ case ffj_t_User_FollowersCount:
+ goto handle_FollowersCount
+
+ case ffj_t_User_Following:
+ goto handle_Following
+
+ case ffj_t_User_FriendsCount:
+ goto handle_FriendsCount
+
+ case ffj_t_User_GeoEnabled:
+ goto handle_GeoEnabled
+
+ case ffj_t_User_ID:
+ goto handle_ID
+
+ case ffj_t_User_IDStr:
+ goto handle_IDStr
+
+ case ffj_t_User_IsTranslator:
+ goto handle_IsTranslator
+
+ case ffj_t_User_Lang:
+ goto handle_Lang
+
+ case ffj_t_User_ListedCount:
+ goto handle_ListedCount
+
+ case ffj_t_User_Location:
+ goto handle_Location
+
+ case ffj_t_User_Name:
+ goto handle_Name
+
+ case ffj_t_User_Notifications:
+ goto handle_Notifications
+
+ case ffj_t_User_ProfileBackgroundColor:
+ goto handle_ProfileBackgroundColor
+
+ case ffj_t_User_ProfileBackgroundImageURL:
+ goto handle_ProfileBackgroundImageURL
+
+ case ffj_t_User_ProfileBackgroundImageURLHTTPS:
+ goto handle_ProfileBackgroundImageURLHTTPS
+
+ case ffj_t_User_ProfileBackgroundTile:
+ goto handle_ProfileBackgroundTile
+
+ case ffj_t_User_ProfileImageURL:
+ goto handle_ProfileImageURL
+
+ case ffj_t_User_ProfileImageURLHTTPS:
+ goto handle_ProfileImageURLHTTPS
+
+ case ffj_t_User_ProfileLinkColor:
+ goto handle_ProfileLinkColor
+
+ case ffj_t_User_ProfileSidebarBorderColor:
+ goto handle_ProfileSidebarBorderColor
+
+ case ffj_t_User_ProfileSidebarFillColor:
+ goto handle_ProfileSidebarFillColor
+
+ case ffj_t_User_ProfileTextColor:
+ goto handle_ProfileTextColor
+
+ case ffj_t_User_ProfileUseBackgroundImage:
+ goto handle_ProfileUseBackgroundImage
+
+ case ffj_t_User_Protected:
+ goto handle_Protected
+
+ case ffj_t_User_ScreenName:
+ goto handle_ScreenName
+
+ case ffj_t_User_ShowAllInlineMedia:
+ goto handle_ShowAllInlineMedia
+
+ case ffj_t_User_StatusesCount:
+ goto handle_StatusesCount
+
+ case ffj_t_User_TimeZone:
+ goto handle_TimeZone
+
+ case ffj_t_User_URL:
+ goto handle_URL
+
+ case ffj_t_User_UtcOffset:
+ goto handle_UtcOffset
+
+ case ffj_t_User_Verified:
+ goto handle_Verified
+
+ case ffj_t_Userno_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_ContributorsEnabled:
+
+ /* handler: uj.ContributorsEnabled type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
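+ // JSON null leaves the field unchanged (its zero value on a fresh struct).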
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ uj.ContributorsEnabled = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ uj.ContributorsEnabled = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_CreatedAt:
+
+ /* handler: uj.CreatedAt type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.CreatedAt = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_DefaultProfile:
+
+ /* handler: uj.DefaultProfile type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ uj.DefaultProfile = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ uj.DefaultProfile = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_DefaultProfileImage:
+
+ /* handler: uj.DefaultProfileImage type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ uj.DefaultProfileImage = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ uj.DefaultProfileImage = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Description:
+
+ /* handler: uj.Description type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.Description = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Entities:
+
+ /* handler: uj.Entities type=benchmark.UserEntities kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+
+ err = uj.Entities.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_FavouritesCount:
+
+ /* handler: uj.FavouritesCount type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
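+ // Parse the raw base-10 digits into an int64, then narrow to int.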
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.FavouritesCount = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_FollowRequestSent:
+
+ /* handler: uj.FollowRequestSent type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
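+ // *string field: null maps to a nil pointer; otherwise a new string is
+ // allocated and its address stored.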
+
+ uj.FollowRequestSent = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(string(outBuf))
+ uj.FollowRequestSent = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_FollowersCount:
+
+ /* handler: uj.FollowersCount type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.FollowersCount = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Following:
+
+ /* handler: uj.Following type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.Following = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(string(outBuf))
+ uj.Following = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_FriendsCount:
+
+ /* handler: uj.FriendsCount type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.FriendsCount = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_GeoEnabled:
+
+ /* handler: uj.GeoEnabled type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ uj.GeoEnabled = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ uj.GeoEnabled = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ID:
+
+ /* handler: uj.ID type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.ID = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_IDStr:
+
+ /* handler: uj.IDStr type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.IDStr = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_IsTranslator:
+
+ /* handler: uj.IsTranslator type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ uj.IsTranslator = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ uj.IsTranslator = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Lang:
+
+ /* handler: uj.Lang type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.Lang = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ListedCount:
+
+ /* handler: uj.ListedCount type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.ListedCount = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Location:
+
+ /* handler: uj.Location type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.Location = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Name:
+
+ /* handler: uj.Name type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.Name = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Notifications:
+
+ /* handler: uj.Notifications type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.Notifications = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(string(outBuf))
+ uj.Notifications = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ProfileBackgroundColor:
+
+ /* handler: uj.ProfileBackgroundColor type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.ProfileBackgroundColor = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ProfileBackgroundImageURL:
+
+ /* handler: uj.ProfileBackgroundImageURL type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.ProfileBackgroundImageURL = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ProfileBackgroundImageURLHTTPS:
+
+ /* handler: uj.ProfileBackgroundImageURLHTTPS type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.ProfileBackgroundImageURLHTTPS = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ProfileBackgroundTile:
+
+ /* handler: uj.ProfileBackgroundTile type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ uj.ProfileBackgroundTile = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ uj.ProfileBackgroundTile = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ProfileImageURL:
+
+ /* handler: uj.ProfileImageURL type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.ProfileImageURL = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ProfileImageURLHTTPS:
+
+ /* handler: uj.ProfileImageURLHTTPS type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.ProfileImageURLHTTPS = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ProfileLinkColor:
+
+ /* handler: uj.ProfileLinkColor type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.ProfileLinkColor = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ProfileSidebarBorderColor:
+
+ /* handler: uj.ProfileSidebarBorderColor type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.ProfileSidebarBorderColor = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ProfileSidebarFillColor:
+
+ /* handler: uj.ProfileSidebarFillColor type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.ProfileSidebarFillColor = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ProfileTextColor:
+
+ /* handler: uj.ProfileTextColor type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.ProfileTextColor = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ProfileUseBackgroundImage:
+
+ /* handler: uj.ProfileUseBackgroundImage type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ uj.ProfileUseBackgroundImage = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ uj.ProfileUseBackgroundImage = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Protected:
+
+ /* handler: uj.Protected type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ uj.Protected = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ uj.Protected = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ScreenName:
+
+ /* handler: uj.ScreenName type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.ScreenName = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_ShowAllInlineMedia:
+
+ /* handler: uj.ShowAllInlineMedia type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ uj.ShowAllInlineMedia = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ uj.ShowAllInlineMedia = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_StatusesCount:
+
+ /* handler: uj.StatusesCount type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.StatusesCount = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_TimeZone:
+
+ /* handler: uj.TimeZone type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ uj.TimeZone = string(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_URL:
+
+ /* handler: uj.URL type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ uj.URL = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(string(outBuf))
+ uj.URL = &tval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_UtcOffset:
+
+ /* handler: uj.UtcOffset type=int kind=int quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ uj.UtcOffset = int(tval)
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_Verified:
+
+ /* handler: uj.Verified type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ uj.Verified = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ uj.Verified = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
+
+func (mj *UserEntities) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
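+ // A nil receiver marshals as JSON null rather than erroring.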
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+func (mj *UserEntities) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"description":`)
+
+ {
+
+ err = mj.Description.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ buf.WriteString(`,"url":`)
+
+ {
+
+ err = mj.URL.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
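+ // Key tokens: *base is the initial state and *no_such_key flags keys
+ // whose values should be skipped.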
+const (
+ ffj_t_UserEntitiesbase = iota
+ ffj_t_UserEntitiesno_such_key
+
+ ffj_t_UserEntities_Description
+
+ ffj_t_UserEntities_URL
+)
+
+var ffj_key_UserEntities_Description = []byte("description")
+
+var ffj_key_UserEntities_URL = []byte("url")
+
+func (uj *UserEntities) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+func (uj *UserEntities) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error = nil
+ currentKey := ffj_t_UserEntitiesbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
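+ // Note fflib's inverted token names: FFTok_left_bracket is '{' (object
+ // open) and FFTok_left_brace is '[' (array open), so an object is
+ // required here.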
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffj_t_UserEntitiesno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'd':
+
+ if bytes.Equal(ffj_key_UserEntities_Description, kn) {
+ currentKey = ffj_t_UserEntities_Description
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case 'u':
+
+ if bytes.Equal(ffj_key_UserEntities_URL, kn) {
+ currentKey = ffj_t_UserEntities_URL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_UserEntities_URL, kn) {
+ currentKey = ffj_t_UserEntities_URL
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ if fflib.EqualFoldRight(ffj_key_UserEntities_Description, kn) {
+ currentKey = ffj_t_UserEntities_Description
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffj_t_UserEntitiesno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_UserEntities_Description:
+ goto handle_Description
+
+ case ffj_t_UserEntities_URL:
+ goto handle_URL
+
+ case ffj_t_UserEntitiesno_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Description:
+
+ /* handler: uj.Description type=benchmark.UserEntityDescription kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+
+ err = uj.Description.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+handle_URL:
+
+ /* handler: uj.URL type=benchmark.UserEntityURL kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+
+ err = uj.URL.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
+
+func (mj *UserEntityDescription) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+func (mj *UserEntityDescription) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"urls":`)
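+ // A nil slice encodes as null; an allocated empty slice encodes as [].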
+ if mj.Urls != nil {
+ buf.WriteString(`[`)
+ for i, v := range mj.Urls {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ if v != nil {
+ fflib.WriteJsonString(buf, string(*v))
+ } else {
+ buf.WriteString(`null`)
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffj_t_UserEntityDescriptionbase = iota
+ ffj_t_UserEntityDescriptionno_such_key
+
+ ffj_t_UserEntityDescription_Urls
+)
+
+var ffj_key_UserEntityDescription_Urls = []byte("urls")
+
+func (uj *UserEntityDescription) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+func (uj *UserEntityDescription) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error = nil
+ currentKey := ffj_t_UserEntityDescriptionbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffj_t_UserEntityDescriptionno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'u':
+
+ if bytes.Equal(ffj_key_UserEntityDescription_Urls, kn) {
+ currentKey = ffj_t_UserEntityDescription_Urls
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffj_key_UserEntityDescription_Urls, kn) {
+ currentKey = ffj_t_UserEntityDescription_Urls
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffj_t_UserEntityDescriptionno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_UserEntityDescription_Urls:
+ goto handle_Urls
+
+ case ffj_t_UserEntityDescriptionno_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Urls:
+
+ /* handler: uj.Urls type=[]*string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ uj.Urls = nil
+ } else {
+
+ uj.Urls = make([]*string, 0)
+
+ wantVal := true
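+ // wantVal tracks whether a value (rather than a comma) is expected next,
+ // so malformed arrays like [,,,] are rejected.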
+
+ for {
+
+ var tmp_uj__Urls *string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmp_uj__Urls type=*string kind=ptr quoted=false*/
+
+ {
+
+ if tok == fflib.FFTok_null {
+ tmp_uj__Urls = nil
+ } else {
+ if tmp_uj__Urls == nil {
+ tmp_uj__Urls = new(string)
+ }
+
+ /* handler: tmp_uj__Urls type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ tmp_uj__Urls = nil
+
+ } else {
+
+ var tval string
+ outBuf := fs.Output.Bytes()
+
+ tval = string(string(outBuf))
+ tmp_uj__Urls = &tval
+
+ }
+ }
+
+ }
+ }
+
+ uj.Urls = append(uj.Urls, tmp_uj__Urls)
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
+
+func (mj *UserEntityURL) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+func (mj *UserEntityURL) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"urls":`)
+ if mj.Urls != nil {
+ buf.WriteString(`[`)
+ for i, v := range mj.Urls {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+
+ {
+
+ err = v.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffj_t_UserEntityURLbase = iota
+ ffj_t_UserEntityURLno_such_key
+
+ ffj_t_UserEntityURL_Urls
+)
+
+var ffj_key_UserEntityURL_Urls = []byte("urls")
+
+func (uj *UserEntityURL) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+func (uj *UserEntityURL) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error = nil
+ currentKey := ffj_t_UserEntityURLbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffj_t_UserEntityURLno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'u':
+
+ if bytes.Equal(ffj_key_UserEntityURL_Urls, kn) {
+ currentKey = ffj_t_UserEntityURL_Urls
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.EqualFoldRight(ffj_key_UserEntityURL_Urls, kn) {
+ currentKey = ffj_t_UserEntityURL_Urls
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffj_t_UserEntityURLno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_UserEntityURL_Urls:
+ goto handle_Urls
+
+ case ffj_t_UserEntityURLno_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Urls:
+
+ /* handler: uj.Urls type=[]benchmark.URL kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ uj.Urls = nil
+ } else {
+
+ uj.Urls = make([]URL, 0)
+
+ wantVal := true
+
+ for {
+
+ var tmp_uj__Urls URL
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmp_uj__Urls type=benchmark.URL kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+
+ err = tmp_uj__Urls.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ uj.Urls = append(uj.Urls, tmp_uj__Urls)
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
+
+func (mj *XLStruct) MarshalJSON() ([]byte, error) {
+ var buf fflib.Buffer
+ if mj == nil {
+ buf.WriteString("null")
+ return buf.Bytes(), nil
+ }
+ err := mj.MarshalJSONBuf(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+func (mj *XLStruct) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
+ if mj == nil {
+ buf.WriteString("null")
+ return nil
+ }
+ var err error
+ var obj []byte
+ _ = obj
+ _ = err
+ buf.WriteString(`{"Data":`)
+ if mj.Data != nil {
+ buf.WriteString(`[`)
+ for i, v := range mj.Data {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+
+ {
+
+ err = v.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+
+ }
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte('}')
+ return nil
+}
+
+const (
+ ffj_t_XLStructbase = iota
+ ffj_t_XLStructno_such_key
+
+ ffj_t_XLStruct_Data
+)
+
+var ffj_key_XLStruct_Data = []byte("Data")
+
+func (uj *XLStruct) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+}
+
+func (uj *XLStruct) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error = nil
+ currentKey := ffj_t_XLStructbase
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffj_t_XLStructno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+
+ case 'D':
+
+ if bytes.Equal(ffj_key_XLStruct_Data, kn) {
+ currentKey = ffj_t_XLStruct_Data
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ }
+
+ if fflib.SimpleLetterEqualFold(ffj_key_XLStruct_Data, kn) {
+ currentKey = ffj_t_XLStruct_Data
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ currentKey = ffj_t_XLStructno_such_key
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null {
+ switch currentKey {
+
+ case ffj_t_XLStruct_Data:
+ goto handle_Data
+
+ case ffj_t_XLStructno_such_key:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+
+handle_Data:
+
+ /* handler: uj.Data type=[]benchmark.LargeStruct kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ uj.Data = nil
+ } else {
+
+ uj.Data = make([]LargeStruct, 0)
+
+ wantVal := true
+
+ for {
+
+ var tmp_uj__Data LargeStruct
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmp_uj__Data type=benchmark.LargeStruct kind=struct quoted=false*/
+
+ {
+ if tok == fflib.FFTok_null {
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+
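+ // Delegate nested struct decoding to the element's generated
+ // lexer-based unmarshaler.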
+ err = tmp_uj__Data.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ state = fflib.FFParse_after_value
+ }
+
+ uj.Data = append(uj.Data, tmp_uj__Data)
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
+wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+done:
+ return nil
+}
diff --git a/vendor/github.com/mailru/easyjson/benchmark/data_var.go b/vendor/github.com/mailru/easyjson/benchmark/data_var.go
new file mode 100644
index 000000000..ea4202dbe
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/data_var.go
@@ -0,0 +1,350 @@
+package benchmark
+
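+ // Fixture mirroring the Twitter search payload decoded by this
+ // benchmark package.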
+var largeStructData = LargeStruct{
+ SearchMetadata: SearchMetadata{
+ CompletedIn: 0.035,
+ Count: 4,
+ MaxID: 250126199840518145,
+ MaxIDStr: "250126199840518145",
+ NextResults: "?max_id=249279667666817023&q=%23freebandnames&count=4&include_entities=1&result_type=mixed",
+ Query: "%23freebandnames",
+ RefreshURL: "?since_id=250126199840518145&q=%23freebandnames&result_type=mixed&include_entities=1",
+ SinceID: 24012619984051000,
+ SinceIDStr: "24012619984051000",
+ },
+ Statuses: []Status{
+ {
+ Contributors: nil,
+ Coordinates: nil,
+ CreatedAt: "Mon Sep 24 03:35:21 +0000 2012",
+ Entities: Entities{
+ Hashtags: []Hashtag{{
+ Indices: []int{20, 34},
+ Text: "freebandnames"},
+ },
+ Urls: []*string{},
+ UserMentions: []*string{},
+ },
+ Favorited: false,
+ Geo: nil,
+ ID: 250075927172759552,
+ IDStr: "250075927172759552",
+ InReplyToScreenName: nil,
+ InReplyToStatusID: nil,
+ InReplyToStatusIDStr: nil,
+ InReplyToUserID: nil,
+ InReplyToUserIDStr: nil,
+ Metadata: StatusMetadata{
+ IsoLanguageCode: "en",
+ ResultType: "recent",
+ },
+ Place: nil,
+ RetweetCount: 0,
+ Retweeted: false,
+ Source: "<a href=\"//itunes.apple.com/us/app/twitter/id409789998?mt=12%5C%22\" rel=\"\\\"nofollow\\\"\">Twitter for Mac</a>",
+ Text: "Aggressive Ponytail #freebandnames",
+ Truncated: false,
+ User: User{
+ ContributorsEnabled: false,
+ CreatedAt: "Mon Apr 26 06:01:55 +0000 2010",
+ DefaultProfile: true,
+ DefaultProfileImage: false,
+ Description: "Born 330 Live 310",
+ Entities: UserEntities{
+ Description: UserEntityDescription{
+ Urls: []*string{},
+ },
+ URL: UserEntityURL{
+ Urls: []URL{{
+ ExpandedURL: nil,
+ Indices: []int{0, 0},
+ URL: "",
+ }},
+ },
+ },
+ FavouritesCount: 0,
+ FollowRequestSent: nil,
+ FollowersCount: 70,
+ Following: nil,
+ FriendsCount: 110,
+ GeoEnabled: true,
+ ID: 137238150,
+ IDStr: "137238150",
+ IsTranslator: false,
+ Lang: "en",
+ ListedCount: 2,
+ Location: "LA, CA",
+ Name: "Sean Cummings",
+ Notifications: nil,
+ ProfileBackgroundColor: "C0DEED",
+ ProfileBackgroundImageURL: "http://a0.twimg.com/images/themes/theme1/bg.png",
+ ProfileBackgroundImageURLHTTPS: "https://si0.twimg.com/images/themes/theme1/bg.png",
+ ProfileBackgroundTile: false,
+ ProfileImageURL: "http://a0.twimg.com/profile_images/2359746665/1v6zfgqo8g0d3mk7ii5s_normal.jpeg",
+ ProfileImageURLHTTPS: "https://si0.twimg.com/profile_images/2359746665/1v6zfgqo8g0d3mk7ii5s_normal.jpeg",
+ ProfileLinkColor: "0084B4",
+ ProfileSidebarBorderColor: "C0DEED",
+ ProfileSidebarFillColor: "DDEEF6",
+ ProfileTextColor: "333333",
+ ProfileUseBackgroundImage: true,
+ Protected: false,
+ ScreenName: "sean_cummings",
+ ShowAllInlineMedia: false,
+ StatusesCount: 579,
+ TimeZone: "Pacific Time (US & Canada)",
+ URL: nil,
+ UtcOffset: -28800,
+ Verified: false,
+ },
+ },
+ {
+ Contributors: nil,
+ Coordinates: nil,
+ CreatedAt: "Fri Sep 21 23:40:54 +0000 2012",
+ Entities: Entities{
+ Hashtags: []Hashtag{{
+ Indices: []int{20, 34},
+ Text: "FreeBandNames",
+ }},
+ Urls: []*string{},
+ UserMentions: []*string{},
+ },
+ Favorited: false,
+ Geo: nil,
+ ID: 249292149810667520,
+ IDStr: "249292149810667520",
+ InReplyToScreenName: nil,
+ InReplyToStatusID: nil,
+ InReplyToStatusIDStr: nil,
+ InReplyToUserID: nil,
+ InReplyToUserIDStr: nil,
+ Metadata: StatusMetadata{
+ IsoLanguageCode: "pl",
+ ResultType: "recent",
+ },
+ Place: nil,
+ RetweetCount: 0,
+ Retweeted: false,
+ Source: "web",
+ Text: "Thee Namaste Nerdz. #FreeBandNames",
+ Truncated: false,
+ User: User{
+ ContributorsEnabled: false,
+ CreatedAt: "Tue Apr 07 19:05:07 +0000 2009",
+ DefaultProfile: false,
+ DefaultProfileImage: false,
+ Description: "You will come to Durham, North Carolina. I will sell you some records then, here in Durham, North Carolina. Fun will happen.",
+ Entities: UserEntities{
+ Description: UserEntityDescription{Urls: []*string{}},
+ URL: UserEntityURL{
+ Urls: []URL{{
+ ExpandedURL: nil,
+ Indices: []int{0, 32},
+ URL: "http://bullcityrecords.com/wnng/"}},
+ },
+ },
+ FavouritesCount: 8,
+ FollowRequestSent: nil,
+ FollowersCount: 2052,
+ Following: nil,
+ FriendsCount: 348,
+ GeoEnabled: false,
+ ID: 29516238,
+ IDStr: "29516238",
+ IsTranslator: false,
+ Lang: "en",
+ ListedCount: 118,
+ Location: "Durham, NC",
+ Name: "Chaz Martenstein",
+ Notifications: nil,
+ ProfileBackgroundColor: "9AE4E8",
+ ProfileBackgroundImageURL: "http://a0.twimg.com/profile_background_images/9423277/background_tile.bmp",
+ ProfileBackgroundImageURLHTTPS: "https://si0.twimg.com/profile_background_images/9423277/background_tile.bmp",
+ ProfileBackgroundTile: true,
+ ProfileImageURL: "http://a0.twimg.com/profile_images/447958234/Lichtenstein_normal.jpg",
+ ProfileImageURLHTTPS: "https://si0.twimg.com/profile_images/447958234/Lichtenstein_normal.jpg",
+ ProfileLinkColor: "0084B4",
+ ProfileSidebarBorderColor: "BDDCAD",
+ ProfileSidebarFillColor: "DDFFCC",
+ ProfileTextColor: "333333",
+ ProfileUseBackgroundImage: true,
+ Protected: false,
+ ScreenName: "bullcityrecords",
+ ShowAllInlineMedia: true,
+ StatusesCount: 7579,
+ TimeZone: "Eastern Time (US & Canada)",
+ URL: nil,
+ UtcOffset: -18000,
+ Verified: false,
+ },
+ },
+ {
+ Contributors: nil,
+ Coordinates: nil,
+ CreatedAt: "Fri Sep 21 23:30:20 +0000 2012",
+ Entities: Entities{
+ Hashtags: []Hashtag{{
+ Indices: []int{29, 43},
+ Text: "freebandnames",
+ }},
+ Urls: []*string{},
+ UserMentions: []*string{},
+ },
+ Favorited: false,
+ Geo: nil,
+ ID: 249289491129438208,
+ IDStr: "249289491129438208",
+ InReplyToScreenName: nil,
+ InReplyToStatusID: nil,
+ InReplyToStatusIDStr: nil,
+ InReplyToUserID: nil,
+ InReplyToUserIDStr: nil,
+ Metadata: StatusMetadata{
+ IsoLanguageCode: "en",
+ ResultType: "recent",
+ },
+ Place: nil,
+ RetweetCount: 0,
+ Retweeted: false,
+ Source: "web",
+ Text: "Mexican Heaven, Mexican Hell #freebandnames",
+ Truncated: false,
+ User: User{
+ ContributorsEnabled: false,
+ CreatedAt: "Tue Sep 01 21:21:35 +0000 2009",
+ DefaultProfile: false,
+ DefaultProfileImage: false,
+ Description: "Science Fiction Writer, sort of. Likes Superheroes, Mole People, Alt. Timelines.",
+ Entities: UserEntities{
+ Description: UserEntityDescription{
+ Urls: nil,
+ },
+ URL: UserEntityURL{
+ Urls: []URL{{
+ ExpandedURL: nil,
+ Indices: []int{0, 0},
+ URL: "",
+ }},
+ },
+ },
+ FavouritesCount: 19,
+ FollowRequestSent: nil,
+ FollowersCount: 63,
+ Following: nil,
+ FriendsCount: 63,
+ GeoEnabled: false,
+ ID: 70789458,
+ IDStr: "70789458",
+ IsTranslator: false,
+ Lang: "en",
+ ListedCount: 1,
+ Location: "Kingston New York",
+ Name: "Thomas John Wakeman",
+ Notifications: nil,
+ ProfileBackgroundColor: "352726",
+ ProfileBackgroundImageURL: "http://a0.twimg.com/images/themes/theme5/bg.gif",
+ ProfileBackgroundImageURLHTTPS: "https://si0.twimg.com/images/themes/theme5/bg.gif",
+ ProfileBackgroundTile: false,
+ ProfileImageURL: "http://a0.twimg.com/profile_images/2219333930/Froggystyle_normal.png",
+ ProfileImageURLHTTPS: "https://si0.twimg.com/profile_images/2219333930/Froggystyle_normal.png",
+ ProfileLinkColor: "D02B55",
+ ProfileSidebarBorderColor: "829D5E",
+ ProfileSidebarFillColor: "99CC33",
+ ProfileTextColor: "3E4415",
+ ProfileUseBackgroundImage: true,
+ Protected: false,
+ ScreenName: "MonkiesFist",
+ ShowAllInlineMedia: false,
+ StatusesCount: 1048,
+ TimeZone: "Eastern Time (US & Canada)",
+ URL: nil,
+ UtcOffset: -18000,
+ Verified: false,
+ },
+ },
+ {
+ Contributors: nil,
+ Coordinates: nil,
+ CreatedAt: "Fri Sep 21 22:51:18 +0000 2012",
+ Entities: Entities{
+ Hashtags: []Hashtag{{
+ Indices: []int{20, 34},
+ Text: "freebandnames",
+ }},
+ Urls: []*string{},
+ UserMentions: []*string{},
+ },
+ Favorited: false,
+ Geo: nil,
+ ID: 249279667666817024,
+ IDStr: "249279667666817024",
+ InReplyToScreenName: nil,
+ InReplyToStatusID: nil,
+ InReplyToStatusIDStr: nil,
+ InReplyToUserID: nil,
+ InReplyToUserIDStr: nil,
+ Metadata: StatusMetadata{
+ IsoLanguageCode: "en",
+ ResultType: "recent",
+ },
+ Place: nil,
+ RetweetCount: 0,
+ Retweeted: false,
+ Source: "<a href=\"//twitter.com/download/iphone%5C%22\" rel=\"\\\"nofollow\\\"\">Twitter for iPhone</a>",
+ Text: "The Foolish Mortals #freebandnames",
+ Truncated: false,
+ User: User{
+ ContributorsEnabled: false,
+ CreatedAt: "Mon May 04 00:05:00 +0000 2009",
+ DefaultProfile: false,
+ DefaultProfileImage: false,
+ Description: "Cartoonist, Illustrator, and T-Shirt connoisseur",
+ Entities: UserEntities{
+ Description: UserEntityDescription{
+ Urls: []*string{},
+ },
+ URL: UserEntityURL{
+ Urls: []URL{{
+ ExpandedURL: nil,
+ Indices: []int{0, 24},
+ URL: "http://www.omnitarian.me",
+ }},
+ },
+ },
+ FavouritesCount: 647,
+ FollowRequestSent: nil,
+ FollowersCount: 608,
+ Following: nil,
+ FriendsCount: 249,
+ GeoEnabled: false,
+ ID: 37539828,
+ IDStr: "37539828",
+ IsTranslator: false,
+ Lang: "en",
+ ListedCount: 52,
+ Location: "Wisconsin, USA",
+ Name: "Marty Elmer",
+ Notifications: nil,
+ ProfileBackgroundColor: "EEE3C4",
+ ProfileBackgroundImageURL: "http://a0.twimg.com/profile_background_images/106455659/rect6056-9.png",
+ ProfileBackgroundImageURLHTTPS: "https://si0.twimg.com/profile_background_images/106455659/rect6056-9.png",
+ ProfileBackgroundTile: true,
+ ProfileImageURL: "http://a0.twimg.com/profile_images/1629790393/shrinker_2000_trans_normal.png",
+ ProfileImageURLHTTPS: "https://si0.twimg.com/profile_images/1629790393/shrinker_2000_trans_normal.png",
+ ProfileLinkColor: "3B2A26",
+ ProfileSidebarBorderColor: "615A44",
+ ProfileSidebarFillColor: "BFAC83",
+ ProfileTextColor: "000000",
+ ProfileUseBackgroundImage: true,
+ Protected: false,
+ ScreenName: "Omnitarian",
+ ShowAllInlineMedia: true,
+ StatusesCount: 3575,
+ TimeZone: "Central Time (US & Canada)",
+ URL: nil,
+ UtcOffset: -21600,
+ Verified: false,
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/mailru/easyjson/benchmark/default_test.go b/vendor/github.com/mailru/easyjson/benchmark/default_test.go
new file mode 100644
index 000000000..68b37910d
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/default_test.go
@@ -0,0 +1,118 @@
+// +build !use_easyjson,!use_ffjson,!use_codec,!use_jsoniter
+
+package benchmark
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func BenchmarkStd_Unmarshal_M(b *testing.B) {
+ b.SetBytes(int64(len(largeStructText)))
+ for i := 0; i < b.N; i++ {
+ var s LargeStruct
+ err := json.Unmarshal(largeStructText, &s)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+func BenchmarkStd_Unmarshal_S(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var s Entities
+ err := json.Unmarshal(smallStructText, &s)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+ b.SetBytes(int64(len(smallStructText)))
+}
+
+func BenchmarkStd_Marshal_M(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := json.Marshal(&largeStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkStd_Marshal_L(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := json.Marshal(&xlStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkStd_Marshal_M_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := json.Marshal(&largeStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkStd_Marshal_L_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := json.Marshal(&xlStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkStd_Marshal_S(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := json.Marshal(&smallStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkStd_Marshal_S_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := json.Marshal(&smallStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkStd_Marshal_M_ToWriter(b *testing.B) {
+ enc := json.NewEncoder(&DummyWriter{})
+ for i := 0; i < b.N; i++ {
+ err := enc.Encode(&largeStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+}
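
A note on the idiom shared by this and the following benchmark files: each 'use_*' build tag selects exactly one codec per 'go test' run, and b.SetBytes records the payload size so the -bench output reports MB/s next to ns/op. A minimal, self-contained sketch of the same pattern; the inline payload is a stand-in, not the package's real fixture:

package benchmark

import (
	"encoding/json"
	"testing"
)

// BenchmarkThroughputIdiom mirrors the benchmarks above: SetBytes tells the
// testing package how many bytes one iteration processes, so `go test -bench`
// derives and prints a MB/s figure for the loop body.
func BenchmarkThroughputIdiom(b *testing.B) {
	payload := []byte(`{"text":"Aggressive Ponytail #freebandnames"}`)
	b.SetBytes(int64(len(payload)))
	for i := 0; i < b.N; i++ {
		var v map[string]interface{}
		if err := json.Unmarshal(payload, &v); err != nil {
			b.Error(err)
		}
	}
}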
diff --git a/vendor/github.com/mailru/easyjson/benchmark/dummy_test.go b/vendor/github.com/mailru/easyjson/benchmark/dummy_test.go
new file mode 100644
index 000000000..3d928ca7c
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/dummy_test.go
@@ -0,0 +1,11 @@
+package benchmark
+
+import (
+ "testing"
+)
+
+type DummyWriter struct{}
+
+func (w DummyWriter) Write(data []byte) (int, error) { return len(data), nil }
+
+func TestToSuppressNoTestsWarning(t *testing.T) {}
diff --git a/vendor/github.com/mailru/easyjson/benchmark/easyjson_test.go b/vendor/github.com/mailru/easyjson/benchmark/easyjson_test.go
new file mode 100644
index 000000000..16b670b27
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/easyjson_test.go
@@ -0,0 +1,184 @@
+// +build use_easyjson
+
+package benchmark
+
+import (
+ "testing"
+
+ "github.com/mailru/easyjson"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+func BenchmarkEJ_Unmarshal_M(b *testing.B) {
+ b.SetBytes(int64(len(largeStructText)))
+ for i := 0; i < b.N; i++ {
+ var s LargeStruct
+ err := s.UnmarshalJSON(largeStructText)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+func BenchmarkEJ_Unmarshal_S(b *testing.B) {
+ b.SetBytes(int64(len(smallStructText)))
+
+ for i := 0; i < b.N; i++ {
+ var s Entities
+ err := s.UnmarshalJSON(smallStructText)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+func BenchmarkEJ_Marshal_M(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := easyjson.Marshal(&largeStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkEJ_Marshal_L(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := easyjson.Marshal(&xlStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkEJ_Marshal_L_ToWriter(b *testing.B) {
+ var l int64
+ out := &DummyWriter{}
+ for i := 0; i < b.N; i++ {
+ w := jwriter.Writer{}
+ xlStructData.MarshalEasyJSON(&w)
+ if w.Error != nil {
+ b.Error(w.Error)
+ }
+
+ l = int64(w.Size())
+ w.DumpTo(out)
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkEJ_Marshal_M_Parallel(b *testing.B) {
+ b.SetBytes(int64(len(largeStructText)))
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ _, err := largeStructData.MarshalJSON()
+ if err != nil {
+ b.Error(err)
+ }
+ }
+ })
+}
+
+func BenchmarkEJ_Marshal_M_ToWriter(b *testing.B) {
+ var l int64
+ out := &DummyWriter{}
+ for i := 0; i < b.N; i++ {
+ w := jwriter.Writer{}
+ largeStructData.MarshalEasyJSON(&w)
+ if w.Error != nil {
+ b.Error(w.Error)
+ }
+
+ l = int64(w.Size())
+ w.DumpTo(out)
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkEJ_Marshal_M_ToWriter_Parallel(b *testing.B) {
+ out := &DummyWriter{}
+
+ b.RunParallel(func(pb *testing.PB) {
+ var l int64
+ for pb.Next() {
+ w := jwriter.Writer{}
+ largeStructData.MarshalEasyJSON(&w)
+ if w.Error != nil {
+ b.Error(w.Error)
+ }
+
+ l = int64(w.Size())
+ w.DumpTo(out)
+ }
+ if l > 0 {
+ b.SetBytes(l)
+ }
+ })
+
+}
+
+func BenchmarkEJ_Marshal_L_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := xlStructData.MarshalJSON()
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkEJ_Marshal_L_ToWriter_Parallel(b *testing.B) {
+ out := &DummyWriter{}
+ b.RunParallel(func(pb *testing.PB) {
+ var l int64
+ for pb.Next() {
+ w := jwriter.Writer{}
+
+ xlStructData.MarshalEasyJSON(&w)
+ if w.Error != nil {
+ b.Error(w.Error)
+ }
+ l = int64(w.Size())
+ w.DumpTo(out)
+ }
+ if l > 0 {
+ b.SetBytes(l)
+ }
+ })
+}
+
+func BenchmarkEJ_Marshal_S(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := smallStructData.MarshalJSON()
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkEJ_Marshal_S_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := smallStructData.MarshalJSON()
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ })
+ b.SetBytes(l)
+}
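
The *_ToWriter benchmarks above exercise jwriter.Writer directly: a generated MarshalEasyJSON serializes into the writer's chunked buffer, and DumpTo streams those chunks into any io.Writer without first building one contiguous []byte. A minimal sketch of that flow outside a benchmark; the Foo type and its hand-written MarshalEasyJSON are illustrative stand-ins for what the easyjson tool would normally generate:

package main

import (
	"os"

	"github.com/mailru/easyjson/jwriter"
)

// Foo is a hypothetical type; easyjson would normally generate its marshaler.
type Foo struct{ Text string }

func (f Foo) MarshalEasyJSON(w *jwriter.Writer) {
	w.RawString(`{"text":`)
	w.String(f.Text)
	w.RawByte('}')
}

func main() {
	w := jwriter.Writer{}
	Foo{Text: "hello"}.MarshalEasyJSON(&w)
	if w.Error != nil {
		panic(w.Error)
	}
	w.DumpTo(os.Stdout) // streams the buffered chunks straight to stdout
}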
diff --git a/vendor/github.com/mailru/easyjson/benchmark/example.json b/vendor/github.com/mailru/easyjson/benchmark/example.json
new file mode 100644
index 000000000..2405022cf
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/example.json
@@ -0,0 +1,415 @@
+{
+ "statuses": [
+ {
+ "coordinates": null,
+ "favorited": false,
+ "truncated": false,
+ "created_at": "Mon Sep 24 03:35:21 +0000 2012",
+ "id_str": "250075927172759552",
+ "entities": {
+ "urls": [
+
+ ],
+ "hashtags": [
+ {
+ "text": "freebandnames",
+ "indices": [
+ 20,
+ 34
+ ]
+ }
+ ],
+ "user_mentions": [
+
+ ]
+ },
+ "in_reply_to_user_id_str": null,
+ "contributors": null,
+ "text": "Aggressive Ponytail #freebandnames",
+ "metadata": {
+ "iso_language_code": "en",
+ "result_type": "recent"
+ },
+ "retweet_count": 0,
+ "in_reply_to_status_id_str": null,
+ "id": 250075927172759552,
+ "geo": null,
+ "retweeted": false,
+ "in_reply_to_user_id": null,
+ "place": null,
+ "user": {
+ "profile_sidebar_fill_color": "DDEEF6",
+ "profile_sidebar_border_color": "C0DEED",
+ "profile_background_tile": false,
+ "name": "Sean Cummings",
+ "profile_image_url": "http://a0.twimg.com/profile_images/2359746665/1v6zfgqo8g0d3mk7ii5s_normal.jpeg",
+ "created_at": "Mon Apr 26 06:01:55 +0000 2010",
+ "location": "LA, CA",
+ "follow_request_sent": null,
+ "profile_link_color": "0084B4",
+ "is_translator": false,
+ "id_str": "137238150",
+ "entities": {
+ "url": {
+ "urls": [
+ {
+ "expanded_url": null,
+ "url": "",
+ "indices": [
+ 0,
+ 0
+ ]
+ }
+ ]
+ },
+ "description": {
+ "urls": [
+
+ ]
+ }
+ },
+ "default_profile": true,
+ "contributors_enabled": false,
+ "favourites_count": 0,
+ "url": null,
+ "profile_image_url_https": "https://si0.twimg.com/profile_images/2359746665/1v6zfgqo8g0d3mk7ii5s_normal.jpeg",
+ "utc_offset": -28800,
+ "id": 137238150,
+ "profile_use_background_image": true,
+ "listed_count": 2,
+ "profile_text_color": "333333",
+ "lang": "en",
+ "followers_count": 70,
+ "protected": false,
+ "notifications": null,
+ "profile_background_image_url_https": "https://si0.twimg.com/images/themes/theme1/bg.png",
+ "profile_background_color": "C0DEED",
+ "verified": false,
+ "geo_enabled": true,
+ "time_zone": "Pacific Time (US & Canada)",
+ "description": "Born 330 Live 310",
+ "default_profile_image": false,
+ "profile_background_image_url": "http://a0.twimg.com/images/themes/theme1/bg.png",
+ "statuses_count": 579,
+ "friends_count": 110,
+ "following": null,
+ "show_all_inline_media": false,
+ "screen_name": "sean_cummings"
+ },
+ "in_reply_to_screen_name": null,
+ "source": "<a href=\"//itunes.apple.com/us/app/twitter/id409789998?mt=12%5C%22\" rel=\"\\\"nofollow\\\"\">Twitter for Mac</a>",
+ "in_reply_to_status_id": null
+ },
+ {
+ "coordinates": null,
+ "favorited": false,
+ "truncated": false,
+ "created_at": "Fri Sep 21 23:40:54 +0000 2012",
+ "id_str": "249292149810667520",
+ "entities": {
+ "urls": [
+
+ ],
+ "hashtags": [
+ {
+ "text": "FreeBandNames",
+ "indices": [
+ 20,
+ 34
+ ]
+ }
+ ],
+ "user_mentions": [
+
+ ]
+ },
+ "in_reply_to_user_id_str": null,
+ "contributors": null,
+ "text": "Thee Namaste Nerdz. #FreeBandNames",
+ "metadata": {
+ "iso_language_code": "pl",
+ "result_type": "recent"
+ },
+ "retweet_count": 0,
+ "in_reply_to_status_id_str": null,
+ "id": 249292149810667520,
+ "geo": null,
+ "retweeted": false,
+ "in_reply_to_user_id": null,
+ "place": null,
+ "user": {
+ "profile_sidebar_fill_color": "DDFFCC",
+ "profile_sidebar_border_color": "BDDCAD",
+ "profile_background_tile": true,
+ "name": "Chaz Martenstein",
+ "profile_image_url": "http://a0.twimg.com/profile_images/447958234/Lichtenstein_normal.jpg",
+ "created_at": "Tue Apr 07 19:05:07 +0000 2009",
+ "location": "Durham, NC",
+ "follow_request_sent": null,
+ "profile_link_color": "0084B4",
+ "is_translator": false,
+ "id_str": "29516238",
+ "entities": {
+ "url": {
+ "urls": [
+ {
+ "expanded_url": null,
+ "url": "http://bullcityrecords.com/wnng/",
+ "indices": [
+ 0,
+ 32
+ ]
+ }
+ ]
+ },
+ "description": {
+ "urls": [
+
+ ]
+ }
+ },
+ "default_profile": false,
+ "contributors_enabled": false,
+ "favourites_count": 8,
+ "url": "http://bullcityrecords.com/wnng/",
+ "profile_image_url_https": "https://si0.twimg.com/profile_images/447958234/Lichtenstein_normal.jpg",
+ "utc_offset": -18000,
+ "id": 29516238,
+ "profile_use_background_image": true,
+ "listed_count": 118,
+ "profile_text_color": "333333",
+ "lang": "en",
+ "followers_count": 2052,
+ "protected": false,
+ "notifications": null,
+ "profile_background_image_url_https": "https://si0.twimg.com/profile_background_images/9423277/background_tile.bmp",
+ "profile_background_color": "9AE4E8",
+ "verified": false,
+ "geo_enabled": false,
+ "time_zone": "Eastern Time (US & Canada)",
+ "description": "You will come to Durham, North Carolina. I will sell you some records then, here in Durham, North Carolina. Fun will happen.",
+ "default_profile_image": false,
+ "profile_background_image_url": "http://a0.twimg.com/profile_background_images/9423277/background_tile.bmp",
+ "statuses_count": 7579,
+ "friends_count": 348,
+ "following": null,
+ "show_all_inline_media": true,
+ "screen_name": "bullcityrecords"
+ },
+ "in_reply_to_screen_name": null,
+ "source": "web",
+ "in_reply_to_status_id": null
+ },
+ {
+ "coordinates": null,
+ "favorited": false,
+ "truncated": false,
+ "created_at": "Fri Sep 21 23:30:20 +0000 2012",
+ "id_str": "249289491129438208",
+ "entities": {
+ "urls": [
+
+ ],
+ "hashtags": [
+ {
+ "text": "freebandnames",
+ "indices": [
+ 29,
+ 43
+ ]
+ }
+ ],
+ "user_mentions": [
+
+ ]
+ },
+ "in_reply_to_user_id_str": null,
+ "contributors": null,
+ "text": "Mexican Heaven, Mexican Hell #freebandnames",
+ "metadata": {
+ "iso_language_code": "en",
+ "result_type": "recent"
+ },
+ "retweet_count": 0,
+ "in_reply_to_status_id_str": null,
+ "id": 249289491129438208,
+ "geo": null,
+ "retweeted": false,
+ "in_reply_to_user_id": null,
+ "place": null,
+ "user": {
+ "profile_sidebar_fill_color": "99CC33",
+ "profile_sidebar_border_color": "829D5E",
+ "profile_background_tile": false,
+ "name": "Thomas John Wakeman",
+ "profile_image_url": "http://a0.twimg.com/profile_images/2219333930/Froggystyle_normal.png",
+ "created_at": "Tue Sep 01 21:21:35 +0000 2009",
+ "location": "Kingston New York",
+ "follow_request_sent": null,
+ "profile_link_color": "D02B55",
+ "is_translator": false,
+ "id_str": "70789458",
+ "entities": {
+ "url": {
+ "urls": [
+ {
+ "expanded_url": null,
+ "url": "",
+ "indices": [
+ 0,
+ 0
+ ]
+ }
+ ]
+ },
+ "description": {
+ "urls": [
+
+ ]
+ }
+ },
+ "default_profile": false,
+ "contributors_enabled": false,
+ "favourites_count": 19,
+ "url": null,
+ "profile_image_url_https": "https://si0.twimg.com/profile_images/2219333930/Froggystyle_normal.png",
+ "utc_offset": -18000,
+ "id": 70789458,
+ "profile_use_background_image": true,
+ "listed_count": 1,
+ "profile_text_color": "3E4415",
+ "lang": "en",
+ "followers_count": 63,
+ "protected": false,
+ "notifications": null,
+ "profile_background_image_url_https": "https://si0.twimg.com/images/themes/theme5/bg.gif",
+ "profile_background_color": "352726",
+ "verified": false,
+ "geo_enabled": false,
+ "time_zone": "Eastern Time (US & Canada)",
+ "description": "Science Fiction Writer, sort of. Likes Superheroes, Mole People, Alt. Timelines.",
+ "default_profile_image": false,
+ "profile_background_image_url": "http://a0.twimg.com/images/themes/theme5/bg.gif",
+ "statuses_count": 1048,
+ "friends_count": 63,
+ "following": null,
+ "show_all_inline_media": false,
+ "screen_name": "MonkiesFist"
+ },
+ "in_reply_to_screen_name": null,
+ "source": "web",
+ "in_reply_to_status_id": null
+ },
+ {
+ "coordinates": null,
+ "favorited": false,
+ "truncated": false,
+ "created_at": "Fri Sep 21 22:51:18 +0000 2012",
+ "id_str": "249279667666817024",
+ "entities": {
+ "urls": [
+
+ ],
+ "hashtags": [
+ {
+ "text": "freebandnames",
+ "indices": [
+ 20,
+ 34
+ ]
+ }
+ ],
+ "user_mentions": [
+
+ ]
+ },
+ "in_reply_to_user_id_str": null,
+ "contributors": null,
+ "text": "The Foolish Mortals #freebandnames",
+ "metadata": {
+ "iso_language_code": "en",
+ "result_type": "recent"
+ },
+ "retweet_count": 0,
+ "in_reply_to_status_id_str": null,
+ "id": 249279667666817024,
+ "geo": null,
+ "retweeted": false,
+ "in_reply_to_user_id": null,
+ "place": null,
+ "user": {
+ "profile_sidebar_fill_color": "BFAC83",
+ "profile_sidebar_border_color": "615A44",
+ "profile_background_tile": true,
+ "name": "Marty Elmer",
+ "profile_image_url": "http://a0.twimg.com/profile_images/1629790393/shrinker_2000_trans_normal.png",
+ "created_at": "Mon May 04 00:05:00 +0000 2009",
+ "location": "Wisconsin, USA",
+ "follow_request_sent": null,
+ "profile_link_color": "3B2A26",
+ "is_translator": false,
+ "id_str": "37539828",
+ "entities": {
+ "url": {
+ "urls": [
+ {
+ "expanded_url": null,
+ "url": "http://www.omnitarian.me",
+ "indices": [
+ 0,
+ 24
+ ]
+ }
+ ]
+ },
+ "description": {
+ "urls": [
+
+ ]
+ }
+ },
+ "default_profile": false,
+ "contributors_enabled": false,
+ "favourites_count": 647,
+ "url": "http://www.omnitarian.me",
+ "profile_image_url_https": "https://si0.twimg.com/profile_images/1629790393/shrinker_2000_trans_normal.png",
+ "utc_offset": -21600,
+ "id": 37539828,
+ "profile_use_background_image": true,
+ "listed_count": 52,
+ "profile_text_color": "000000",
+ "lang": "en",
+ "followers_count": 608,
+ "protected": false,
+ "notifications": null,
+ "profile_background_image_url_https": "https://si0.twimg.com/profile_background_images/106455659/rect6056-9.png",
+ "profile_background_color": "EEE3C4",
+ "verified": false,
+ "geo_enabled": false,
+ "time_zone": "Central Time (US & Canada)",
+ "description": "Cartoonist, Illustrator, and T-Shirt connoisseur",
+ "default_profile_image": false,
+ "profile_background_image_url": "http://a0.twimg.com/profile_background_images/106455659/rect6056-9.png",
+ "statuses_count": 3575,
+ "friends_count": 249,
+ "following": null,
+ "show_all_inline_media": true,
+ "screen_name": "Omnitarian"
+ },
+ "in_reply_to_screen_name": null,
+ "source": "<a href=\"//twitter.com/download/iphone%5C%22\" rel=\"\\\"nofollow\\\"\">Twitter for iPhone</a>",
+ "in_reply_to_status_id": null
+ }
+ ],
+ "search_metadata": {
+ "max_id": 250126199840518145,
+ "since_id": 24012619984051000,
+ "refresh_url": "?since_id=250126199840518145&q=%23freebandnames&result_type=mixed&include_entities=1",
+ "next_results": "?max_id=249279667666817023&q=%23freebandnames&count=4&include_entities=1&result_type=mixed",
+ "count": 4,
+ "completed_in": 0.035,
+ "since_id_str": "24012619984051000",
+ "query": "%23freebandnames",
+ "max_id_str": "250126199840518145"
+ }
+}
diff --git a/vendor/github.com/mailru/easyjson/benchmark/ffjson_test.go b/vendor/github.com/mailru/easyjson/benchmark/ffjson_test.go
new file mode 100644
index 000000000..03671827c
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/ffjson_test.go
@@ -0,0 +1,193 @@
+// +build use_ffjson
+
+package benchmark
+
+import (
+ "testing"
+
+ "github.com/pquerna/ffjson/ffjson"
+)
+
+func BenchmarkFF_Unmarshal_M(b *testing.B) {
+ b.SetBytes(int64(len(largeStructText)))
+ for i := 0; i < b.N; i++ {
+ var s LargeStruct
+ err := ffjson.UnmarshalFast(largeStructText, &s)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+func BenchmarkFF_Unmarshal_S(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var s Entities
+ err := ffjson.UnmarshalFast(smallStructText, &s)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+ b.SetBytes(int64(len(smallStructText)))
+}
+
+func BenchmarkFF_Marshal_M(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := ffjson.MarshalFast(&largeStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkFF_Marshal_S(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := ffjson.MarshalFast(&smallStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkFF_Marshal_M_Pool(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := ffjson.MarshalFast(&largeStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ ffjson.Pool(data)
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkFF_Marshal_L(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := ffjson.MarshalFast(&xlStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkFF_Marshal_L_Pool(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := ffjson.MarshalFast(&xlStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ ffjson.Pool(data)
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkFF_Marshal_L_Pool_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+  for pb.Next() {
+   data, err := ffjson.MarshalFast(&xlStructData)
+   if err != nil {
+    b.Error(err)
+   }
+   l = int64(len(data))
+   ffjson.Pool(data)
+  }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkFF_Marshal_M_Pool_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := ffjson.MarshalFast(&largeStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ ffjson.Pool(data)
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkFF_Marshal_S_Pool(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := ffjson.MarshalFast(&smallStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ ffjson.Pool(data)
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkFF_Marshal_S_Pool_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := ffjson.MarshalFast(&smallStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ ffjson.Pool(data)
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkFF_Marshal_S_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := ffjson.MarshalFast(&smallStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkFF_Marshal_M_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := ffjson.MarshalFast(&largeStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkFF_Marshal_L_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := ffjson.MarshalFast(&xlStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ })
+ b.SetBytes(l)
+}
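
The difference between the plain and *_Pool variants above is the ffjson.Pool call: returning the marshaled buffer to ffjson's internal pool lets a later MarshalFast reuse the backing array, cutting steady-state allocations. A small sketch of the pattern; Point is a stand-in type:

package main

import (
	"fmt"

	"github.com/pquerna/ffjson/ffjson"
)

// Point is a stand-in; any value ffjson can marshal works the same way.
type Point struct {
	X, Y int
}

func main() {
	data, err := ffjson.Marshal(&Point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// Hand the buffer back once we are done with it; data must not be
	// used after this call, since a later Marshal may overwrite it.
	ffjson.Pool(data)
}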
diff --git a/vendor/github.com/mailru/easyjson/benchmark/jsoniter_test.go b/vendor/github.com/mailru/easyjson/benchmark/jsoniter_test.go
new file mode 100644
index 000000000..004f891da
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/jsoniter_test.go
@@ -0,0 +1,119 @@
+// +build use_jsoniter
+
+package benchmark
+
+import (
+ "testing"
+
+ jsoniter "github.com/json-iterator/go"
+)
+
+func BenchmarkJI_Unmarshal_M(b *testing.B) {
+ b.SetBytes(int64(len(largeStructText)))
+ for i := 0; i < b.N; i++ {
+ var s LargeStruct
+ err := jsoniter.Unmarshal(largeStructText, &s)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+}
+
+func BenchmarkJI_Unmarshal_S(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var s Entities
+ err := jsoniter.Unmarshal(smallStructText, &s)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+ b.SetBytes(int64(len(smallStructText)))
+}
+
+func BenchmarkJI_Marshal_M(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := jsoniter.Marshal(&largeStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkJI_Marshal_L(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := jsoniter.Marshal(&xlStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkJI_Marshal_M_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := jsoniter.Marshal(&largeStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkJI_Marshal_L_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := jsoniter.Marshal(&xlStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkJI_Marshal_S(b *testing.B) {
+ var l int64
+ for i := 0; i < b.N; i++ {
+ data, err := jsoniter.Marshal(&smallStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ b.SetBytes(l)
+}
+
+func BenchmarkJI_Marshal_S_Parallel(b *testing.B) {
+ var l int64
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ data, err := jsoniter.Marshal(&smallStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ l = int64(len(data))
+ }
+ })
+ b.SetBytes(l)
+}
+
+func BenchmarkJI_Marshal_M_ToWriter(b *testing.B) {
+ enc := jsoniter.NewEncoder(&DummyWriter{})
+ for i := 0; i < b.N; i++ {
+ err := enc.Encode(&largeStructData)
+ if err != nil {
+ b.Error(err)
+ }
+ }
+}
diff --git a/vendor/github.com/mailru/easyjson/benchmark/ujson.sh b/vendor/github.com/mailru/easyjson/benchmark/ujson.sh
new file mode 100755
index 000000000..378e7df46
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/benchmark/ujson.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+echo -n "Python ujson module, DECODE: "
+python -m timeit -s "import ujson; data = open('`dirname $0`/example.json', 'r').read()" 'ujson.loads(data)'
+
+echo -n "Python ujson module, ENCODE: "
+python -m timeit -s "import ujson; data = open('`dirname $0`/example.json', 'r').read(); obj = ujson.loads(data)" 'ujson.dumps(obj)'
diff --git a/vendor/github.com/mailru/easyjson/bootstrap/bootstrap.go b/vendor/github.com/mailru/easyjson/bootstrap/bootstrap.go
new file mode 100644
index 000000000..3c20e09ca
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/bootstrap/bootstrap.go
@@ -0,0 +1,188 @@
+// Package bootstrap implements the bootstrapping logic: generation of a .go file to
+// launch the actual generator and launching the generator itself.
+//
+// This package may be preferable to the command-line utility when the serializers
+// must be generated from Go code.
+package bootstrap
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sort"
+)
+
+const genPackage = "github.com/mailru/easyjson/gen"
+const pkgWriter = "github.com/mailru/easyjson/jwriter"
+const pkgLexer = "github.com/mailru/easyjson/jlexer"
+
+type Generator struct {
+ PkgPath, PkgName string
+ Types []string
+
+ NoStdMarshalers bool
+ SnakeCase bool
+ LowerCamelCase bool
+ OmitEmpty bool
+
+ OutName string
+ BuildTags string
+
+ StubsOnly bool
+ LeaveTemps bool
+ NoFormat bool
+}
+
+// writeStub outputs initial stubs for marshalers/unmarshalers so that the package
+// using the marshalers/unmarshalers compiles correctly for the bootstrapping code.
+func (g *Generator) writeStub() error {
+ f, err := os.Create(g.OutName)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ if g.BuildTags != "" {
+ fmt.Fprintln(f, "// +build ", g.BuildTags)
+ fmt.Fprintln(f)
+ }
+ fmt.Fprintln(f, "// TEMPORARY AUTOGENERATED FILE: easyjson stub code to make the package")
+ fmt.Fprintln(f, "// compilable during generation.")
+ fmt.Fprintln(f)
+ fmt.Fprintln(f, "package ", g.PkgName)
+
+ if len(g.Types) > 0 {
+ fmt.Fprintln(f)
+ fmt.Fprintln(f, "import (")
+ fmt.Fprintln(f, ` "`+pkgWriter+`"`)
+ fmt.Fprintln(f, ` "`+pkgLexer+`"`)
+ fmt.Fprintln(f, ")")
+ }
+
+ sort.Strings(g.Types)
+ for _, t := range g.Types {
+ fmt.Fprintln(f)
+ if !g.NoStdMarshalers {
+ fmt.Fprintln(f, "func (", t, ") MarshalJSON() ([]byte, error) { return nil, nil }")
+ fmt.Fprintln(f, "func (*", t, ") UnmarshalJSON([]byte) error { return nil }")
+ }
+
+ fmt.Fprintln(f, "func (", t, ") MarshalEasyJSON(w *jwriter.Writer) {}")
+ fmt.Fprintln(f, "func (*", t, ") UnmarshalEasyJSON(l *jlexer.Lexer) {}")
+ fmt.Fprintln(f)
+ fmt.Fprintln(f, "type EasyJSON_exporter_"+t+" *"+t)
+ }
+ return nil
+}
+
+// writeMain creates a .go file that launches the actual generator when run with 'go run'.
+func (g *Generator) writeMain() (path string, err error) {
+ f, err := ioutil.TempFile(filepath.Dir(g.OutName), "easyjson-bootstrap")
+ if err != nil {
+ return "", err
+ }
+
+ fmt.Fprintln(f, "// +build ignore")
+ fmt.Fprintln(f)
+ fmt.Fprintln(f, "// TEMPORARY AUTOGENERATED FILE: easyjson bootstrapping code to launch")
+ fmt.Fprintln(f, "// the actual generator.")
+ fmt.Fprintln(f)
+ fmt.Fprintln(f, "package main")
+ fmt.Fprintln(f)
+ fmt.Fprintln(f, "import (")
+ fmt.Fprintln(f, ` "fmt"`)
+ fmt.Fprintln(f, ` "os"`)
+ fmt.Fprintln(f)
+ fmt.Fprintf(f, " %q\n", genPackage)
+ if len(g.Types) > 0 {
+ fmt.Fprintln(f)
+ fmt.Fprintf(f, " pkg %q\n", g.PkgPath)
+ }
+ fmt.Fprintln(f, ")")
+ fmt.Fprintln(f)
+ fmt.Fprintln(f, "func main() {")
+ fmt.Fprintf(f, " g := gen.NewGenerator(%q)\n", filepath.Base(g.OutName))
+ fmt.Fprintf(f, " g.SetPkg(%q, %q)\n", g.PkgName, g.PkgPath)
+ if g.BuildTags != "" {
+ fmt.Fprintf(f, " g.SetBuildTags(%q)\n", g.BuildTags)
+ }
+ if g.SnakeCase {
+ fmt.Fprintln(f, " g.UseSnakeCase()")
+ }
+ if g.LowerCamelCase {
+ fmt.Fprintln(f, " g.UseLowerCamelCase()")
+ }
+ if g.OmitEmpty {
+ fmt.Fprintln(f, " g.OmitEmpty()")
+ }
+ if g.NoStdMarshalers {
+ fmt.Fprintln(f, " g.NoStdMarshalers()")
+ }
+
+ sort.Strings(g.Types)
+ for _, v := range g.Types {
+ fmt.Fprintln(f, " g.Add(pkg.EasyJSON_exporter_"+v+"(nil))")
+ }
+
+ fmt.Fprintln(f, " if err := g.Run(os.Stdout); err != nil {")
+ fmt.Fprintln(f, " fmt.Fprintln(os.Stderr, err)")
+ fmt.Fprintln(f, " os.Exit(1)")
+ fmt.Fprintln(f, " }")
+ fmt.Fprintln(f, "}")
+
+ src := f.Name()
+ if err := f.Close(); err != nil {
+ return src, err
+ }
+
+ dest := src + ".go"
+ return dest, os.Rename(src, dest)
+}
+
+func (g *Generator) Run() error {
+ if err := g.writeStub(); err != nil {
+ return err
+ }
+ if g.StubsOnly {
+ return nil
+ }
+
+ path, err := g.writeMain()
+ if err != nil {
+ return err
+ }
+ if !g.LeaveTemps {
+ defer os.Remove(path)
+ }
+
+ f, err := os.Create(g.OutName + ".tmp")
+ if err != nil {
+ return err
+ }
+ if !g.LeaveTemps {
+ defer os.Remove(f.Name()) // will not remove after rename
+ }
+
+ cmd := exec.Command("go", "run", "-tags", g.BuildTags, path)
+ cmd.Stdout = f
+ cmd.Stderr = os.Stderr
+ if err = cmd.Run(); err != nil {
+ return err
+ }
+
+ f.Close()
+
+ if !g.NoFormat {
+ cmd = exec.Command("gofmt", "-w", f.Name())
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = os.Stdout
+
+ if err = cmd.Run(); err != nil {
+ return err
+ }
+ }
+
+ return os.Rename(f.Name(), g.OutName)
+}
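
Tying the pieces together: Run writes the stubs, emits the temporary bootstrap main, executes it with 'go run', and finally gofmt-formats and renames the output into place. A minimal sketch of driving the generator from Go rather than through the easyjson command; the package path, type name, and output file below are illustrative:

package main

import (
	"log"

	"github.com/mailru/easyjson/bootstrap"
)

func main() {
	g := bootstrap.Generator{
		PkgPath: "github.com/example/model", // hypothetical package
		PkgName: "model",
		Types:   []string{"User"}, // hypothetical exported struct
		OutName: "model_easyjson.go",
	}
	if err := g.Run(); err != nil {
		log.Fatal(err)
	}
}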
diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go
new file mode 100644
index 000000000..07fb4bc1f
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/buffer/pool.go
@@ -0,0 +1,270 @@
+// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
+// reduce copying and to allow reuse of individual chunks.
+package buffer
+
+import (
+ "io"
+ "sync"
+)
+
+// PoolConfig contains configuration for the allocation and reuse strategy.
+type PoolConfig struct {
+ StartSize int // Minimum chunk size that is allocated.
+ PooledSize int // Minimum chunk size that is reused; reusing chunks that are too small results in overhead.
+ MaxSize int // Maximum chunk size that will be allocated.
+}
+
+var config = PoolConfig{
+ StartSize: 128,
+ PooledSize: 512,
+ MaxSize: 32768,
+}
+
+// Reuse pool: chunk size -> pool.
+var buffers = map[int]*sync.Pool{}
+
+func initBuffers() {
+ for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
+ buffers[l] = new(sync.Pool)
+ }
+}
+
+func init() {
+ initBuffers()
+}
+
+// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
+func Init(cfg PoolConfig) {
+ config = cfg
+ initBuffers()
+}
+
+// putBuf returns a chunk to the reuse pool if it is large enough to be reused.
+func putBuf(buf []byte) {
+ size := cap(buf)
+ if size < config.PooledSize {
+ return
+ }
+ if c := buffers[size]; c != nil {
+ c.Put(buf[:0])
+ }
+}
+
+// getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
+func getBuf(size int) []byte {
+ if size < config.PooledSize {
+ return make([]byte, 0, size)
+ }
+
+ if c := buffers[size]; c != nil {
+ v := c.Get()
+ if v != nil {
+ return v.([]byte)
+ }
+ }
+ return make([]byte, 0, size)
+}
+
+// Buffer is a buffer optimized for serialization without extra copying.
+type Buffer struct {
+
+ // Buf is the current chunk that can be used for serialization.
+ Buf []byte
+
+ toPool []byte
+ bufs [][]byte
+}
+
+// EnsureSpace makes sure that the current chunk contains at least s free bytes,
+// possibly creating a new chunk.
+func (b *Buffer) EnsureSpace(s int) {
+ if cap(b.Buf)-len(b.Buf) >= s {
+ return
+ }
+ l := len(b.Buf)
+ if l > 0 {
+ if cap(b.toPool) != cap(b.Buf) {
+ // Chunk was reallocated, toPool can be pooled.
+ putBuf(b.toPool)
+ }
+ if cap(b.bufs) == 0 {
+ b.bufs = make([][]byte, 0, 8)
+ }
+ b.bufs = append(b.bufs, b.Buf)
+ l = cap(b.toPool) * 2
+ } else {
+ l = config.StartSize
+ }
+
+ if l > config.MaxSize {
+ l = config.MaxSize
+ }
+ b.Buf = getBuf(l)
+ b.toPool = b.Buf
+}
+
+// AppendByte appends a single byte to the buffer.
+func (b *Buffer) AppendByte(data byte) {
+ if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+ b.EnsureSpace(1)
+ }
+ b.Buf = append(b.Buf, data)
+}
+
+// AppendBytes appends a byte slice to the buffer.
+func (b *Buffer) AppendBytes(data []byte) {
+ for len(data) > 0 {
+ if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+ b.EnsureSpace(1)
+ }
+
+ sz := cap(b.Buf) - len(b.Buf)
+ if sz > len(data) {
+ sz = len(data)
+ }
+
+ b.Buf = append(b.Buf, data[:sz]...)
+ data = data[sz:]
+ }
+}
+
+// AppendString appends a string to the buffer.
+func (b *Buffer) AppendString(data string) {
+ for len(data) > 0 {
+ if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+ b.EnsureSpace(1)
+ }
+
+ sz := cap(b.Buf) - len(b.Buf)
+ if sz > len(data) {
+ sz = len(data)
+ }
+
+ b.Buf = append(b.Buf, data[:sz]...)
+ data = data[sz:]
+ }
+}
+
+// Size computes the size of a buffer by adding sizes of every chunk.
+func (b *Buffer) Size() int {
+ size := len(b.Buf)
+ for _, buf := range b.bufs {
+ size += len(buf)
+ }
+ return size
+}
+
+// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
+func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
+ var n int
+ for _, buf := range b.bufs {
+ if err == nil {
+ n, err = w.Write(buf)
+ written += n
+ }
+ putBuf(buf)
+ }
+
+ if err == nil {
+ n, err = w.Write(b.Buf)
+ written += n
+ }
+ putBuf(b.toPool)
+
+ b.bufs = nil
+ b.Buf = nil
+ b.toPool = nil
+
+ return
+}
+
+// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
+// copied if it does not fit in a single chunk. You can optionally provide one byte
+// slice as an argument, which it will try to reuse.
+func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
+ if len(b.bufs) == 0 {
+ ret := b.Buf
+ b.toPool = nil
+ b.Buf = nil
+ return ret
+ }
+
+ var ret []byte
+ size := b.Size()
+
+ // If we got a buffer as an argument and it is big enough, reuse it.
+ if len(reuse) == 1 && cap(reuse[0]) >= size {
+ ret = reuse[0][:0]
+ } else {
+ ret = make([]byte, 0, size)
+ }
+ for _, buf := range b.bufs {
+ ret = append(ret, buf...)
+ putBuf(buf)
+ }
+
+ ret = append(ret, b.Buf...)
+ putBuf(b.toPool)
+
+ b.bufs = nil
+ b.toPool = nil
+ b.Buf = nil
+
+ return ret
+}
+
+type readCloser struct {
+ offset int
+ bufs [][]byte
+}
+
+func (r *readCloser) Read(p []byte) (n int, err error) {
+ for _, buf := range r.bufs {
+ // Copy as much as we can.
+ x := copy(p[n:], buf[r.offset:])
+ n += x // Increment how much we filled.
+
+ // Did we empty the whole buffer?
+ if r.offset+x == len(buf) {
+ // On to the next buffer.
+ r.offset = 0
+ r.bufs = r.bufs[1:]
+
+ // We can release this buffer.
+ putBuf(buf)
+ } else {
+ r.offset += x
+ }
+
+ if n == len(p) {
+ break
+ }
+ }
+ // No buffers left or nothing read?
+ if len(r.bufs) == 0 {
+ err = io.EOF
+ }
+ return
+}
+
+func (r *readCloser) Close() error {
+ // Release all remaining buffers.
+ for _, buf := range r.bufs {
+ putBuf(buf)
+ }
+ // In case Close gets called multiple times.
+ r.bufs = nil
+
+ return nil
+}
+
+// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
+func (b *Buffer) ReadCloser() io.ReadCloser {
+ ret := &readCloser{0, append(b.bufs, b.Buf)}
+
+ b.bufs = nil
+ b.toPool = nil
+ b.Buf = nil
+
+ return ret
+}
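
Used on its own, the buffer accumulates appends across a doubling chain of pooled chunks and is drained exactly once: BuildBytes for a contiguous slice, DumpTo for streaming into an io.Writer, or ReadCloser for pull-style consumption, each path recycling the chunks. A small usage sketch:

package main

import (
	"fmt"

	"github.com/mailru/easyjson/buffer"
)

func main() {
	var b buffer.Buffer
	b.AppendString("hello, ")
	b.AppendBytes([]byte("world"))

	// BuildBytes concatenates the chunk chain (copying only when more than
	// one chunk was allocated) and returns the chunks to the pool.
	out := b.BuildBytes()
	fmt.Printf("%s (buffer now holds %d bytes)\n", out, b.Size())
}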
diff --git a/vendor/github.com/mailru/easyjson/buffer/pool_test.go b/vendor/github.com/mailru/easyjson/buffer/pool_test.go
new file mode 100644
index 000000000..680623ace
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/buffer/pool_test.go
@@ -0,0 +1,107 @@
+package buffer
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestAppendByte(t *testing.T) {
+ var b Buffer
+ var want []byte
+
+ for i := 0; i < 1000; i++ {
+ b.AppendByte(1)
+ b.AppendByte(2)
+ want = append(want, 1, 2)
+ }
+
+ got := b.BuildBytes()
+ if !bytes.Equal(got, want) {
+ t.Errorf("BuildBytes() = %v; want %v", got, want)
+ }
+}
+
+func TestAppendBytes(t *testing.T) {
+ var b Buffer
+ var want []byte
+
+ for i := 0; i < 1000; i++ {
+ b.AppendBytes([]byte{1, 2})
+ want = append(want, 1, 2)
+ }
+
+ got := b.BuildBytes()
+ if !bytes.Equal(got, want) {
+ t.Errorf("BuildBytes() = %v; want %v", got, want)
+ }
+}
+
+func TestAppendString(t *testing.T) {
+ var b Buffer
+ var want []byte
+
+ s := "test"
+ for i := 0; i < 1000; i++ {
+ b.AppendBytes([]byte(s))
+ want = append(want, s...)
+ }
+
+ got := b.BuildBytes()
+ if !bytes.Equal(got, want) {
+ t.Errorf("BuildBytes() = %v; want %v", got, want)
+ }
+}
+
+func TestDumpTo(t *testing.T) {
+ var b Buffer
+ var want []byte
+
+ s := "test"
+ for i := 0; i < 1000; i++ {
+ b.AppendBytes([]byte(s))
+ want = append(want, s...)
+ }
+
+ out := &bytes.Buffer{}
+ n, err := b.DumpTo(out)
+ if err != nil {
+ t.Errorf("DumpTo() error: %v", err)
+ }
+
+ got := out.Bytes()
+ if !bytes.Equal(got, want) {
+ t.Errorf("DumpTo(): got %v; want %v", got, want)
+ }
+
+ if n != len(want) {
+ t.Errorf("DumpTo() = %v; want %v", n, len(want))
+ }
+}
+
+func TestReadCloser(t *testing.T) {
+ var b Buffer
+ var want []byte
+
+ s := "test"
+ for i := 0; i < 1000; i++ {
+ b.AppendBytes([]byte(s))
+ want = append(want, s...)
+ }
+
+ out := &bytes.Buffer{}
+ rc := b.ReadCloser()
+ n, err := out.ReadFrom(rc)
+ if err != nil {
+ t.Errorf("ReadCloser() error: %v", err)
+ }
+ rc.Close() // Will always return nil
+
+ got := out.Bytes()
+ if !bytes.Equal(got, want) {
+ t.Errorf("DumpTo(): got %v; want %v", got, want)
+ }
+
+ if n != int64(len(want)) {
+ t.Errorf("DumpTo() = %v; want %v", n, len(want))
+ }
+}
diff --git a/vendor/github.com/mailru/easyjson/easyjson/main.go b/vendor/github.com/mailru/easyjson/easyjson/main.go
new file mode 100644
index 000000000..1cd30bb36
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/easyjson/main.go
@@ -0,0 +1,106 @@
+package main
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/mailru/easyjson/bootstrap"
+ // Reference the gen package to be friendly to vendoring tools,
+ // as it is an indirect dependency.
+ // (The temporary bootstrapping code uses it.)
+ _ "github.com/mailru/easyjson/gen"
+ "github.com/mailru/easyjson/parser"
+)
+
+var buildTags = flag.String("build_tags", "", "build tags to add to generated file")
+var snakeCase = flag.Bool("snake_case", false, "use snake_case names instead of CamelCase by default")
+var lowerCamelCase = flag.Bool("lower_camel_case", false, "use lowerCamelCase names instead of CamelCase by default")
+var noStdMarshalers = flag.Bool("no_std_marshalers", false, "don't generate MarshalJSON/UnmarshalJSON funcs")
+var omitEmpty = flag.Bool("omit_empty", false, "omit empty fields by default")
+var allStructs = flag.Bool("all", false, "generate marshaler/unmarshalers for all structs in a file")
+var leaveTemps = flag.Bool("leave_temps", false, "do not delete temporary files")
+var stubs = flag.Bool("stubs", false, "only generate stubs for marshaler/unmarshaler funcs")
+var noformat = flag.Bool("noformat", false, "do not run 'gofmt -w' on output file")
+var specifiedName = flag.String("output_filename", "", "specify the filename of the output")
+var processPkg = flag.Bool("pkg", false, "process the whole package instead of just the given file")
+
+func generate(fname string) (err error) {
+ fInfo, err := os.Stat(fname)
+ if err != nil {
+ return err
+ }
+
+ p := parser.Parser{AllStructs: *allStructs}
+ if err := p.Parse(fname, fInfo.IsDir()); err != nil {
+ return fmt.Errorf("Error parsing %v: %v", fname, err)
+ }
+
+ var outName string
+ if fInfo.IsDir() {
+ outName = filepath.Join(fname, p.PkgName+"_easyjson.go")
+ } else {
+ if s := strings.TrimSuffix(fname, ".go"); s == fname {
+ return errors.New("Filename must end in '.go'")
+ } else {
+ outName = s + "_easyjson.go"
+ }
+ }
+
+ if *specifiedName != "" {
+ outName = *specifiedName
+ }
+
+ var trimmedBuildTags string
+ if *buildTags != "" {
+ trimmedBuildTags = strings.TrimSpace(*buildTags)
+ }
+
+ g := bootstrap.Generator{
+ BuildTags: trimmedBuildTags,
+ PkgPath: p.PkgPath,
+ PkgName: p.PkgName,
+ Types: p.StructNames,
+ SnakeCase: *snakeCase,
+ LowerCamelCase: *lowerCamelCase,
+ NoStdMarshalers: *noStdMarshalers,
+ OmitEmpty: *omitEmpty,
+ LeaveTemps: *leaveTemps,
+ OutName: outName,
+ StubsOnly: *stubs,
+ NoFormat: *noformat,
+ }
+
+ if err := g.Run(); err != nil {
+ return fmt.Errorf("Bootstrap failed: %v", err)
+ }
+ return nil
+}
+
+func main() {
+ flag.Parse()
+
+ files := flag.Args()
+
+ gofile := os.Getenv("GOFILE")
+ if *processPkg {
+ gofile = filepath.Dir(gofile)
+ }
+
+ if len(files) == 0 && gofile != "" {
+ files = []string{gofile}
+ } else if len(files) == 0 {
+ flag.Usage()
+ os.Exit(1)
+ }
+
+ for _, fname := range files {
+ if err := generate(fname); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ }
+}
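
In day-to-day use the command is typically attached to a source file with go:generate, with the flags above steering naming and output. A hedged sketch; the file and type names are made up:

//go:generate easyjson -all user.go

package model

// User picks up MarshalJSON/UnmarshalJSON plus the MarshalEasyJSON and
// UnmarshalEasyJSON pair in user_easyjson.go once `go generate` runs.
type User struct {
	ID   int64  `json:"id"`
	Name string `json:"name"`
}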
diff --git a/vendor/github.com/mailru/easyjson/gen/decoder.go b/vendor/github.com/mailru/easyjson/gen/decoder.go
new file mode 100644
index 000000000..021933ac8
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/gen/decoder.go
@@ -0,0 +1,488 @@
+package gen
+
+import (
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+ "unicode"
+
+ "github.com/mailru/easyjson"
+)
+
+// Target this byte size for initial slice allocation to reduce garbage collection.
+const minSliceBytes = 64
+
+func (g *Generator) getDecoderName(t reflect.Type) string {
+ return g.functionName("decode", t)
+}
+
+var primitiveDecoders = map[reflect.Kind]string{
+ reflect.String: "in.String()",
+ reflect.Bool: "in.Bool()",
+ reflect.Int: "in.Int()",
+ reflect.Int8: "in.Int8()",
+ reflect.Int16: "in.Int16()",
+ reflect.Int32: "in.Int32()",
+ reflect.Int64: "in.Int64()",
+ reflect.Uint: "in.Uint()",
+ reflect.Uint8: "in.Uint8()",
+ reflect.Uint16: "in.Uint16()",
+ reflect.Uint32: "in.Uint32()",
+ reflect.Uint64: "in.Uint64()",
+ reflect.Float32: "in.Float32()",
+ reflect.Float64: "in.Float64()",
+}
+
+var primitiveStringDecoders = map[reflect.Kind]string{
+ reflect.String: "in.String()",
+ reflect.Int: "in.IntStr()",
+ reflect.Int8: "in.Int8Str()",
+ reflect.Int16: "in.Int16Str()",
+ reflect.Int32: "in.Int32Str()",
+ reflect.Int64: "in.Int64Str()",
+ reflect.Uint: "in.UintStr()",
+ reflect.Uint8: "in.Uint8Str()",
+ reflect.Uint16: "in.Uint16Str()",
+ reflect.Uint32: "in.Uint32Str()",
+ reflect.Uint64: "in.Uint64Str()",
+ reflect.Uintptr: "in.UintptrStr()",
+}
+
+var customDecoders = map[string]string{
+ "json.Number": "in.JsonNumber()",
+}
+
+// genTypeDecoder generates decoding code for the type t, preferring an unmarshaler interface if t implements one.
+func (g *Generator) genTypeDecoder(t reflect.Type, out string, tags fieldTags, indent int) error {
+ ws := strings.Repeat(" ", indent)
+
+ unmarshalerIface := reflect.TypeOf((*easyjson.Unmarshaler)(nil)).Elem()
+ if reflect.PtrTo(t).Implements(unmarshalerIface) {
+ fmt.Fprintln(g.out, ws+"("+out+").UnmarshalEasyJSON(in)")
+ return nil
+ }
+
+ unmarshalerIface = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
+ if reflect.PtrTo(t).Implements(unmarshalerIface) {
+ fmt.Fprintln(g.out, ws+"if data := in.Raw(); in.Ok() {")
+ fmt.Fprintln(g.out, ws+" in.AddError( ("+out+").UnmarshalJSON(data) )")
+ fmt.Fprintln(g.out, ws+"}")
+ return nil
+ }
+
+ unmarshalerIface = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+ if reflect.PtrTo(t).Implements(unmarshalerIface) {
+ fmt.Fprintln(g.out, ws+"if data := in.UnsafeBytes(); in.Ok() {")
+ fmt.Fprintln(g.out, ws+" in.AddError( ("+out+").UnmarshalText(data) )")
+ fmt.Fprintln(g.out, ws+"}")
+ return nil
+ }
+
+ err := g.genTypeDecoderNoCheck(t, out, tags, indent)
+ return err
+}
+
+// genTypeDecoderNoCheck generates decoding code for the type t.
+func (g *Generator) genTypeDecoderNoCheck(t reflect.Type, out string, tags fieldTags, indent int) error {
+ ws := strings.Repeat(" ", indent)
+ // Check whether the type is primitive; this needs to happen after the interface checks.
+ if dec := customDecoders[t.String()]; dec != "" {
+ fmt.Fprintln(g.out, ws+out+" = "+dec)
+ return nil
+ } else if dec := primitiveStringDecoders[t.Kind()]; dec != "" && tags.asString {
+ fmt.Fprintln(g.out, ws+out+" = "+g.getType(t)+"("+dec+")")
+ return nil
+ } else if dec := primitiveDecoders[t.Kind()]; dec != "" {
+ fmt.Fprintln(g.out, ws+out+" = "+g.getType(t)+"("+dec+")")
+ return nil
+ }
+
+ switch t.Kind() {
+ case reflect.Slice:
+ tmpVar := g.uniqueVarName()
+ elem := t.Elem()
+
+ if elem.Kind() == reflect.Uint8 {
+ fmt.Fprintln(g.out, ws+"if in.IsNull() {")
+ fmt.Fprintln(g.out, ws+" in.Skip()")
+ fmt.Fprintln(g.out, ws+" "+out+" = nil")
+ fmt.Fprintln(g.out, ws+"} else {")
+ fmt.Fprintln(g.out, ws+" "+out+" = in.Bytes()")
+ fmt.Fprintln(g.out, ws+"}")
+
+ } else {
+
+ capacity := minSliceBytes / elem.Size()
+ if capacity == 0 {
+ capacity = 1
+ }
+
+ fmt.Fprintln(g.out, ws+"if in.IsNull() {")
+ fmt.Fprintln(g.out, ws+" in.Skip()")
+ fmt.Fprintln(g.out, ws+" "+out+" = nil")
+ fmt.Fprintln(g.out, ws+"} else {")
+ fmt.Fprintln(g.out, ws+" in.Delim('[')")
+ fmt.Fprintln(g.out, ws+" if "+out+" == nil {")
+ fmt.Fprintln(g.out, ws+" if !in.IsDelim(']') {")
+ fmt.Fprintln(g.out, ws+" "+out+" = make("+g.getType(t)+", 0, "+fmt.Sprint(capacity)+")")
+ fmt.Fprintln(g.out, ws+" } else {")
+ fmt.Fprintln(g.out, ws+" "+out+" = "+g.getType(t)+"{}")
+ fmt.Fprintln(g.out, ws+" }")
+ fmt.Fprintln(g.out, ws+" } else { ")
+ fmt.Fprintln(g.out, ws+" "+out+" = ("+out+")[:0]")
+ fmt.Fprintln(g.out, ws+" }")
+ fmt.Fprintln(g.out, ws+" for !in.IsDelim(']') {")
+ fmt.Fprintln(g.out, ws+" var "+tmpVar+" "+g.getType(elem))
+
+ if err := g.genTypeDecoder(elem, tmpVar, tags, indent+2); err != nil {
+ return err
+ }
+
+ fmt.Fprintln(g.out, ws+" "+out+" = append("+out+", "+tmpVar+")")
+ fmt.Fprintln(g.out, ws+" in.WantComma()")
+ fmt.Fprintln(g.out, ws+" }")
+ fmt.Fprintln(g.out, ws+" in.Delim(']')")
+ fmt.Fprintln(g.out, ws+"}")
+ }
+
+ case reflect.Array:
+ iterVar := g.uniqueVarName()
+ elem := t.Elem()
+
+ if elem.Kind() == reflect.Uint8 {
+ fmt.Fprintln(g.out, ws+"if in.IsNull() {")
+ fmt.Fprintln(g.out, ws+" in.Skip()")
+ fmt.Fprintln(g.out, ws+"} else {")
+ fmt.Fprintln(g.out, ws+" copy("+out+"[:], in.Bytes())")
+ fmt.Fprintln(g.out, ws+"}")
+
+ } else {
+
+ length := t.Len()
+
+ fmt.Fprintln(g.out, ws+"if in.IsNull() {")
+ fmt.Fprintln(g.out, ws+" in.Skip()")
+ fmt.Fprintln(g.out, ws+"} else {")
+ fmt.Fprintln(g.out, ws+" in.Delim('[')")
+ fmt.Fprintln(g.out, ws+" "+iterVar+" := 0")
+ fmt.Fprintln(g.out, ws+" for !in.IsDelim(']') {")
+ fmt.Fprintln(g.out, ws+" if "+iterVar+" < "+fmt.Sprint(length)+" {")
+
+ if err := g.genTypeDecoder(elem, out+"["+iterVar+"]", tags, indent+3); err != nil {
+ return err
+ }
+
+ fmt.Fprintln(g.out, ws+" "+iterVar+"++")
+ fmt.Fprintln(g.out, ws+" } else {")
+ fmt.Fprintln(g.out, ws+" in.SkipRecursive()")
+ fmt.Fprintln(g.out, ws+" }")
+ fmt.Fprintln(g.out, ws+" in.WantComma()")
+ fmt.Fprintln(g.out, ws+" }")
+ fmt.Fprintln(g.out, ws+" in.Delim(']')")
+ fmt.Fprintln(g.out, ws+"}")
+ }
+
+ case reflect.Struct:
+ dec := g.getDecoderName(t)
+ g.addType(t)
+
+ fmt.Fprintln(g.out, ws+dec+"(in, &"+out+")")
+
+ case reflect.Ptr:
+ fmt.Fprintln(g.out, ws+"if in.IsNull() {")
+ fmt.Fprintln(g.out, ws+" in.Skip()")
+ fmt.Fprintln(g.out, ws+" "+out+" = nil")
+ fmt.Fprintln(g.out, ws+"} else {")
+ fmt.Fprintln(g.out, ws+" if "+out+" == nil {")
+ fmt.Fprintln(g.out, ws+" "+out+" = new("+g.getType(t.Elem())+")")
+ fmt.Fprintln(g.out, ws+" }")
+
+ if err := g.genTypeDecoder(t.Elem(), "*"+out, tags, indent+1); err != nil {
+ return err
+ }
+
+ fmt.Fprintln(g.out, ws+"}")
+
+ case reflect.Map:
+ key := t.Key()
+ keyDec, ok := primitiveStringDecoders[key.Kind()]
+ if !ok {
+ return fmt.Errorf("map type %v not supported: only string and integer keys are allowed", key)
+ }
+ elem := t.Elem()
+ tmpVar := g.uniqueVarName()
+
+ fmt.Fprintln(g.out, ws+"if in.IsNull() {")
+ fmt.Fprintln(g.out, ws+" in.Skip()")
+ fmt.Fprintln(g.out, ws+"} else {")
+ fmt.Fprintln(g.out, ws+" in.Delim('{')")
+ fmt.Fprintln(g.out, ws+" if !in.IsDelim('}') {")
+ fmt.Fprintln(g.out, ws+" "+out+" = make("+g.getType(t)+")")
+ fmt.Fprintln(g.out, ws+" } else {")
+ fmt.Fprintln(g.out, ws+" "+out+" = nil")
+ fmt.Fprintln(g.out, ws+" }")
+
+ fmt.Fprintln(g.out, ws+" for !in.IsDelim('}') {")
+ fmt.Fprintln(g.out, ws+" key := "+g.getType(key)+"("+keyDec+")")
+ fmt.Fprintln(g.out, ws+" in.WantColon()")
+ fmt.Fprintln(g.out, ws+" var "+tmpVar+" "+g.getType(elem))
+
+ if err := g.genTypeDecoder(elem, tmpVar, tags, indent+2); err != nil {
+ return err
+ }
+
+ fmt.Fprintln(g.out, ws+" ("+out+")[key] = "+tmpVar)
+ fmt.Fprintln(g.out, ws+" in.WantComma()")
+ fmt.Fprintln(g.out, ws+" }")
+ fmt.Fprintln(g.out, ws+" in.Delim('}')")
+ fmt.Fprintln(g.out, ws+"}")
+
+ case reflect.Interface:
+ if t.NumMethod() != 0 {
+ return fmt.Errorf("interface type %v not supported: only interface{} is allowed", t)
+ }
+ fmt.Fprintln(g.out, ws+"if m, ok := "+out+".(easyjson.Unmarshaler); ok {")
+ fmt.Fprintln(g.out, ws+"m.UnmarshalEasyJSON(in)")
+ fmt.Fprintln(g.out, ws+"} else if m, ok := "+out+".(json.Unmarshaler); ok {")
+ fmt.Fprintln(g.out, ws+"_ = m.UnmarshalJSON(in.Raw())")
+ fmt.Fprintln(g.out, ws+"} else {")
+ fmt.Fprintln(g.out, ws+" "+out+" = in.Interface()")
+ fmt.Fprintln(g.out, ws+"}")
+ default:
+ return fmt.Errorf("don't know how to decode %v", t)
+ }
+ return nil
+}
+
+func (g *Generator) genStructFieldDecoder(t reflect.Type, f reflect.StructField) error {
+ jsonName := g.fieldNamer.GetJSONFieldName(t, f)
+ tags := parseFieldTags(f)
+
+ if tags.omit {
+ return nil
+ }
+
+ fmt.Fprintf(g.out, " case %q:\n", jsonName)
+ if err := g.genTypeDecoder(f.Type, "out."+f.Name, tags, 3); err != nil {
+ return err
+ }
+
+ if tags.required {
+ fmt.Fprintf(g.out, "%sSet = true\n", f.Name)
+ }
+
+ return nil
+}
+
+func (g *Generator) genRequiredFieldSet(t reflect.Type, f reflect.StructField) {
+ tags := parseFieldTags(f)
+
+ if !tags.required {
+ return
+ }
+
+ fmt.Fprintf(g.out, "var %sSet bool\n", f.Name)
+}
+
+func (g *Generator) genRequiredFieldCheck(t reflect.Type, f reflect.StructField) {
+ jsonName := g.fieldNamer.GetJSONFieldName(t, f)
+ tags := parseFieldTags(f)
+
+ if !tags.required {
+ return
+ }
+
+ g.imports["fmt"] = "fmt"
+
+ fmt.Fprintf(g.out, "if !%sSet {\n", f.Name)
+ fmt.Fprintf(g.out, " in.AddError(fmt.Errorf(\"key '%s' is required\"))\n", jsonName)
+ fmt.Fprintf(g.out, "}\n")
+}
+
+func mergeStructFields(fields1, fields2 []reflect.StructField) (fields []reflect.StructField) {
+ used := map[string]bool{}
+ for _, f := range fields2 {
+ used[f.Name] = true
+ fields = append(fields, f)
+ }
+
+ for _, f := range fields1 {
+ if !used[f.Name] {
+ fields = append(fields, f)
+ }
+ }
+ return
+}
+
+func getStructFields(t reflect.Type) ([]reflect.StructField, error) {
+ if t.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("got %v; expected a struct", t)
+ }
+
+ var efields []reflect.StructField
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !f.Anonymous {
+ continue
+ }
+
+ t1 := f.Type
+ if t1.Kind() == reflect.Ptr {
+ t1 = t1.Elem()
+ }
+
+ fs, err := getStructFields(t1)
+ if err != nil {
+ return nil, fmt.Errorf("error processing embedded field: %v", err)
+ }
+ efields = mergeStructFields(efields, fs)
+ }
+
+ var fields []reflect.StructField
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Anonymous {
+ continue
+ }
+
+ c := []rune(f.Name)[0]
+ if unicode.IsUpper(c) {
+ fields = append(fields, f)
+ }
+ }
+ return mergeStructFields(efields, fields), nil
+}
+
+func (g *Generator) genDecoder(t reflect.Type) error {
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array, reflect.Map:
+ return g.genSliceArrayDecoder(t)
+ default:
+ return g.genStructDecoder(t)
+ }
+}
+
+func (g *Generator) genSliceArrayDecoder(t reflect.Type) error {
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array, reflect.Map:
+ default:
+ return fmt.Errorf("cannot generate encoder/decoder for %v, not a slice/array/map type", t)
+ }
+
+ fname := g.getDecoderName(t)
+ typ := g.getType(t)
+
+ fmt.Fprintln(g.out, "func "+fname+"(in *jlexer.Lexer, out *"+typ+") {")
+ fmt.Fprintln(g.out, " isTopLevel := in.IsStart()")
+ err := g.genTypeDecoderNoCheck(t, "*out", fieldTags{}, 1)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintln(g.out, " if isTopLevel {")
+ fmt.Fprintln(g.out, " in.Consumed()")
+ fmt.Fprintln(g.out, " }")
+ fmt.Fprintln(g.out, "}")
+
+ return nil
+}
+
+func (g *Generator) genStructDecoder(t reflect.Type) error {
+ if t.Kind() != reflect.Struct {
+ return fmt.Errorf("cannot generate encoder/decoder for %v, not a struct type", t)
+ }
+
+ fname := g.getDecoderName(t)
+ typ := g.getType(t)
+
+ fmt.Fprintln(g.out, "func "+fname+"(in *jlexer.Lexer, out *"+typ+") {")
+ fmt.Fprintln(g.out, " isTopLevel := in.IsStart()")
+ fmt.Fprintln(g.out, " if in.IsNull() {")
+ fmt.Fprintln(g.out, " if isTopLevel {")
+ fmt.Fprintln(g.out, " in.Consumed()")
+ fmt.Fprintln(g.out, " }")
+ fmt.Fprintln(g.out, " in.Skip()")
+ fmt.Fprintln(g.out, " return")
+ fmt.Fprintln(g.out, " }")
+
+ // Init embedded pointer fields.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !f.Anonymous || f.Type.Kind() != reflect.Ptr {
+ continue
+ }
+ fmt.Fprintln(g.out, " out."+f.Name+" = new("+g.getType(f.Type.Elem())+")")
+ }
+
+ fs, err := getStructFields(t)
+ if err != nil {
+ return fmt.Errorf("cannot generate decoder for %v: %v", t, err)
+ }
+
+ for _, f := range fs {
+ g.genRequiredFieldSet(t, f)
+ }
+
+ fmt.Fprintln(g.out, " in.Delim('{')")
+ fmt.Fprintln(g.out, " for !in.IsDelim('}') {")
+ fmt.Fprintln(g.out, " key := in.UnsafeString()")
+ fmt.Fprintln(g.out, " in.WantColon()")
+ fmt.Fprintln(g.out, " if in.IsNull() {")
+ fmt.Fprintln(g.out, " in.Skip()")
+ fmt.Fprintln(g.out, " in.WantComma()")
+ fmt.Fprintln(g.out, " continue")
+ fmt.Fprintln(g.out, " }")
+
+ fmt.Fprintln(g.out, " switch key {")
+ for _, f := range fs {
+ if err := g.genStructFieldDecoder(t, f); err != nil {
+ return err
+ }
+ }
+
+ fmt.Fprintln(g.out, " default:")
+ fmt.Fprintln(g.out, " in.SkipRecursive()")
+ fmt.Fprintln(g.out, " }")
+ fmt.Fprintln(g.out, " in.WantComma()")
+ fmt.Fprintln(g.out, " }")
+ fmt.Fprintln(g.out, " in.Delim('}')")
+ fmt.Fprintln(g.out, " if isTopLevel {")
+ fmt.Fprintln(g.out, " in.Consumed()")
+ fmt.Fprintln(g.out, " }")
+
+ for _, f := range fs {
+ g.genRequiredFieldCheck(t, f)
+ }
+
+ fmt.Fprintln(g.out, "}")
+
+ return nil
+}
+
+func (g *Generator) genStructUnmarshaler(t reflect.Type) error {
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array, reflect.Map, reflect.Struct:
+ default:
+ return fmt.Errorf("cannot generate encoder/decoder for %v, not a struct/slice/array/map type", t)
+ }
+
+ fname := g.getDecoderName(t)
+ typ := g.getType(t)
+
+ if !g.noStdMarshalers {
+ fmt.Fprintln(g.out, "// UnmarshalJSON supports json.Unmarshaler interface")
+ fmt.Fprintln(g.out, "func (v *"+typ+") UnmarshalJSON(data []byte) error {")
+ fmt.Fprintln(g.out, " r := jlexer.Lexer{Data: data}")
+ fmt.Fprintln(g.out, " "+fname+"(&r, v)")
+ fmt.Fprintln(g.out, " return r.Error()")
+ fmt.Fprintln(g.out, "}")
+ }
+
+ fmt.Fprintln(g.out, "// UnmarshalEasyJSON supports easyjson.Unmarshaler interface")
+ fmt.Fprintln(g.out, "func (v *"+typ+") UnmarshalEasyJSON(l *jlexer.Lexer) {")
+ fmt.Fprintln(g.out, " "+fname+"(l, v)")
+ fmt.Fprintln(g.out, "}")
+
+ return nil
+}
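
For orientation, this is roughly the shape of code the decoder generator above emits for a hypothetical two-field struct (type User struct { ID int `json:"id"`; Name string `json:"name"` }); the real function names carry a per-file hash suffix:

    func easyjsonDecodeUser(in *jlexer.Lexer, out *User) {
        isTopLevel := in.IsStart()
        if in.IsNull() {
            if isTopLevel {
                in.Consumed()
            }
            in.Skip()
            return
        }
        in.Delim('{')
        for !in.IsDelim('}') {
            key := in.UnsafeString()
            in.WantColon()
            if in.IsNull() {
                in.Skip()
                in.WantComma()
                continue
            }
            switch key {
            case "id":
                out.ID = in.Int()
            case "name":
                out.Name = in.String()
            default:
                in.SkipRecursive()
            }
            in.WantComma()
        }
        in.Delim('}')
        if isTopLevel {
            in.Consumed()
        }
    }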
diff --git a/vendor/github.com/mailru/easyjson/gen/encoder.go b/vendor/github.com/mailru/easyjson/gen/encoder.go
new file mode 100644
index 000000000..48cba15d4
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/gen/encoder.go
@@ -0,0 +1,382 @@
+package gen
+
+import (
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/mailru/easyjson"
+)
+
+func (g *Generator) getEncoderName(t reflect.Type) string {
+ return g.functionName("encode", t)
+}
+
+var primitiveEncoders = map[reflect.Kind]string{
+ reflect.String: "out.String(string(%v))",
+ reflect.Bool: "out.Bool(bool(%v))",
+ reflect.Int: "out.Int(int(%v))",
+ reflect.Int8: "out.Int8(int8(%v))",
+ reflect.Int16: "out.Int16(int16(%v))",
+ reflect.Int32: "out.Int32(int32(%v))",
+ reflect.Int64: "out.Int64(int64(%v))",
+ reflect.Uint: "out.Uint(uint(%v))",
+ reflect.Uint8: "out.Uint8(uint8(%v))",
+ reflect.Uint16: "out.Uint16(uint16(%v))",
+ reflect.Uint32: "out.Uint32(uint32(%v))",
+ reflect.Uint64: "out.Uint64(uint64(%v))",
+ reflect.Float32: "out.Float32(float32(%v))",
+ reflect.Float64: "out.Float64(float64(%v))",
+}
+
+var primitiveStringEncoders = map[reflect.Kind]string{
+ reflect.String: "out.String(string(%v))",
+ reflect.Int: "out.IntStr(int(%v))",
+ reflect.Int8: "out.Int8Str(int8(%v))",
+ reflect.Int16: "out.Int16Str(int16(%v))",
+ reflect.Int32: "out.Int32Str(int32(%v))",
+ reflect.Int64: "out.Int64Str(int64(%v))",
+ reflect.Uint: "out.UintStr(uint(%v))",
+ reflect.Uint8: "out.Uint8Str(uint8(%v))",
+ reflect.Uint16: "out.Uint16Str(uint16(%v))",
+ reflect.Uint32: "out.Uint32Str(uint32(%v))",
+ reflect.Uint64: "out.Uint64Str(uint64(%v))",
+ reflect.Uintptr: "out.UintptrStr(uintptr(%v))",
+}
+
+// fieldTags contains the parsed version of json struct field tags.
+type fieldTags struct {
+ name string
+
+ omit bool
+ omitEmpty bool
+ noOmitEmpty bool
+ asString bool
+ required bool
+}
+
+// parseFieldTags parses the json field tag into a structure.
+func parseFieldTags(f reflect.StructField) fieldTags {
+ var ret fieldTags
+
+ for i, s := range strings.Split(f.Tag.Get("json"), ",") {
+ switch {
+ case i == 0 && s == "-":
+ ret.omit = true
+ case i == 0:
+ ret.name = s
+ case s == "omitempty":
+ ret.omitEmpty = true
+ case s == "!omitempty":
+ ret.noOmitEmpty = true
+ case s == "string":
+ ret.asString = true
+ case s == "required":
+ ret.required = true
+ }
+ }
+
+ return ret
+}
+
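To make the tag grammar concrete, here is how parseFieldTags would interpret a few hypothetical fields:

    type Example struct {
        A int    `json:"a,omitempty"`        // fieldTags{name: "a", omitEmpty: true}
        B string `json:"-"`                  // fieldTags{omit: true}
        C int    `json:"c,string,required"`  // fieldTags{name: "c", asString: true, required: true}
        D bool   `json:",!omitempty"`        // fieldTags{noOmitEmpty: true}
    }
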
+// genTypeEncoder generates code that encodes in (of type t) into the writer, using a marshaler interface when t implements one.
+func (g *Generator) genTypeEncoder(t reflect.Type, in string, tags fieldTags, indent int, assumeNonEmpty bool) error {
+ ws := strings.Repeat(" ", indent)
+
+ marshalerIface := reflect.TypeOf((*easyjson.Marshaler)(nil)).Elem()
+ if reflect.PtrTo(t).Implements(marshalerIface) {
+ fmt.Fprintln(g.out, ws+"("+in+").MarshalEasyJSON(out)")
+ return nil
+ }
+
+ marshalerIface = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
+ if reflect.PtrTo(t).Implements(marshalerIface) {
+ fmt.Fprintln(g.out, ws+"out.Raw( ("+in+").MarshalJSON() )")
+ return nil
+ }
+
+ marshalerIface = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ if reflect.PtrTo(t).Implements(marshalerIface) {
+ fmt.Fprintln(g.out, ws+"out.RawText( ("+in+").MarshalText() )")
+ return nil
+ }
+
+ err := g.genTypeEncoderNoCheck(t, in, tags, indent, assumeNonEmpty)
+ return err
+}
+
+// genTypeEncoderNoCheck generates code that encodes in of type t into the writer.
+func (g *Generator) genTypeEncoderNoCheck(t reflect.Type, in string, tags fieldTags, indent int, assumeNonEmpty bool) error {
+ ws := strings.Repeat(" ", indent)
+
+ // Check whether the type is primitive; this needs to be done after the interface check.
+ if enc := primitiveStringEncoders[t.Kind()]; enc != "" && tags.asString {
+ fmt.Fprintf(g.out, ws+enc+"\n", in)
+ return nil
+ } else if enc := primitiveEncoders[t.Kind()]; enc != "" {
+ fmt.Fprintf(g.out, ws+enc+"\n", in)
+ return nil
+ }
+
+ switch t.Kind() {
+ case reflect.Slice:
+ elem := t.Elem()
+ iVar := g.uniqueVarName()
+ vVar := g.uniqueVarName()
+
+ if t.Elem().Kind() == reflect.Uint8 {
+ fmt.Fprintln(g.out, ws+"out.Base64Bytes("+in+")")
+ } else {
+ if !assumeNonEmpty {
+ fmt.Fprintln(g.out, ws+"if "+in+" == nil && (out.Flags & jwriter.NilSliceAsEmpty) == 0 {")
+ fmt.Fprintln(g.out, ws+` out.RawString("null")`)
+ fmt.Fprintln(g.out, ws+"} else {")
+ } else {
+ fmt.Fprintln(g.out, ws+"{")
+ }
+ fmt.Fprintln(g.out, ws+" out.RawByte('[')")
+ fmt.Fprintln(g.out, ws+" for "+iVar+", "+vVar+" := range "+in+" {")
+ fmt.Fprintln(g.out, ws+" if "+iVar+" > 0 {")
+ fmt.Fprintln(g.out, ws+" out.RawByte(',')")
+ fmt.Fprintln(g.out, ws+" }")
+
+ if err := g.genTypeEncoder(elem, vVar, tags, indent+2, false); err != nil {
+ return err
+ }
+
+ fmt.Fprintln(g.out, ws+" }")
+ fmt.Fprintln(g.out, ws+" out.RawByte(']')")
+ fmt.Fprintln(g.out, ws+"}")
+ }
+
+ case reflect.Array:
+ elem := t.Elem()
+ iVar := g.uniqueVarName()
+
+ if t.Elem().Kind() == reflect.Uint8 {
+ fmt.Fprintln(g.out, ws+"out.Base64Bytes("+in+"[:])")
+ } else {
+ fmt.Fprintln(g.out, ws+"out.RawByte('[')")
+ fmt.Fprintln(g.out, ws+"for "+iVar+" := range "+in+" {")
+ fmt.Fprintln(g.out, ws+" if "+iVar+" > 0 {")
+ fmt.Fprintln(g.out, ws+" out.RawByte(',')")
+ fmt.Fprintln(g.out, ws+" }")
+
+ if err := g.genTypeEncoder(elem, in+"["+iVar+"]", tags, indent+1, false); err != nil {
+ return err
+ }
+
+ fmt.Fprintln(g.out, ws+"}")
+ fmt.Fprintln(g.out, ws+"out.RawByte(']')")
+ }
+
+ case reflect.Struct:
+ enc := g.getEncoderName(t)
+ g.addType(t)
+
+ fmt.Fprintln(g.out, ws+enc+"(out, "+in+")")
+
+ case reflect.Ptr:
+ if !assumeNonEmpty {
+ fmt.Fprintln(g.out, ws+"if "+in+" == nil {")
+ fmt.Fprintln(g.out, ws+` out.RawString("null")`)
+ fmt.Fprintln(g.out, ws+"} else {")
+ }
+
+ if err := g.genTypeEncoder(t.Elem(), "*"+in, tags, indent+1, false); err != nil {
+ return err
+ }
+
+ if !assumeNonEmpty {
+ fmt.Fprintln(g.out, ws+"}")
+ }
+
+ case reflect.Map:
+ key := t.Key()
+ keyEnc, ok := primitiveStringEncoders[key.Kind()]
+ if !ok {
+ return fmt.Errorf("map key type %v not supported: only string and integer keys are allowed", key)
+ }
+ tmpVar := g.uniqueVarName()
+
+ if !assumeNonEmpty {
+ fmt.Fprintln(g.out, ws+"if "+in+" == nil && (out.Flags & jwriter.NilMapAsEmpty) == 0 {")
+ fmt.Fprintln(g.out, ws+" out.RawString(`null`)")
+ fmt.Fprintln(g.out, ws+"} else {")
+ } else {
+ fmt.Fprintln(g.out, ws+"{")
+ }
+ fmt.Fprintln(g.out, ws+" out.RawByte('{')")
+ fmt.Fprintln(g.out, ws+" "+tmpVar+"First := true")
+ fmt.Fprintln(g.out, ws+" for "+tmpVar+"Name, "+tmpVar+"Value := range "+in+" {")
+ fmt.Fprintln(g.out, ws+" if "+tmpVar+"First { "+tmpVar+"First = false } else { out.RawByte(',') }")
+ fmt.Fprintln(g.out, ws+" "+fmt.Sprintf(keyEnc, tmpVar+"Name"))
+ fmt.Fprintln(g.out, ws+" out.RawByte(':')")
+
+ if err := g.genTypeEncoder(t.Elem(), tmpVar+"Value", tags, indent+2, false); err != nil {
+ return err
+ }
+
+ fmt.Fprintln(g.out, ws+" }")
+ fmt.Fprintln(g.out, ws+" out.RawByte('}')")
+ fmt.Fprintln(g.out, ws+"}")
+
+ case reflect.Interface:
+ if t.NumMethod() != 0 {
+ return fmt.Errorf("interface type %v not supported: only interface{} is allowed", t)
+ }
+ fmt.Fprintln(g.out, ws+"if m, ok := "+in+".(easyjson.Marshaler); ok {")
+ fmt.Fprintln(g.out, ws+" m.MarshalEasyJSON(out)")
+ fmt.Fprintln(g.out, ws+"} else if m, ok := "+in+".(json.Marshaler); ok {")
+ fmt.Fprintln(g.out, ws+" out.Raw(m.MarshalJSON())")
+ fmt.Fprintln(g.out, ws+"} else {")
+ fmt.Fprintln(g.out, ws+" out.Raw(json.Marshal("+in+"))")
+ fmt.Fprintln(g.out, ws+"}")
+
+ default:
+ return fmt.Errorf("don't know how to encode %v", t)
+ }
+ return nil
+}
+
+func (g *Generator) notEmptyCheck(t reflect.Type, v string) string {
+ optionalIface := reflect.TypeOf((*easyjson.Optional)(nil)).Elem()
+ if reflect.PtrTo(t).Implements(optionalIface) {
+ return "(" + v + ").IsDefined()"
+ }
+
+ switch t.Kind() {
+ case reflect.Slice, reflect.Map:
+ return "len(" + v + ") != 0"
+ case reflect.Interface, reflect.Ptr:
+ return v + " != nil"
+ case reflect.Bool:
+ return v
+ case reflect.String:
+ return v + ` != ""`
+ case reflect.Float32, reflect.Float64,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+
+ return v + " != 0"
+
+ default:
+ // note: Array types don't have a useful empty value
+ return "true"
+ }
+}
+
+func (g *Generator) genStructFieldEncoder(t reflect.Type, f reflect.StructField) error {
+ jsonName := g.fieldNamer.GetJSONFieldName(t, f)
+ tags := parseFieldTags(f)
+
+ if tags.omit {
+ return nil
+ }
+ noOmitEmpty := (!tags.omitEmpty && !g.omitEmpty) || tags.noOmitEmpty
+ if noOmitEmpty {
+ fmt.Fprintln(g.out, " {")
+ } else {
+ fmt.Fprintln(g.out, " if", g.notEmptyCheck(f.Type, "in."+f.Name), "{")
+ }
+ fmt.Fprintf(g.out, " const prefix string = %q\n", ","+strconv.Quote(jsonName)+":")
+ fmt.Fprintln(g.out, " if first {")
+ fmt.Fprintln(g.out, " first = false")
+ fmt.Fprintln(g.out, " out.RawString(prefix[1:])")
+ fmt.Fprintln(g.out, " } else {")
+ fmt.Fprintln(g.out, " out.RawString(prefix)")
+ fmt.Fprintln(g.out, " }")
+
+ if err := g.genTypeEncoder(f.Type, "in."+f.Name, tags, 2, !noOmitEmpty); err != nil {
+ return err
+ }
+ fmt.Fprintln(g.out, " }")
+ return nil
+}
+
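As an illustration, for a hypothetical field Name string `json:"name,omitempty"` the method above emits code of this shape (the leading comma in prefix is dropped for the first field written):

    if in.Name != "" {
        const prefix string = ",\"name\":"
        if first {
            first = false
            out.RawString(prefix[1:])
        } else {
            out.RawString(prefix)
        }
        out.String(string(in.Name))
    }
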
+func (g *Generator) genEncoder(t reflect.Type) error {
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array, reflect.Map:
+ return g.genSliceArrayMapEncoder(t)
+ default:
+ return g.genStructEncoder(t)
+ }
+}
+
+func (g *Generator) genSliceArrayMapEncoder(t reflect.Type) error {
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array, reflect.Map:
+ default:
+ return fmt.Errorf("cannot generate encoder/decoder for %v, not a slice/array/map type", t)
+ }
+
+ fname := g.getEncoderName(t)
+ typ := g.getType(t)
+
+ fmt.Fprintln(g.out, "func "+fname+"(out *jwriter.Writer, in "+typ+") {")
+ err := g.genTypeEncoderNoCheck(t, "in", fieldTags{}, 1, false)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintln(g.out, "}")
+ return nil
+}
+
+func (g *Generator) genStructEncoder(t reflect.Type) error {
+ if t.Kind() != reflect.Struct {
+ return fmt.Errorf("cannot generate encoder/decoder for %v, not a struct type", t)
+ }
+
+ fname := g.getEncoderName(t)
+ typ := g.getType(t)
+
+ fmt.Fprintln(g.out, "func "+fname+"(out *jwriter.Writer, in "+typ+") {")
+ fmt.Fprintln(g.out, " out.RawByte('{')")
+ fmt.Fprintln(g.out, " first := true")
+ fmt.Fprintln(g.out, " _ = first")
+
+ fs, err := getStructFields(t)
+ if err != nil {
+ return fmt.Errorf("cannot generate encoder for %v: %v", t, err)
+ }
+ for _, f := range fs {
+ if err := g.genStructFieldEncoder(t, f); err != nil {
+ return err
+ }
+ }
+
+ fmt.Fprintln(g.out, " out.RawByte('}')")
+ fmt.Fprintln(g.out, "}")
+
+ return nil
+}
+
+func (g *Generator) genStructMarshaler(t reflect.Type) error {
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array, reflect.Map, reflect.Struct:
+ default:
+ return fmt.Errorf("cannot generate encoder/decoder for %v, not a struct/slice/array/map type", t)
+ }
+
+ fname := g.getEncoderName(t)
+ typ := g.getType(t)
+
+ if !g.noStdMarshalers {
+ fmt.Fprintln(g.out, "// MarshalJSON supports json.Marshaler interface")
+ fmt.Fprintln(g.out, "func (v "+typ+") MarshalJSON() ([]byte, error) {")
+ fmt.Fprintln(g.out, " w := jwriter.Writer{}")
+ fmt.Fprintln(g.out, " "+fname+"(&w, v)")
+ fmt.Fprintln(g.out, " return w.Buffer.BuildBytes(), w.Error")
+ fmt.Fprintln(g.out, "}")
+ }
+
+ fmt.Fprintln(g.out, "// MarshalEasyJSON supports easyjson.Marshaler interface")
+ fmt.Fprintln(g.out, "func (v "+typ+") MarshalEasyJSON(w *jwriter.Writer) {")
+ fmt.Fprintln(g.out, " "+fname+"(w, v)")
+ fmt.Fprintln(g.out, "}")
+
+ return nil
+}
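
A small sketch of how the two encoder tables above play out for a hypothetical struct: with the ",string" option the generator routes the value through primitiveStringEncoders, without it through primitiveEncoders:

    type Stats struct {
        Count int `json:"count,string"` // encoded via out.IntStr -> {"count":"42"}
        Total int `json:"total"`        // encoded via out.Int    -> {"total":7}
    }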
diff --git a/vendor/github.com/mailru/easyjson/gen/generator.go b/vendor/github.com/mailru/easyjson/gen/generator.go
new file mode 100644
index 000000000..eb0d70ba2
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/gen/generator.go
@@ -0,0 +1,523 @@
+package gen
+
+import (
+ "bytes"
+ "fmt"
+ "hash/fnv"
+ "io"
+ "path"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+const pkgWriter = "github.com/mailru/easyjson/jwriter"
+const pkgLexer = "github.com/mailru/easyjson/jlexer"
+const pkgEasyJSON = "github.com/mailru/easyjson"
+
+// FieldNamer defines a policy for generating names for struct fields.
+type FieldNamer interface {
+ GetJSONFieldName(t reflect.Type, f reflect.StructField) string
+}
+
+// Generator generates the requested marshaler/unmarshalers.
+type Generator struct {
+ out *bytes.Buffer
+
+ pkgName string
+ pkgPath string
+ buildTags string
+ hashString string
+
+ varCounter int
+
+ noStdMarshalers bool
+ omitEmpty bool
+ fieldNamer FieldNamer
+
+ // package path to local alias map for tracking imports
+ imports map[string]string
+
+ // types that marshalers were requested for by user
+ marshalers map[reflect.Type]bool
+
+ // types that encoders were already generated for
+ typesSeen map[reflect.Type]bool
+
+ // types that encoders were requested for (e.g. by encoders of other types)
+ typesUnseen []reflect.Type
+
+ // function name to relevant type maps to track names of de-/encoders in
+ // case of a name clash or unnamed structs
+ functionNames map[string]reflect.Type
+}
+
+// NewGenerator initializes and returns a Generator.
+func NewGenerator(filename string) *Generator {
+ ret := &Generator{
+ imports: map[string]string{
+ pkgWriter: "jwriter",
+ pkgLexer: "jlexer",
+ pkgEasyJSON: "easyjson",
+ "encoding/json": "json",
+ },
+ fieldNamer: DefaultFieldNamer{},
+ marshalers: make(map[reflect.Type]bool),
+ typesSeen: make(map[reflect.Type]bool),
+ functionNames: make(map[string]reflect.Type),
+ }
+
+ // Use a file-unique prefix on all auxiliary funcs to avoid
+ // name clashes.
+ hash := fnv.New32()
+ hash.Write([]byte(filename))
+ ret.hashString = fmt.Sprintf("%x", hash.Sum32())
+
+ return ret
+}
+
+// SetPkg sets the name and path of output package.
+func (g *Generator) SetPkg(name, path string) {
+ g.pkgName = name
+ g.pkgPath = path
+}
+
+// SetBuildTags sets build tags for the output file.
+func (g *Generator) SetBuildTags(tags string) {
+ g.buildTags = tags
+}
+
+// SetFieldNamer sets field naming strategy.
+func (g *Generator) SetFieldNamer(n FieldNamer) {
+ g.fieldNamer = n
+}
+
+// UseSnakeCase sets snake_case field naming strategy.
+func (g *Generator) UseSnakeCase() {
+ g.fieldNamer = SnakeCaseFieldNamer{}
+}
+
+// UseLowerCamelCase sets lowerCamelCase field naming strategy.
+func (g *Generator) UseLowerCamelCase() {
+ g.fieldNamer = LowerCamelCaseFieldNamer{}
+}
+
+// NoStdMarshalers instructs not to generate standard MarshalJSON/UnmarshalJSON
+// methods (only the custom interface).
+func (g *Generator) NoStdMarshalers() {
+ g.noStdMarshalers = true
+}
+
+// OmitEmpty triggers `json:",omitempty"` behaviour by default.
+func (g *Generator) OmitEmpty() {
+ g.omitEmpty = true
+}
+
+// addType requests generation of encoding/decoding funcs for the given type.
+func (g *Generator) addType(t reflect.Type) {
+ if g.typesSeen[t] {
+ return
+ }
+ for _, t1 := range g.typesUnseen {
+ if t1 == t {
+ return
+ }
+ }
+ g.typesUnseen = append(g.typesUnseen, t)
+}
+
+// Add requests to generate marshaler/unmarshalers and encoding/decoding
+// funcs for the type of given object.
+func (g *Generator) Add(obj interface{}) {
+ t := reflect.TypeOf(obj)
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ g.addType(t)
+ g.marshalers[t] = true
+}
+
+// printHeader prints package declaration and imports.
+func (g *Generator) printHeader() {
+ if g.buildTags != "" {
+ fmt.Println("// +build ", g.buildTags)
+ fmt.Println()
+ }
+ fmt.Println("// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.")
+ fmt.Println()
+ fmt.Println("package ", g.pkgName)
+ fmt.Println()
+
+ byAlias := map[string]string{}
+ var aliases []string
+ for path, alias := range g.imports {
+ aliases = append(aliases, alias)
+ byAlias[alias] = path
+ }
+
+ sort.Strings(aliases)
+ fmt.Println("import (")
+ for _, alias := range aliases {
+ fmt.Printf(" %s %q\n", alias, byAlias[alias])
+ }
+
+ fmt.Println(")")
+ fmt.Println("")
+ fmt.Println("// suppress unused package warning")
+ fmt.Println("var (")
+ fmt.Println(" _ *json.RawMessage")
+ fmt.Println(" _ *jlexer.Lexer")
+ fmt.Println(" _ *jwriter.Writer")
+ fmt.Println(" _ easyjson.Marshaler")
+ fmt.Println(")")
+
+ fmt.Println()
+}
+
+// Run runs the generator and outputs generated code to out.
+func (g *Generator) Run(out io.Writer) error {
+ g.out = &bytes.Buffer{}
+
+ for len(g.typesUnseen) > 0 {
+ t := g.typesUnseen[len(g.typesUnseen)-1]
+ g.typesUnseen = g.typesUnseen[:len(g.typesUnseen)-1]
+ g.typesSeen[t] = true
+
+ if err := g.genDecoder(t); err != nil {
+ return err
+ }
+ if err := g.genEncoder(t); err != nil {
+ return err
+ }
+
+ if !g.marshalers[t] {
+ continue
+ }
+
+ if err := g.genStructMarshaler(t); err != nil {
+ return err
+ }
+ if err := g.genStructUnmarshaler(t); err != nil {
+ return err
+ }
+ }
+ g.printHeader()
+ _, err := out.Write(g.out.Bytes())
+ return err
+}
+
+// fixPkgPathVendoring strips the "/vendor/" prefix from a package path, if present.
+func fixPkgPathVendoring(pkgPath string) string {
+ const vendor = "/vendor/"
+ if i := strings.LastIndex(pkgPath, vendor); i != -1 {
+ return pkgPath[i+len(vendor):]
+ }
+ return pkgPath
+}
+
+func fixAliasName(alias string) string {
+ alias = strings.Replace(
+ strings.Replace(alias, ".", "_", -1),
+ "-",
+ "_",
+ -1,
+ )
+
+ if alias[0] == 'v' { // to avoid conflicting with var names such as v1
+ alias = "_" + alias
+ }
+ return alias
+}
+
+// pkgAlias creates and returns an import alias for a given package.
+func (g *Generator) pkgAlias(pkgPath string) string {
+ pkgPath = fixPkgPathVendoring(pkgPath)
+ if alias := g.imports[pkgPath]; alias != "" {
+ return alias
+ }
+
+ for i := 0; ; i++ {
+ alias := fixAliasName(path.Base(pkgPath))
+ if i > 0 {
+ alias += fmt.Sprint(i)
+ }
+
+ exists := false
+ for _, v := range g.imports {
+ if v == alias {
+ exists = true
+ break
+ }
+ }
+
+ if !exists {
+ g.imports[pkgPath] = alias
+ return alias
+ }
+ }
+}
+
+// getType returns the textual name of the given type for use in generated code.
+func (g *Generator) getType(t reflect.Type) string {
+ if t.Name() == "" {
+ switch t.Kind() {
+ case reflect.Ptr:
+ return "*" + g.getType(t.Elem())
+ case reflect.Slice:
+ return "[]" + g.getType(t.Elem())
+ case reflect.Array:
+ return "[" + strconv.Itoa(t.Len()) + "]" + g.getType(t.Elem())
+ case reflect.Map:
+ return "map[" + g.getType(t.Key()) + "]" + g.getType(t.Elem())
+ }
+ }
+
+ if t.Name() == "" || t.PkgPath() == "" {
+ if t.Kind() == reflect.Struct {
+ // the fields of an anonymous struct can have named types,
+ // and t.String() will not be sufficient because it does not
+ // remove the package name when it matches g.pkgPath.
+ // so we convert by hand
+ nf := t.NumField()
+ lines := make([]string, 0, nf)
+ for i := 0; i < nf; i++ {
+ f := t.Field(i)
+ line := f.Name + " " + g.getType(f.Type)
+ t := f.Tag
+ if t != "" {
+ line += " " + escapeTag(t)
+ }
+ lines = append(lines, line)
+ }
+ return strings.Join([]string{"struct { ", strings.Join(lines, "; "), " }"}, "")
+ }
+ return t.String()
+ } else if t.PkgPath() == g.pkgPath {
+ return t.Name()
+ }
+ return g.pkgAlias(t.PkgPath()) + "." + t.Name()
+}
+
+// escapeTag escapes a struct field tag string back to its source-code form.
+func escapeTag(tag reflect.StructTag) string {
+ t := string(tag)
+ if strings.ContainsRune(t, '`') {
+ // there are ` in the string; we can't use ` to enclose the string
+ return strconv.Quote(t)
+ }
+ return "`" + t + "`"
+}
+
+// uniqueVarName returns a file-unique name that can be used for generated variables.
+func (g *Generator) uniqueVarName() string {
+ g.varCounter++
+ return fmt.Sprint("v", g.varCounter)
+}
+
+// safeName escapes unsafe characters in pkg/type name and returns a string that can be used
+// in encoder/decoder names for the type.
+func (g *Generator) safeName(t reflect.Type) string {
+ name := t.PkgPath()
+ if t.Name() == "" {
+ name += "anonymous"
+ } else {
+ name += "." + t.Name()
+ }
+
+ parts := []string{}
+ part := []rune{}
+ for _, c := range name {
+ if unicode.IsLetter(c) || unicode.IsDigit(c) {
+ part = append(part, c)
+ } else if len(part) > 0 {
+ parts = append(parts, string(part))
+ part = []rune{}
+ }
+ }
+ return joinFunctionNameParts(false, parts...)
+}
+
+// functionName returns a function name for a given type with a given prefix. If a function
+// with this prefix already exists for a type, it is returned.
+//
+// The method is also used to track encoder/decoder names for the type.
+func (g *Generator) functionName(prefix string, t reflect.Type) string {
+ prefix = joinFunctionNameParts(true, "easyjson", g.hashString, prefix)
+ name := joinFunctionNameParts(true, prefix, g.safeName(t))
+
+ // Most of the names will be unique, try a shortcut first.
+ if e, ok := g.functionNames[name]; !ok || e == t {
+ g.functionNames[name] = t
+ return name
+ }
+
+ // Search if the function already exists.
+ for name1, t1 := range g.functionNames {
+ if t1 == t && strings.HasPrefix(name1, prefix) {
+ return name1
+ }
+ }
+
+ // Create a new name in the case of a clash.
+ for i := 1; ; i++ {
+ nm := fmt.Sprint(name, i)
+ if _, ok := g.functionNames[nm]; ok {
+ continue
+ }
+ g.functionNames[nm] = t
+ return nm
+ }
+}
+
+// DefaultFieldNamer implements a trivial naming policy equivalent to encoding/json's.
+type DefaultFieldNamer struct{}
+
+func (DefaultFieldNamer) GetJSONFieldName(t reflect.Type, f reflect.StructField) string {
+ jsonName := strings.Split(f.Tag.Get("json"), ",")[0]
+ if jsonName != "" {
+ return jsonName
+ } else {
+ return f.Name
+ }
+}
+
+// LowerCamelCaseFieldNamer implements a lowerCamelCase naming policy for fields.
+type LowerCamelCaseFieldNamer struct{}
+
+func isLower(b byte) bool {
+ return b <= 122 && b >= 97
+}
+
+func isUpper(b byte) bool {
+ return b >= 65 && b <= 90
+}
+
+// lowerFirst converts e.g. HTTPRestClient to httpRestClient.
+func lowerFirst(s string) string {
+ if s == "" {
+ return ""
+ }
+
+ str := ""
+ strlen := len(s)
+
+ /*
+    Loop over each char.
+    If it is uppercase:
+      If it is the first char, LOWER it
+      If the following char is lower, LEAVE it
+      If the following char is upper OR numeric, LOWER it
+      If it is at the end of the string, LEAVE it
+    Otherwise leave it unchanged.
+ */
+
+ foundLower := false
+ for i := range s {
+ ch := s[i]
+ if isUpper(ch) {
+ if i == 0 {
+ str += string(ch + 32)
+ } else if !foundLower { // Currently just a stream of capitals, eg JSONRESTS[erver]
+ if strlen > (i+1) && isLower(s[i+1]) {
+ // Next char is lower, keep this a capital
+ str += string(ch)
+ } else {
+ // Either at end of string or next char is capital
+ str += string(ch + 32)
+ }
+ } else {
+ str += string(ch)
+ }
+ } else {
+ foundLower = true
+ str += string(ch)
+ }
+ }
+
+ return str
+}
+
+func (LowerCamelCaseFieldNamer) GetJSONFieldName(t reflect.Type, f reflect.StructField) string {
+ jsonName := strings.Split(f.Tag.Get("json"), ",")[0]
+ if jsonName != "" {
+ return jsonName
+ } else {
+ return lowerFirst(f.Name)
+ }
+}
+
+// SnakeCaseFieldNamer implements CamelCase to snake_case conversion for fields names.
+type SnakeCaseFieldNamer struct{}
+
+func camelToSnake(name string) string {
+ var ret bytes.Buffer
+
+ multipleUpper := false
+ var lastUpper rune
+ var beforeUpper rune
+
+ for _, c := range name {
+ // Non-lowercase character after uppercase is considered to be uppercase too.
+ isUpper := (unicode.IsUpper(c) || (lastUpper != 0 && !unicode.IsLower(c)))
+
+ if lastUpper != 0 {
+ // Output a delimiter if last character was either the first uppercase character
+ // in a row, or the last one in a row (e.g. 'S' in "HTTPServer").
+ // Do not output a delimiter at the beginning of the name.
+
+ firstInRow := !multipleUpper
+ lastInRow := !isUpper
+
+ if ret.Len() > 0 && (firstInRow || lastInRow) && beforeUpper != '_' {
+ ret.WriteByte('_')
+ }
+ ret.WriteRune(unicode.ToLower(lastUpper))
+ }
+
+ // Buffer uppercase char, do not output it yet as a delimiter may be required if the
+ // next character is lowercase.
+ if isUpper {
+ multipleUpper = (lastUpper != 0)
+ lastUpper = c
+ continue
+ }
+
+ ret.WriteRune(c)
+ lastUpper = 0
+ beforeUpper = c
+ multipleUpper = false
+ }
+
+ if lastUpper != 0 {
+ ret.WriteRune(unicode.ToLower(lastUpper))
+ }
+ return string(ret.Bytes())
+}
+
+func (SnakeCaseFieldNamer) GetJSONFieldName(t reflect.Type, f reflect.StructField) string {
+ jsonName := strings.Split(f.Tag.Get("json"), ",")[0]
+ if jsonName != "" {
+ return jsonName
+ }
+
+ return camelToSnake(f.Name)
+}
+
+func joinFunctionNameParts(keepFirst bool, parts ...string) string {
+ buf := bytes.NewBufferString("")
+ for i, part := range parts {
+ if i == 0 && keepFirst {
+ buf.WriteString(part)
+ } else {
+ if len(part) > 0 {
+ buf.WriteString(strings.ToUpper(string(part[0])))
+ }
+ if len(part) > 1 {
+ buf.WriteString(part[1:])
+ }
+ }
+ }
+ return buf.String()
+}
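
A minimal driver sketch for the Generator API above (MyType, the package name, and the output filename are hypothetical; the real easyjson tool produces a driver like this through its bootstrap package). Note that printHeader writes to standard output while Run writes the function bodies to out, so passing os.Stdout keeps header and body together:

    package main

    import (
        "log"
        "os"

        "github.com/mailru/easyjson/gen"
    )

    func main() {
        g := gen.NewGenerator("mytype_easyjson.go")
        g.SetPkg("mypkg", "example.com/mypkg")
        g.UseSnakeCase() // optional: snake_case JSON field names
        g.Add(MyType{})  // request marshaler/unmarshaler for MyType
        if err := g.Run(os.Stdout); err != nil {
            log.Fatal(err)
        }
    }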
diff --git a/vendor/github.com/mailru/easyjson/gen/generator_test.go b/vendor/github.com/mailru/easyjson/gen/generator_test.go
new file mode 100644
index 000000000..0c9d27845
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/gen/generator_test.go
@@ -0,0 +1,87 @@
+package gen
+
+import (
+ "testing"
+)
+
+func TestCamelToSnake(t *testing.T) {
+ for i, test := range []struct {
+ In, Out string
+ }{
+ {"", ""},
+ {"A", "a"},
+ {"SimpleExample", "simple_example"},
+ {"internalField", "internal_field"},
+
+ {"SomeHTTPStuff", "some_http_stuff"},
+ {"WriteJSON", "write_json"},
+ {"HTTP2Server", "http2_server"},
+ {"Some_Mixed_Case", "some_mixed_case"},
+ {"do_nothing", "do_nothing"},
+
+ {"JSONHTTPRPCServer", "jsonhttprpc_server"}, // nothing can be done here without a dictionary
+ } {
+ got := camelToSnake(test.In)
+ if got != test.Out {
+ t.Errorf("[%d] camelToSnake(%s) = %s; want %s", i, test.In, got, test.Out)
+ }
+ }
+}
+
+func TestCamelToLowerCamel(t *testing.T) {
+ for i, test := range []struct {
+ In, Out string
+ }{
+ {"", ""},
+ {"A", "a"},
+ {"SimpleExample", "simpleExample"},
+ {"internalField", "internalField"},
+
+ {"SomeHTTPStuff", "someHTTPStuff"},
+ {"WriteJSON", "writeJSON"},
+ {"HTTP2Server", "http2Server"},
+
+ {"JSONHTTPRPCServer", "jsonhttprpcServer"}, // nothing can be done here without a dictionary
+ } {
+ got := lowerFirst(test.In)
+ if got != test.Out {
+ t.Errorf("[%d] lowerFirst(%s) = %s; want %s", i, test.In, got, test.Out)
+ }
+ }
+}
+
+func TestJoinFunctionNameParts(t *testing.T) {
+ for i, test := range []struct {
+ keepFirst bool
+ parts []string
+ out string
+ }{
+ {false, []string{}, ""},
+ {false, []string{"a"}, "A"},
+ {false, []string{"simple", "example"}, "SimpleExample"},
+ {true, []string{"first", "example"}, "firstExample"},
+ {false, []string{"some", "UPPER", "case"}, "SomeUPPERCase"},
+ {false, []string{"number", "123"}, "Number123"},
+ } {
+ got := joinFunctionNameParts(test.keepFirst, test.parts...)
+ if got != test.out {
+ t.Errorf("[%d] joinFunctionNameParts(%v) = %s; want %s", i, test.parts, got, test.out)
+ }
+ }
+}
+
+func TestFixVendorPath(t *testing.T) {
+ for i, test := range []struct {
+ In, Out string
+ }{
+ {"", ""},
+ {"time", "time"},
+ {"project/vendor/subpackage", "subpackage"},
+ } {
+ got := fixPkgPathVendoring(test.In)
+ if got != test.Out {
+ t.Errorf("[%d] fixPkgPathVendoring(%s) = %s; want %s", i, test.In, got, test.Out)
+ }
+ }
+
+}
diff --git a/vendor/github.com/mailru/easyjson/helpers.go b/vendor/github.com/mailru/easyjson/helpers.go
new file mode 100644
index 000000000..b86b87d22
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/helpers.go
@@ -0,0 +1,78 @@
+// Package easyjson contains marshaler/unmarshaler interfaces and helper functions.
+package easyjson
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// Marshaler is an easyjson-compatible marshaler interface.
+type Marshaler interface {
+ MarshalEasyJSON(w *jwriter.Writer)
+}
+
+// Unmarshaler is an easyjson-compatible unmarshaler interface.
+type Unmarshaler interface {
+ UnmarshalEasyJSON(w *jlexer.Lexer)
+}
+
+// Optional defines an undefined-test method for a type to integrate with 'omitempty' logic.
+type Optional interface {
+ IsDefined() bool
+}
+
+// Marshal returns data as a single byte slice. This method is suboptimal, as the data is likely to be copied
+// from a chain of smaller chunks.
+func Marshal(v Marshaler) ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.BuildBytes()
+}
+
+// MarshalToWriter marshals the data to an io.Writer.
+func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {
+ jw := jwriter.Writer{}
+ v.MarshalEasyJSON(&jw)
+ return jw.DumpTo(w)
+}
+
+// MarshalToHTTPResponseWriter sets the Content-Length and Content-Type headers on the
+// http.ResponseWriter and sends the data to the writer. started will be false
+// if an error occurred before any http.ResponseWriter methods were actually
+// invoked (in that case a 500 reply is still possible).
+func MarshalToHTTPResponseWriter(v Marshaler, w http.ResponseWriter) (started bool, written int, err error) {
+ jw := jwriter.Writer{}
+ v.MarshalEasyJSON(&jw)
+ if jw.Error != nil {
+ return false, 0, jw.Error
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Header().Set("Content-Length", strconv.Itoa(jw.Size()))
+
+ started = true
+ written, err = jw.DumpTo(w)
+ return
+}
+
+// Unmarshal decodes the JSON in data into the object.
+func Unmarshal(data []byte, v Unmarshaler) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// UnmarshalFromReader reads all the data in the reader and decodes as JSON into the object.
+func UnmarshalFromReader(r io.Reader, v Unmarshaler) error {
+ data, err := ioutil.ReadAll(r)
+ if err != nil {
+ return err
+ }
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
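
A usage sketch for these helpers, assuming a hypothetical User type with generated MarshalEasyJSON/UnmarshalEasyJSON methods:

    var u User
    if err := easyjson.Unmarshal([]byte(`{"id":1,"name":"bob"}`), &u); err != nil {
        log.Fatal(err)
    }
    data, err := easyjson.Marshal(u) // data == []byte(`{"id":1,"name":"bob"}`)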
diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
new file mode 100644
index 000000000..ff7b27c5b
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
@@ -0,0 +1,24 @@
+// This file is only included in the build if neither the
+// easyjson_nounsafe nor the appengine build tag is set. See README notes
+// for more details.
+
+//+build !easyjson_nounsafe
+//+build !appengine
+
+package jlexer
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// bytesToStr creates a string pointing at the slice to avoid copying.
+//
+// Warning: the string returned by the function should be used with care, as the whole input data
+// chunk may be blocked from being freed by the GC because of a single string, or the buffer's Data
+// may be garbage-collected even while the string still exists.
+func bytesToStr(data []byte) string {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+ shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
+ return *(*string)(unsafe.Pointer(&shdr))
+}
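
A short illustration of the aliasing hazard the warning above describes: the returned string shares memory with the input slice, so mutating the slice changes the string:

    buf := []byte("hello")
    s := bytesToStr(buf)
    buf[0] = 'H'
    // s now reads "Hello": the string aliases buf's backing array.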
diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
new file mode 100644
index 000000000..864d1be67
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
@@ -0,0 +1,13 @@
+// This file is included in the build if any of the build tags below
+// are defined. Refer to README notes for more details.
+
+//+build easyjson_nounsafe appengine
+
+package jlexer
+
+// bytesToStr creates a string from []byte in the ordinary way, i.e. with a copy.
+//
+// Note that this method is roughly 1.5x slower than using the 'unsafe' method.
+func bytesToStr(data []byte) string {
+ return string(data)
+}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go
new file mode 100644
index 000000000..e90ec40d0
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/error.go
@@ -0,0 +1,15 @@
+package jlexer
+
+import "fmt"
+
+// LexerError implements the error interface and represents all possible errors that can be
+// generated during parsing the JSON data.
+type LexerError struct {
+ Reason string
+ Offset int
+ Data string
+}
+
+func (l *LexerError) Error() string {
+ return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data)
+}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
new file mode 100644
index 000000000..e5558ae39
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
@@ -0,0 +1,1141 @@
+// Package jlexer contains a JSON lexer implementation.
+//
+// It is expected that it is mostly used with generated parser code, so the interface is tuned
+// for a parser that knows what kind of data is expected.
+package jlexer
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// tokenKind determines type of a token.
+type tokenKind byte
+
+const (
+ tokenUndef tokenKind = iota // No token.
+ tokenDelim // Delimiter: one of '{', '}', '[' or ']'.
+ tokenString // A string literal, e.g. "abc\u1234"
+ tokenNumber // Number literal, e.g. 1.5e5
+ tokenBool // Boolean literal: true or false.
+ tokenNull // null keyword.
+)
+
+// token describes a single token: type, position in the input and value.
+type token struct {
+ kind tokenKind // Type of a token.
+
+ boolValue bool // Value if a boolean literal token.
+ byteValue []byte // Raw value of a token.
+ delimValue byte
+}
+
+// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice.
+type Lexer struct {
+ Data []byte // Input data given to the lexer.
+
+ start int // Start of the current token.
+ pos int // Current unscanned position in the input stream.
+ token token // Last scanned token, if token.kind != tokenUndef.
+
+ firstElement bool // Whether the current element is the first in an array or an object.
+ wantSep byte // A comma or a colon character, which needs to occur before the next token.
+
+ UseMultipleErrors bool // If we want to use multiple errors.
+ fatalError error // Fatal error occurred during lexing. It is usually a syntax error.
+ multipleErrors []*LexerError // Semantic errors encountered during lexing. Parsing continues after these errors are found.
+}
+
+// FetchToken scans the input for the next token.
+func (r *Lexer) FetchToken() {
+ r.token.kind = tokenUndef
+ r.start = r.pos
+
+ // Check that r.Data still has an element at r.pos;
+ // if it doesn't, the input data is corrupted.
+ if len(r.Data) < r.pos {
+ r.errParse("Unexpected end of data")
+ return
+ }
+ // Determine the type of a token by skipping whitespace and reading the
+ // first character.
+ for _, c := range r.Data[r.pos:] {
+ switch c {
+ case ':', ',':
+ if r.wantSep == c {
+ r.pos++
+ r.start++
+ r.wantSep = 0
+ } else {
+ r.errSyntax()
+ }
+
+ case ' ', '\t', '\r', '\n':
+ r.pos++
+ r.start++
+
+ case '"':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenString
+ r.fetchString()
+ return
+
+ case '{', '[':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+ r.firstElement = true
+ r.token.kind = tokenDelim
+ r.token.delimValue = r.Data[r.pos]
+ r.pos++
+ return
+
+ case '}', ']':
+ if !r.firstElement && (r.wantSep != ',') {
+ r.errSyntax()
+ }
+ r.wantSep = 0
+ r.token.kind = tokenDelim
+ r.token.delimValue = r.Data[r.pos]
+ r.pos++
+ return
+
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+ r.token.kind = tokenNumber
+ r.fetchNumber()
+ return
+
+ case 'n':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenNull
+ r.fetchNull()
+ return
+
+ case 't':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenBool
+ r.token.boolValue = true
+ r.fetchTrue()
+ return
+
+ case 'f':
+ if r.wantSep != 0 {
+ r.errSyntax()
+ }
+
+ r.token.kind = tokenBool
+ r.token.boolValue = false
+ r.fetchFalse()
+ return
+
+ default:
+ r.errSyntax()
+ return
+ }
+ }
+ r.fatalError = io.EOF
+ return
+}
+
+// isTokenEnd returns true if the char can follow a non-delimiter token
+func isTokenEnd(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':'
+}
+
+// fetchNull fetches and checks remaining bytes of null keyword.
+func (r *Lexer) fetchNull() {
+ r.pos += 4
+ if r.pos > len(r.Data) ||
+ r.Data[r.pos-3] != 'u' ||
+ r.Data[r.pos-2] != 'l' ||
+ r.Data[r.pos-1] != 'l' ||
+ (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+ r.pos -= 4
+ r.errSyntax()
+ }
+}
+
+// fetchTrue fetches and checks remaining bytes of true keyword.
+func (r *Lexer) fetchTrue() {
+ r.pos += 4
+ if r.pos > len(r.Data) ||
+ r.Data[r.pos-3] != 'r' ||
+ r.Data[r.pos-2] != 'u' ||
+ r.Data[r.pos-1] != 'e' ||
+ (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+ r.pos -= 4
+ r.errSyntax()
+ }
+}
+
+// fetchFalse fetches and checks remaining bytes of false keyword.
+func (r *Lexer) fetchFalse() {
+ r.pos += 5
+ if r.pos > len(r.Data) ||
+ r.Data[r.pos-4] != 'a' ||
+ r.Data[r.pos-3] != 'l' ||
+ r.Data[r.pos-2] != 's' ||
+ r.Data[r.pos-1] != 'e' ||
+ (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+ r.pos -= 5
+ r.errSyntax()
+ }
+}
+
+// fetchNumber scans a number literal token.
+func (r *Lexer) fetchNumber() {
+ hasE := false
+ afterE := false
+ hasDot := false
+
+ r.pos++
+ for i, c := range r.Data[r.pos:] {
+ switch {
+ case c >= '0' && c <= '9':
+ afterE = false
+ case c == '.' && !hasDot:
+ hasDot = true
+ case (c == 'e' || c == 'E') && !hasE:
+ hasE = true
+ hasDot = true
+ afterE = true
+ case (c == '+' || c == '-') && afterE:
+ afterE = false
+ default:
+ r.pos += i
+ if !isTokenEnd(c) {
+ r.errSyntax()
+ } else {
+ r.token.byteValue = r.Data[r.start:r.pos]
+ }
+ return
+ }
+ }
+
+ r.pos = len(r.Data)
+ r.token.byteValue = r.Data[r.start:]
+}
+
+// findStringLen scans the string literal for the ending quote char to determine the required buffer size.
+// The size will be exact if no escapes are present and may be inexact if there are escaped chars.
+func findStringLen(data []byte) (hasEscapes bool, length int) {
+ delta := 0
+
+ for i := 0; i < len(data); i++ {
+ switch data[i] {
+ case '\\':
+ i++
+ delta++
+ if i < len(data) && data[i] == 'u' {
+ delta++
+ }
+ case '"':
+ return (delta > 0), (i - delta)
+ }
+ }
+
+ return false, len(data)
+}
+
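A worked example of the length estimate (data is everything after the opening quote):

    // data: a b c \ n "  -> the '\\' advances i past 'n' and bumps delta to 1;
    // the '"' at i == 5 returns (true, 5-1) = (true, 4), the decoded size of "abc\n".
    hasEsc, n := findStringLen([]byte(`abc\n"`))
    // hasEsc == true, n == 4
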
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or -1 if the sequence is malformed.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ var val rune
+ for i := 2; i < len(s) && i < 6; i++ {
+ var v byte
+ c := s[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ v = c - '0'
+ case 'a', 'b', 'c', 'd', 'e', 'f':
+ v = c - 'a' + 10
+ case 'A', 'B', 'C', 'D', 'E', 'F':
+ v = c - 'A' + 10
+ default:
+ return -1
+ }
+
+ val <<= 4
+ val |= rune(v)
+ }
+ return val
+}
+
+// processEscape processes a single escape sequence and returns number of bytes processed.
+func (r *Lexer) processEscape(data []byte) (int, error) {
+ if len(data) < 2 {
+ return 0, fmt.Errorf("syntax error at %v", string(data))
+ }
+
+ c := data[1]
+ switch c {
+ case '"', '/', '\\':
+ r.token.byteValue = append(r.token.byteValue, c)
+ return 2, nil
+ case 'b':
+ r.token.byteValue = append(r.token.byteValue, '\b')
+ return 2, nil
+ case 'f':
+ r.token.byteValue = append(r.token.byteValue, '\f')
+ return 2, nil
+ case 'n':
+ r.token.byteValue = append(r.token.byteValue, '\n')
+ return 2, nil
+ case 'r':
+ r.token.byteValue = append(r.token.byteValue, '\r')
+ return 2, nil
+ case 't':
+ r.token.byteValue = append(r.token.byteValue, '\t')
+ return 2, nil
+ case 'u':
+ rr := getu4(data)
+ if rr < 0 {
+ return 0, errors.New("syntax error")
+ }
+
+ read := 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(data[read:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ read += 6
+ rr = dec
+ } else {
+ rr = unicode.ReplacementChar
+ }
+ }
+ var d [4]byte
+ s := utf8.EncodeRune(d[:], rr)
+ r.token.byteValue = append(r.token.byteValue, d[:s]...)
+ return read, nil
+ }
+
+ return 0, errors.New("syntax error")
+}
+
+// fetchString scans a string literal token.
+func (r *Lexer) fetchString() {
+ r.pos++
+ data := r.Data[r.pos:]
+
+ hasEscapes, length := findStringLen(data)
+ if !hasEscapes {
+ r.token.byteValue = data[:length]
+ r.pos += length + 1
+ return
+ }
+
+ r.token.byteValue = make([]byte, 0, length)
+ p := 0
+ for i := 0; i < len(data); {
+ switch data[i] {
+ case '"':
+ r.pos += i + 1
+ r.token.byteValue = append(r.token.byteValue, data[p:i]...)
+ i++
+ return
+
+ case '\\':
+ r.token.byteValue = append(r.token.byteValue, data[p:i]...)
+ off, err := r.processEscape(data[i:])
+ if err != nil {
+ r.errParse(err.Error())
+ return
+ }
+ i += off
+ p = i
+
+ default:
+ i++
+ }
+ }
+ r.errParse("unterminated string literal")
+}
+
+// scanToken scans the next token if no token is currently available in the lexer.
+func (r *Lexer) scanToken() {
+ if r.token.kind != tokenUndef || r.fatalError != nil {
+ return
+ }
+
+ r.FetchToken()
+}
+
+// consume resets the current token to allow scanning the next one.
+func (r *Lexer) consume() {
+ r.token.kind = tokenUndef
+ r.token.delimValue = 0
+}
+
+// Ok returns true if no error (including io.EOF) was encountered during scanning.
+func (r *Lexer) Ok() bool {
+ return r.fatalError == nil
+}
+
+const maxErrorContextLen = 13
+
+func (r *Lexer) errParse(what string) {
+ if r.fatalError == nil {
+ var str string
+ if len(r.Data)-r.pos <= maxErrorContextLen {
+ str = string(r.Data)
+ } else {
+ str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..."
+ }
+ r.fatalError = &LexerError{
+ Reason: what,
+ Offset: r.pos,
+ Data: str,
+ }
+ }
+}
+
+func (r *Lexer) errSyntax() {
+ r.errParse("syntax error")
+}
+
+func (r *Lexer) errInvalidToken(expected string) {
+ if r.fatalError != nil {
+ return
+ }
+ if r.UseMultipleErrors {
+ r.pos = r.start
+ r.consume()
+ r.SkipRecursive()
+ switch expected {
+ case "[":
+ r.token.delimValue = ']'
+ r.token.kind = tokenDelim
+ case "{":
+ r.token.delimValue = '}'
+ r.token.kind = tokenDelim
+ }
+ r.addNonfatalError(&LexerError{
+ Reason: fmt.Sprintf("expected %s", expected),
+ Offset: r.start,
+ Data: string(r.Data[r.start:r.pos]),
+ })
+ return
+ }
+
+ var str string
+ if len(r.token.byteValue) <= maxErrorContextLen {
+ str = string(r.token.byteValue)
+ } else {
+ str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
+ }
+ r.fatalError = &LexerError{
+ Reason: fmt.Sprintf("expected %s", expected),
+ Offset: r.pos,
+ Data: str,
+ }
+}
+
+func (r *Lexer) GetPos() int {
+ return r.pos
+}
+
+// Delim consumes a token and verifies that it is the given delimiter.
+func (r *Lexer) Delim(c byte) {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+
+ if !r.Ok() || r.token.delimValue != c {
+ r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled.
+ r.errInvalidToken(string([]byte{c}))
+ } else {
+ r.consume()
+ }
+}
+
+// IsDelim returns true if there was no scanning error and next token is the given delimiter.
+func (r *Lexer) IsDelim(c byte) bool {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ return !r.Ok() || r.token.delimValue == c
+}
+
+// Null verifies that the next token is null and consumes it.
+func (r *Lexer) Null() {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenNull {
+ r.errInvalidToken("null")
+ }
+ r.consume()
+}
+
+// IsNull returns true if the next token is a null keyword.
+func (r *Lexer) IsNull() bool {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ return r.Ok() && r.token.kind == tokenNull
+}
+
+// Skip skips a single token.
+func (r *Lexer) Skip() {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ r.consume()
+}
+
+// SkipRecursive skips the next array or object completely, or just skips a single token if it is not
+// an array/object.
+//
+// Note: no syntax validation is performed on the skipped data.
+func (r *Lexer) SkipRecursive() {
+ r.scanToken()
+ var start, end byte
+
+ if r.token.delimValue == '{' {
+ start, end = '{', '}'
+ } else if r.token.delimValue == '[' {
+ start, end = '[', ']'
+ } else {
+ r.consume()
+ return
+ }
+
+ r.consume()
+
+ level := 1
+ inQuotes := false
+ wasEscape := false
+
+ for i, c := range r.Data[r.pos:] {
+ switch {
+ case c == start && !inQuotes:
+ level++
+ case c == end && !inQuotes:
+ level--
+ if level == 0 {
+ r.pos += i + 1
+ return
+ }
+ case c == '\\' && inQuotes:
+ wasEscape = !wasEscape
+ continue
+ case c == '"' && inQuotes:
+ inQuotes = wasEscape
+ case c == '"':
+ inQuotes = true
+ }
+ wasEscape = false
+ }
+ r.pos = len(r.Data)
+ r.fatalError = &LexerError{
+ Reason: "EOF reached while skipping array/object or token",
+ Offset: r.pos,
+ Data: string(r.Data[r.pos:]),
+ }
+}
+
+// Raw fetches the next item recursively as a raw data slice.
+func (r *Lexer) Raw() []byte {
+ r.SkipRecursive()
+ if !r.Ok() {
+ return nil
+ }
+ return r.Data[r.start:r.pos]
+}
+
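For instance, Raw can pull an entire nested value out as raw bytes (a sketch; the comments assume the lexer state shown):

    l := jlexer.Lexer{Data: []byte(`{"a":[1,2,3],"b":true}`)}
    l.Delim('{')
    _ = l.String() // key "a"
    l.WantColon()
    raw := l.Raw() // []byte(`[1,2,3]`), skipped recursively without validation
    l.WantComma()
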
+// IsStart returns whether the lexer is positioned at the start
+// of an input string.
+func (r *Lexer) IsStart() bool {
+ return r.pos == 0
+}
+
+// Consumed reads all remaining bytes from the input, publishing an error if
+// there is anything but whitespace remaining.
+func (r *Lexer) Consumed() {
+ if r.pos > len(r.Data) || !r.Ok() {
+ return
+ }
+
+ for _, c := range r.Data[r.pos:] {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ r.AddError(&LexerError{
+ Reason: "invalid character '" + string(c) + "' after top-level value",
+ Offset: r.pos,
+ Data: string(r.Data[r.pos:]),
+ })
+ return
+ }
+
+ r.pos++
+ r.start++
+ }
+}
+
+func (r *Lexer) unsafeString() (string, []byte) {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenString {
+ r.errInvalidToken("string")
+ return "", nil
+ }
+ bytes := r.token.byteValue
+ ret := bytesToStr(r.token.byteValue)
+ r.consume()
+ return ret, bytes
+}
+
+// UnsafeString returns the string value if the token is a string literal.
+//
+// Warning: returned string may point to the input buffer, so the string should not outlive
+// the input buffer. Intended pattern of usage is as an argument to a switch statement.
+func (r *Lexer) UnsafeString() string {
+ ret, _ := r.unsafeString()
+ return ret
+}
+
+// UnsafeBytes returns the byte slice if the token is a string literal.
+func (r *Lexer) UnsafeBytes() []byte {
+ _, ret := r.unsafeString()
+ return ret
+}
+
+// String reads a string literal.
+func (r *Lexer) String() string {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenString {
+ r.errInvalidToken("string")
+ return ""
+ }
+ ret := string(r.token.byteValue)
+ r.consume()
+ return ret
+}
+
+// Bytes reads a string literal and base64 decodes it into a byte slice.
+func (r *Lexer) Bytes() []byte {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenString {
+ r.errInvalidToken("string")
+ return nil
+ }
+ ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
+ len, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
+ if err != nil {
+ r.fatalError = &LexerError{
+ Reason: err.Error(),
+ }
+ return nil
+ }
+
+ r.consume()
+ return ret[:len]
+}
+
+// Bool reads a true or false boolean keyword.
+func (r *Lexer) Bool() bool {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenBool {
+ r.errInvalidToken("bool")
+ return false
+ }
+ ret := r.token.boolValue
+ r.consume()
+ return ret
+}
+
+func (r *Lexer) number() string {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() || r.token.kind != tokenNumber {
+ r.errInvalidToken("number")
+ return ""
+ }
+ ret := bytesToStr(r.token.byteValue)
+ r.consume()
+ return ret
+}
+
+func (r *Lexer) Uint8() uint8 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return uint8(n)
+}
+
+func (r *Lexer) Uint16() uint16 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return uint16(n)
+}
+
+func (r *Lexer) Uint32() uint32 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return uint32(n)
+}
+
+func (r *Lexer) Uint64() uint64 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return n
+}
+
+func (r *Lexer) Uint() uint {
+ return uint(r.Uint64())
+}
+
+func (r *Lexer) Int8() int8 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return int8(n)
+}
+
+func (r *Lexer) Int16() int16 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return int16(n)
+}
+
+func (r *Lexer) Int32() int32 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return int32(n)
+}
+
+func (r *Lexer) Int64() int64 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return n
+}
+
+func (r *Lexer) Int() int {
+ return int(r.Int64())
+}
+
+func (r *Lexer) Uint8Str() uint8 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return uint8(n)
+}
+
+func (r *Lexer) Uint16Str() uint16 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return uint16(n)
+}
+
+func (r *Lexer) Uint32Str() uint32 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return uint32(n)
+}
+
+func (r *Lexer) Uint64Str() uint64 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return n
+}
+
+func (r *Lexer) UintStr() uint {
+ return uint(r.Uint64Str())
+}
+
+func (r *Lexer) UintptrStr() uintptr {
+ return uintptr(r.Uint64Str())
+}
+
+func (r *Lexer) Int8Str() int8 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 8)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return int8(n)
+}
+
+func (r *Lexer) Int16Str() int16 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 16)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return int16(n)
+}
+
+func (r *Lexer) Int32Str() int32 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return int32(n)
+}
+
+func (r *Lexer) Int64Str() int64 {
+ s, b := r.unsafeString()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: string(b),
+ })
+ }
+ return n
+}
+
+func (r *Lexer) IntStr() int {
+ return int(r.Int64Str())
+}
+
+func (r *Lexer) Float32() float32 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseFloat(s, 32)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return float32(n)
+}
+
+func (r *Lexer) Float64() float64 {
+ s := r.number()
+ if !r.Ok() {
+ return 0
+ }
+
+ n, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Reason: err.Error(),
+ Data: s,
+ })
+ }
+ return n
+}
+
+func (r *Lexer) Error() error {
+ return r.fatalError
+}
+
+func (r *Lexer) AddError(e error) {
+ if r.fatalError == nil {
+ r.fatalError = e
+ }
+}
+
+func (r *Lexer) AddNonFatalError(e error) {
+ r.addNonfatalError(&LexerError{
+ Offset: r.start,
+ Data: string(r.Data[r.start:r.pos]),
+ Reason: e.Error(),
+ })
+}
+
+func (r *Lexer) addNonfatalError(err *LexerError) {
+ if r.UseMultipleErrors {
+ // We don't want to add errors with the same offset.
+ if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset {
+ return
+ }
+ r.multipleErrors = append(r.multipleErrors, err)
+ return
+ }
+ r.fatalError = err
+}
+
+func (r *Lexer) GetNonFatalErrors() []*LexerError {
+ return r.multipleErrors
+}
+
+// JsonNumber fetches a json.Number from the 'encoding/json' package.
+// Ints, floats, and strings containing them are all valid values.
+func (r *Lexer) JsonNumber() json.Number {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+ if !r.Ok() {
+ r.errInvalidToken("json.Number")
+ return json.Number("0")
+ }
+
+ switch r.token.kind {
+ case tokenString:
+ return json.Number(r.String())
+ case tokenNumber:
+ return json.Number(r.Raw())
+ default:
+ r.errSyntax()
+ return json.Number("0")
+ }
+}
+
+// Interface fetches an interface{} analogous to the 'encoding/json' package.
+func (r *Lexer) Interface() interface{} {
+ if r.token.kind == tokenUndef && r.Ok() {
+ r.FetchToken()
+ }
+
+ if !r.Ok() {
+ return nil
+ }
+ switch r.token.kind {
+ case tokenString:
+ return r.String()
+ case tokenNumber:
+ return r.Float64()
+ case tokenBool:
+ return r.Bool()
+ case tokenNull:
+ r.Null()
+ return nil
+ }
+
+ if r.token.delimValue == '{' {
+ r.consume()
+
+ ret := map[string]interface{}{}
+ for !r.IsDelim('}') {
+ key := r.String()
+ r.WantColon()
+ ret[key] = r.Interface()
+ r.WantComma()
+ }
+ r.Delim('}')
+
+ if r.Ok() {
+ return ret
+ } else {
+ return nil
+ }
+ } else if r.token.delimValue == '[' {
+ r.consume()
+
+ var ret []interface{}
+ for !r.IsDelim(']') {
+ ret = append(ret, r.Interface())
+ r.WantComma()
+ }
+ r.Delim(']')
+
+ if r.Ok() {
+ return ret
+ } else {
+ return nil
+ }
+ }
+ r.errSyntax()
+ return nil
+}
+
+// WantComma requires a comma to be present before fetching the next token.
+func (r *Lexer) WantComma() {
+ r.wantSep = ','
+ r.firstElement = false
+}
+
+// WantColon requires a colon to be present before fetching the next token.
+func (r *Lexer) WantColon() {
+ r.wantSep = ':'
+ r.firstElement = false
+}
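
The numeric getters above all report range and syntax failures through addNonfatalError, so a caller that sets UseMultipleErrors can keep lexing and collect every LexerError afterwards instead of stopping at the first failure. A minimal sketch of that flow (an illustration against the jlexer API in this diff, not part of the vendored file):

    package main

    import (
        "fmt"

        "github.com/mailru/easyjson/jlexer"
    )

    func main() {
        // 300 overflows int8: strconv.ParseInt reports a range error and
        // returns the clamped value, which Int8 records as a nonfatal error.
        l := jlexer.Lexer{Data: []byte("300"), UseMultipleErrors: true}
        n := l.Int8()
        fmt.Println(n) // 127, clamped by strconv on overflow
        for _, e := range l.GetNonFatalErrors() {
            fmt.Printf("offset %d: %s\n", e.Offset, e.Reason)
        }
    }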
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go b/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go
new file mode 100644
index 000000000..4ce4abe6a
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go
@@ -0,0 +1,311 @@
+package jlexer
+
+import (
+ "bytes"
+ "encoding/json"
+ "reflect"
+ "testing"
+)
+
+func TestString(t *testing.T) {
+ for i, test := range []struct {
+ toParse string
+ want string
+ wantError bool
+ }{
+ {toParse: `"simple string"`, want: "simple string"},
+ {toParse: " \r\r\n\t " + `"test"`, want: "test"},
+ {toParse: `"\n\t\"\/\\\f\r"`, want: "\n\t\"/\\\f\r"},
+ {toParse: `"\u0020"`, want: " "},
+ {toParse: `"\u0020-\t"`, want: " -\t"},
+ {toParse: `"\ufffd\uFFFD"`, want: "\ufffd\ufffd"},
+ {toParse: `"\ud83d\ude00"`, want: "😀"},
+ {toParse: `"\ud83d\ude08"`, want: "😈"},
+ {toParse: `"\ud8"`, wantError: true},
+
+ {toParse: `"test"junk`, want: "test"},
+
+ {toParse: `5`, wantError: true}, // not a string
+ {toParse: `"\x"`, wantError: true}, // invalid escape
+ {toParse: `"\ud800"`, want: "�"}, // invalid utf-8 char; return replacement char
+ } {
+ l := Lexer{Data: []byte(test.toParse)}
+
+ got := l.String()
+ if got != test.want {
+ t.Errorf("[%d, %q] String() = %v; want %v", i, test.toParse, got, test.want)
+ }
+ err := l.Error()
+ if err != nil && !test.wantError {
+ t.Errorf("[%d, %q] String() error: %v", i, test.toParse, err)
+ } else if err == nil && test.wantError {
+ t.Errorf("[%d, %q] String() ok; want error", i, test.toParse)
+ }
+ }
+}
+
+func TestBytes(t *testing.T) {
+ for i, test := range []struct {
+ toParse string
+ want string
+ wantError bool
+ }{
+ {toParse: `"c2ltcGxlIHN0cmluZw=="`, want: "simple string"},
+ {toParse: " \r\r\n\t " + `"dGVzdA=="`, want: "test"},
+
+ {toParse: `5`, wantError: true}, // not a JSON string
+ {toParse: `"foobar"`, wantError: true}, // not base64 encoded
+ {toParse: `"c2ltcGxlIHN0cmluZw="`, wantError: true}, // invalid base64 padding
+ } {
+ l := Lexer{Data: []byte(test.toParse)}
+
+ got := l.Bytes()
+ if !bytes.Equal(got, []byte(test.want)) {
+ t.Errorf("[%d, %q] Bytes() = %v; want: %v", i, test.toParse, got, []byte(test.want))
+ }
+ err := l.Error()
+ if err != nil && !test.wantError {
+ t.Errorf("[%d, %q] Bytes() error: %v", i, test.toParse, err)
+ } else if err == nil && test.wantError {
+ t.Errorf("[%d, %q] Bytes() ok; want error", i, test.toParse)
+ }
+ }
+}
+
+func TestNumber(t *testing.T) {
+ for i, test := range []struct {
+ toParse string
+ want string
+ wantError bool
+ }{
+ {toParse: "123", want: "123"},
+ {toParse: "-123", want: "-123"},
+ {toParse: "\r\n12.35", want: "12.35"},
+ {toParse: "12.35e+1", want: "12.35e+1"},
+ {toParse: "12.35e-15", want: "12.35e-15"},
+ {toParse: "12.35E-15", want: "12.35E-15"},
+ {toParse: "12.35E15", want: "12.35E15"},
+
+ {toParse: `"a"`, wantError: true},
+ {toParse: "123junk", wantError: true},
+ {toParse: "1.2.3", wantError: true},
+ {toParse: "1e2e3", wantError: true},
+ {toParse: "1e2.3", wantError: true},
+ } {
+ l := Lexer{Data: []byte(test.toParse)}
+
+ got := l.number()
+ if got != test.want {
+ t.Errorf("[%d, %q] number() = %v; want %v", i, test.toParse, got, test.want)
+ }
+ err := l.Error()
+ if err != nil && !test.wantError {
+ t.Errorf("[%d, %q] number() error: %v", i, test.toParse, err)
+ } else if err == nil && test.wantError {
+ t.Errorf("[%d, %q] number() ok; want error", i, test.toParse)
+ }
+ }
+}
+
+func TestBool(t *testing.T) {
+ for i, test := range []struct {
+ toParse string
+ want bool
+ wantError bool
+ }{
+ {toParse: "true", want: true},
+ {toParse: "false", want: false},
+
+ {toParse: "1", wantError: true},
+ {toParse: "truejunk", wantError: true},
+ {toParse: `false"junk"`, wantError: true},
+ {toParse: "True", wantError: true},
+ {toParse: "False", wantError: true},
+ } {
+ l := Lexer{Data: []byte(test.toParse)}
+
+ got := l.Bool()
+ if got != test.want {
+ t.Errorf("[%d, %q] Bool() = %v; want %v", i, test.toParse, got, test.want)
+ }
+ err := l.Error()
+ if err != nil && !test.wantError {
+ t.Errorf("[%d, %q] Bool() error: %v", i, test.toParse, err)
+ } else if err == nil && test.wantError {
+ t.Errorf("[%d, %q] Bool() ok; want error", i, test.toParse)
+ }
+ }
+}
+
+func TestSkipRecursive(t *testing.T) {
+ for i, test := range []struct {
+ toParse string
+ left string
+ wantError bool
+ }{
+ {toParse: "5, 4", left: ", 4"},
+ {toParse: "[5, 6], 4", left: ", 4"},
+ {toParse: "[5, [7,8]]: 4", left: ": 4"},
+
+ {toParse: `{"a":1}, 4`, left: ", 4"},
+ {toParse: `{"a":1, "b":{"c": 5}, "e":[12,15]}, 4`, left: ", 4"},
+
+ // array start/end chars in a string
+ {toParse: `[5, "]"], 4`, left: ", 4"},
+ {toParse: `[5, "\"]"], 4`, left: ", 4"},
+ {toParse: `[5, "["], 4`, left: ", 4"},
+ {toParse: `[5, "\"["], 4`, left: ", 4"},
+
+ // object start/end chars in a string
+ {toParse: `{"a}":1}, 4`, left: ", 4"},
+ {toParse: `{"a\"}":1}, 4`, left: ", 4"},
+ {toParse: `{"a{":1}, 4`, left: ", 4"},
+ {toParse: `{"a\"{":1}, 4`, left: ", 4"},
+
+ // object with double slashes at the end of string
+ {toParse: `{"a":"hey\\"}, 4`, left: ", 4"},
+ } {
+ l := Lexer{Data: []byte(test.toParse)}
+
+ l.SkipRecursive()
+
+ got := string(l.Data[l.pos:])
+ if got != test.left {
+ t.Errorf("[%d, %q] SkipRecursive() left = %v; want %v", i, test.toParse, got, test.left)
+ }
+ err := l.Error()
+ if err != nil && !test.wantError {
+ t.Errorf("[%d, %q] SkipRecursive() error: %v", i, test.toParse, err)
+ } else if err == nil && test.wantError {
+ t.Errorf("[%d, %q] SkipRecursive() ok; want error", i, test.toParse)
+ }
+ }
+}
+
+func TestInterface(t *testing.T) {
+ for i, test := range []struct {
+ toParse string
+ want interface{}
+ wantError bool
+ }{
+ {toParse: "null", want: nil},
+ {toParse: "true", want: true},
+ {toParse: `"a"`, want: "a"},
+ {toParse: "5", want: float64(5)},
+
+ {toParse: `{}`, want: map[string]interface{}{}},
+ {toParse: `[]`, want: []interface{}(nil)},
+
+ {toParse: `{"a": "b"}`, want: map[string]interface{}{"a": "b"}},
+ {toParse: `[5]`, want: []interface{}{float64(5)}},
+
+ {toParse: `{"a":5 , "b" : "string"}`, want: map[string]interface{}{"a": float64(5), "b": "string"}},
+ {toParse: `["a", 5 , null, true]`, want: []interface{}{"a", float64(5), nil, true}},
+
+ {toParse: `{"a" "b"}`, wantError: true},
+ {toParse: `{"a": "b",}`, wantError: true},
+ {toParse: `{"a":"b","c" "b"}`, wantError: true},
+ {toParse: `{"a": "b","c":"d",}`, wantError: true},
+ {toParse: `{,}`, wantError: true},
+
+ {toParse: `[1, 2,]`, wantError: true},
+ {toParse: `[1 2]`, wantError: true},
+ {toParse: `[,]`, wantError: true},
+ } {
+ l := Lexer{Data: []byte(test.toParse)}
+
+ got := l.Interface()
+ if !reflect.DeepEqual(got, test.want) {
+ t.Errorf("[%d, %q] Interface() = %v; want %v", i, test.toParse, got, test.want)
+ }
+ err := l.Error()
+ if err != nil && !test.wantError {
+ t.Errorf("[%d, %q] Interface() error: %v", i, test.toParse, err)
+ } else if err == nil && test.wantError {
+ t.Errorf("[%d, %q] Interface() ok; want error", i, test.toParse)
+ }
+ }
+}
+
+func TestConsumed(t *testing.T) {
+ for i, test := range []struct {
+ toParse string
+ wantError bool
+ }{
+ {toParse: "", wantError: false},
+ {toParse: " ", wantError: false},
+ {toParse: "\r\n", wantError: false},
+ {toParse: "\t\t", wantError: false},
+
+ {toParse: "{", wantError: true},
+ } {
+ l := Lexer{Data: []byte(test.toParse)}
+ l.Consumed()
+
+ err := l.Error()
+ if err != nil && !test.wantError {
+ t.Errorf("[%d, %q] Consumed() error: %v", i, test.toParse, err)
+ } else if err == nil && test.wantError {
+ t.Errorf("[%d, %q] Consumed() ok; want error", i, test.toParse)
+ }
+ }
+}
+
+func TestJsonNumber(t *testing.T) {
+ for i, test := range []struct {
+ toParse string
+ want json.Number
+ wantLexerError bool
+ wantValue interface{}
+ wantValueError bool
+ }{
+ {toParse: `10`, want: json.Number("10"), wantValue: int64(10)},
+ {toParse: `0`, want: json.Number("0"), wantValue: int64(0)},
+ {toParse: `0.12`, want: json.Number("0.12"), wantValue: 0.12},
+ {toParse: `25E-4`, want: json.Number("25E-4"), wantValue: 25E-4},
+
+ {toParse: `"10"`, want: json.Number("10"), wantValue: int64(10)},
+ {toParse: `"0"`, want: json.Number("0"), wantValue: int64(0)},
+ {toParse: `"0.12"`, want: json.Number("0.12"), wantValue: 0.12},
+ {toParse: `"25E-4"`, want: json.Number("25E-4"), wantValue: 25E-4},
+
+ {toParse: `"a""`, wantValueError: true},
+
+ {toParse: `[1]`, wantLexerError: true},
+ {toParse: `{}`, wantLexerError: true},
+ {toParse: `a`, wantLexerError: true},
+ } {
+ l := Lexer{Data: []byte(test.toParse)}
+
+ got := l.JsonNumber()
+ if got != test.want && !test.wantLexerError && !test.wantValueError {
+ t.Errorf("[%d, %q] JsonNumber() = %v; want %v", i, test.toParse, got, test.want)
+ }
+
+ err := l.Error()
+ if err != nil && !test.wantLexerError {
+ t.Errorf("[%d, %q] JsonNumber() lexer error: %v", i, test.toParse, err)
+ } else if err == nil && test.wantLexerError {
+ t.Errorf("[%d, %q] JsonNumber() ok; want lexer error", i, test.toParse)
+ }
+
+ var valueErr error
+ var gotValue interface{}
+ switch test.wantValue.(type) {
+ case float64:
+ gotValue, valueErr = got.Float64()
+ default:
+ gotValue, valueErr = got.Int64()
+ }
+
+ if !reflect.DeepEqual(gotValue, test.wantValue) && !test.wantLexerError && !test.wantValueError {
+ t.Errorf("[%d, %q] JsonNumber() = %v; want %v", i, test.toParse, gotValue, test.wantValue)
+ }
+
+ if valueErr != nil && !test.wantValueError {
+ t.Errorf("[%d, %q] JsonNumber() value error: %v", i, test.toParse, err)
+ } else if valueErr == nil && test.wantValueError {
+ t.Errorf("[%d, %q] JsonNumber() ok; want value error", i, test.toParse)
+ }
+ }
+}
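
The TestJsonNumber cases above accept both bare and quoted numbers; a tiny illustrative sketch of that behavior (not part of the vendored file):

    package main

    import (
        "fmt"

        "github.com/mailru/easyjson/jlexer"
    )

    func main() {
        // A string token containing a number is as valid as a bare number token.
        l := jlexer.Lexer{Data: []byte(`"25E-4"`)}
        num := l.JsonNumber()   // json.Number("25E-4")
        f, err := num.Float64() // 0.0025
        fmt.Println(num, f, err, l.Error())
    }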
diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go
new file mode 100644
index 000000000..e5a5ddfdb
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go
@@ -0,0 +1,377 @@
+// Package jwriter contains a JSON writer.
+package jwriter
+
+import (
+ "io"
+ "strconv"
+ "unicode/utf8"
+
+ "github.com/mailru/easyjson/buffer"
+)
+
+// Flags describe various encoding options. The behavior may actually be implemented
+// in the encoder, but the Flags field in Writer is used to set and pass them around.
+type Flags int
+
+const (
+ NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
+ NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'.
+)
+
+// Writer is a JSON writer.
+type Writer struct {
+ Flags Flags
+
+ Error error
+ Buffer buffer.Buffer
+ NoEscapeHTML bool
+}
+
+// Size returns the size of the data that was written out.
+func (w *Writer) Size() int {
+ return w.Buffer.Size()
+}
+
+// DumpTo outputs the data to the given io.Writer, resetting the buffer.
+func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
+ return w.Buffer.DumpTo(out)
+}
+
+// BuildBytes returns the writer data as a single byte slice. You can optionally
+// provide one byte slice as an argument for it to try to reuse.
+func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
+ if w.Error != nil {
+ return nil, w.Error
+ }
+
+ return w.Buffer.BuildBytes(reuse...), nil
+}
+
+// ReadCloser returns an io.ReadCloser that can be used to read the data.
+// ReadCloser also resets the buffer.
+func (w *Writer) ReadCloser() (io.ReadCloser, error) {
+ if w.Error != nil {
+ return nil, w.Error
+ }
+
+ return w.Buffer.ReadCloser(), nil
+}
+
+// RawByte appends a single byte to the buffer.
+func (w *Writer) RawByte(c byte) {
+ w.Buffer.AppendByte(c)
+}
+
+// RawString appends a raw string to the buffer.
+func (w *Writer) RawString(s string) {
+ w.Buffer.AppendString(s)
+}
+
+// Raw appends raw binary data to the buffer, or records the error if one is given.
+// Useful for calling with the results of MarshalJSON-like functions.
+func (w *Writer) Raw(data []byte, err error) {
+ switch {
+ case w.Error != nil:
+ return
+ case err != nil:
+ w.Error = err
+ case len(data) > 0:
+ w.Buffer.AppendBytes(data)
+ default:
+ w.RawString("null")
+ }
+}
+
+// RawText encloses raw binary data in quotes and appends it to the buffer.
+// Useful for calling with results of MarshalText-like functions.
+func (w *Writer) RawText(data []byte, err error) {
+ switch {
+ case w.Error != nil:
+ return
+ case err != nil:
+ w.Error = err
+ case len(data) > 0:
+ w.String(string(data))
+ default:
+ w.RawString("null")
+ }
+}
+
+// Base64Bytes appends data to the buffer after base64-encoding it.
+func (w *Writer) Base64Bytes(data []byte) {
+ if data == nil {
+ w.Buffer.AppendString("null")
+ return
+ }
+ w.Buffer.AppendByte('"')
+ w.base64(data)
+ w.Buffer.AppendByte('"')
+}
+
+func (w *Writer) Uint8(n uint8) {
+ w.Buffer.EnsureSpace(3)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint16(n uint16) {
+ w.Buffer.EnsureSpace(5)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint32(n uint32) {
+ w.Buffer.EnsureSpace(10)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint(n uint) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint64(n uint64) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+}
+
+func (w *Writer) Int8(n int8) {
+ w.Buffer.EnsureSpace(4)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int16(n int16) {
+ w.Buffer.EnsureSpace(6)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int32(n int32) {
+ w.Buffer.EnsureSpace(11)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int(n int) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int64(n int64) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+}
+
+func (w *Writer) Uint8Str(n uint8) {
+ w.Buffer.EnsureSpace(3)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint16Str(n uint16) {
+ w.Buffer.EnsureSpace(5)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint32Str(n uint32) {
+ w.Buffer.EnsureSpace(10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) UintStr(n uint) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint64Str(n uint64) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) UintptrStr(n uintptr) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int8Str(n int8) {
+ w.Buffer.EnsureSpace(4)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int16Str(n int16) {
+ w.Buffer.EnsureSpace(6)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int32Str(n int32) {
+ w.Buffer.EnsureSpace(11)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) IntStr(n int) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int64Str(n int64) {
+ w.Buffer.EnsureSpace(21)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+ w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+ w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Float32(n float32) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
+}
+
+func (w *Writer) Float64(n float64) {
+ w.Buffer.EnsureSpace(20)
+ w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
+}
+
+func (w *Writer) Bool(v bool) {
+ w.Buffer.EnsureSpace(5)
+ if v {
+ w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
+ } else {
+ w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
+ }
+}
+
+const chars = "0123456789abcdef"
+
+func isNotEscapedSingleChar(c byte, escapeHTML bool) bool {
+ // Note: might make sense to use a table if there are more chars to escape. With 4 chars
+ // it benchmarks the same.
+ if escapeHTML {
+ return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
+ } else {
+ return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
+ }
+}
+
+func (w *Writer) String(s string) {
+ w.Buffer.AppendByte('"')
+
+ // Portions of the string that contain no escapes are appended as
+ // byte slices.
+
+ p := 0 // last non-escape symbol
+
+ for i := 0; i < len(s); {
+ c := s[i]
+
+ if isNotEscapedSingleChar(c, !w.NoEscapeHTML) {
+ // single-width character, no escaping is required
+ i++
+ continue
+ } else if c < utf8.RuneSelf {
+ // single-width character, need to escape
+ w.Buffer.AppendString(s[p:i])
+ switch c {
+ case '\t':
+ w.Buffer.AppendString(`\t`)
+ case '\r':
+ w.Buffer.AppendString(`\r`)
+ case '\n':
+ w.Buffer.AppendString(`\n`)
+ case '\\':
+ w.Buffer.AppendString(`\\`)
+ case '"':
+ w.Buffer.AppendString(`\"`)
+ default:
+ w.Buffer.AppendString(`\u00`)
+ w.Buffer.AppendByte(chars[c>>4])
+ w.Buffer.AppendByte(chars[c&0xf])
+ }
+
+ i++
+ p = i
+ continue
+ }
+
+ // broken utf-8 sequence; emit the replacement character
+ runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
+ if runeValue == utf8.RuneError && runeWidth == 1 {
+ w.Buffer.AppendString(s[p:i])
+ w.Buffer.AppendString(`\ufffd`)
+ i++
+ p = i
+ continue
+ }
+
+ // jsonp stuff - line separator (U+2028) and paragraph separator (U+2029)
+ if runeValue == '\u2028' || runeValue == '\u2029' {
+ w.Buffer.AppendString(s[p:i])
+ w.Buffer.AppendString(`\u202`)
+ w.Buffer.AppendByte(chars[runeValue&0xf])
+ i += runeWidth
+ p = i
+ continue
+ }
+ i += runeWidth
+ }
+ w.Buffer.AppendString(s[p:])
+ w.Buffer.AppendByte('"')
+}
+
+const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+const padChar = '='
+
+func (w *Writer) base64(in []byte) {
+ if len(in) == 0 {
+ return
+ }
+
+ w.Buffer.EnsureSpace(((len(in) - 1) / 3 + 1) * 4)
+
+ si := 0
+ n := (len(in) / 3) * 3
+
+ for si < n {
+ // Convert 3x 8bit source bytes into 4 bytes
+ val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2])
+
+ w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F])
+
+ si += 3
+ }
+
+ remain := len(in) - si
+ if remain == 0 {
+ return
+ }
+
+ // Add the remaining small block
+ val := uint(in[si+0]) << 16
+ if remain == 2 {
+ val |= uint(in[si+1]) << 8
+ }
+
+ w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F])
+
+ switch remain {
+ case 2:
+ w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar))
+ case 1:
+ w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar))
+ }
+}
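
A short sketch tying the writer primitives above together (illustrative, not part of the vendored file). With the default NoEscapeHTML, String escapes '<', '>' and '&' as well as the JSONP-unsafe U+2028/U+2029 runes, and Base64Bytes wraps its output in quotes:

    package main

    import (
        "fmt"

        "github.com/mailru/easyjson/jwriter"
    )

    func main() {
        var w jwriter.Writer
        w.RawByte('{')
        w.String("k<\u2028>") // written as "k\u003c\u2028\u003e"
        w.RawByte(':')
        w.Base64Bytes([]byte("hi")) // written as "aGk="
        w.RawByte('}')

        out, err := w.BuildBytes()
        fmt.Println(string(out), err) // {"k\u003c\u2028\u003e":"aGk="} <nil>
    }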
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Bool.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Bool.go
new file mode 100644
index 000000000..6978ee971
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Bool.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Bool struct {
+ V bool
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OBool(v bool) Bool {
+ return Bool{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Bool) Get(deflt bool) bool {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Bool) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Bool(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Bool) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Bool{}
+ } else {
+ v.V = l.Bool()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Bool) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Bool) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Bool) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Bool) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
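
The value of these generated wrappers is that Defined distinguishes an explicit value from JSON null or an absent field, which a plain bool cannot. A small illustrative sketch (not part of the vendored file):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/mailru/easyjson/opt"
    )

    func main() {
        var v opt.Bool
        _ = json.Unmarshal([]byte(`null`), &v)
        fmt.Println(v.IsDefined(), v.Get(true)) // false true: undefined, default used

        _ = json.Unmarshal([]byte(`false`), &v)
        fmt.Println(v.IsDefined(), v.Get(true)) // true false: explicit false kept
    }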
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Float32.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Float32.go
new file mode 100644
index 000000000..643cea359
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Float32.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Float32 struct {
+ V float32
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OFloat32(v float32) Float32 {
+ return Float32{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Float32) Get(deflt float32) float32 {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Float32) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Float32(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Float32) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Float32{}
+ } else {
+ v.V = l.Float32()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Float32) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Float32) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Float32) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Float32) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Float64.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Float64.go
new file mode 100644
index 000000000..75ae72757
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Float64.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Float64 struct {
+ V float64
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OFloat64(v float64) Float64 {
+ return Float64{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Float64) Get(deflt float64) float64 {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Float64) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Float64(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Float64) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Float64{}
+ } else {
+ v.V = l.Float64()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Float64) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Float64) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Float64) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Float64) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Int.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Int.go
new file mode 100644
index 000000000..469742fee
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Int.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Int struct {
+ V int
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OInt(v int) Int {
+ return Int{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Int) Get(deflt int) int {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Int) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Int(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Int) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Int{}
+ } else {
+ v.V = l.Int()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Int) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Int) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Int) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Int) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Int16.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Int16.go
new file mode 100644
index 000000000..b7723e241
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Int16.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Int16 struct {
+ V int16
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OInt16(v int16) Int16 {
+ return Int16{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Int16) Get(deflt int16) int16 {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Int16) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Int16(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Int16) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Int16{}
+ } else {
+ v.V = l.Int16()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Int16) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Int16) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Int16) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Int16) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Int32.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Int32.go
new file mode 100644
index 000000000..7c7637a38
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Int32.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Int32 struct {
+ V int32
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OInt32(v int32) Int32 {
+ return Int32{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Int32) Get(deflt int32) int32 {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Int32) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Int32(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Int32) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Int32{}
+ } else {
+ v.V = l.Int32()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Int32) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Int32) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Int32) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Int32) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Int64.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Int64.go
new file mode 100644
index 000000000..e6ea6dc41
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Int64.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Int64 struct {
+ V int64
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OInt64(v int64) Int64 {
+ return Int64{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Int64) Get(deflt int64) int64 {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Int64) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Int64(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Int64) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Int64{}
+ } else {
+ v.V = l.Int64()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Int64) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Int64) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Int64) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Int64) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Int8.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Int8.go
new file mode 100644
index 000000000..ddc666580
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Int8.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Int8 struct {
+ V int8
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OInt8(v int8) Int8 {
+ return Int8{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Int8) Get(deflt int8) int8 {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Int8) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Int8(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Int8) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Int8{}
+ } else {
+ v.V = l.Int8()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Int8) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Int8) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Int8) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Int8) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_String.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_String.go
new file mode 100644
index 000000000..11c90b4ed
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_String.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type String struct {
+ V string
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OString(v string) String {
+ return String{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v String) Get(deflt string) string {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v String) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.String(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *String) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = String{}
+ } else {
+ v.V = l.String()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v String) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *String) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v String) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v String) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint.go
new file mode 100644
index 000000000..57efd3185
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Uint struct {
+ V uint
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OUint(v uint) Uint {
+ return Uint{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Uint) Get(deflt uint) uint {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Uint) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Uint(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Uint) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Uint{}
+ } else {
+ v.V = l.Uint()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Uint) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Uint) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Uint) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Uint) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint16.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint16.go
new file mode 100644
index 000000000..f28e1d2ef
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint16.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Uint16 struct {
+ V uint16
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OUint16(v uint16) Uint16 {
+ return Uint16{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Uint16) Get(deflt uint16) uint16 {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Uint16) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Uint16(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Uint16) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Uint16{}
+ } else {
+ v.V = l.Uint16()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Uint16) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Uint16) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Uint16) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Uint16) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint32.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint32.go
new file mode 100644
index 000000000..9fb95c0db
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint32.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Uint32 struct {
+ V uint32
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OUint32(v uint32) Uint32 {
+ return Uint32{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Uint32) Get(deflt uint32) uint32 {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Uint32) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Uint32(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Uint32) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Uint32{}
+ } else {
+ v.V = l.Uint32()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Uint32) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Uint32) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Uint32) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Uint32) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint64.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint64.go
new file mode 100644
index 000000000..0e623c62d
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint64.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Uint64 struct {
+ V uint64
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OUint64(v uint64) Uint64 {
+ return Uint64{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Uint64) Get(deflt uint64) uint64 {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Uint64) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Uint64(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Uint64) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Uint64{}
+ } else {
+ v.V = l.Uint64()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Uint64) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Uint64) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Uint64) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Uint64) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint8.go b/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint8.go
new file mode 100644
index 000000000..c629e4453
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint8.go
@@ -0,0 +1,79 @@
+// generated by gotemplate
+
+package opt
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Uint8 struct {
+ V uint8
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OUint8(v uint8) Uint8 {
+ return Uint8{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Uint8) Get(deflt uint8) uint8 {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Uint8) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Uint8(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Uint8) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Uint8{}
+ } else {
+ v.V = l.Uint8()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Uint8) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Uint8) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Uint8) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Uint8) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/optional/opt.go b/vendor/github.com/mailru/easyjson/opt/optional/opt.go
new file mode 100644
index 000000000..277dd1a3b
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/optional/opt.go
@@ -0,0 +1,80 @@
+// +build none
+
+package optional
+
+import (
+ "fmt"
+
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// template type Optional(A)
+type A int
+
+// A 'gotemplate'-based type for providing optional semantics without using pointers.
+type Optional struct {
+ V A
+ Defined bool
+}
+
+// Creates an optional type with a given value.
+func OOptional(v A) Optional {
+ return Optional{V: v, Defined: true}
+}
+
+// Get returns the value or given default in the case the value is undefined.
+func (v Optional) Get(deflt A) A {
+ if !v.Defined {
+ return deflt
+ }
+ return v.V
+}
+
+// MarshalEasyJSON does JSON marshaling using easyjson interface.
+func (v Optional) MarshalEasyJSON(w *jwriter.Writer) {
+ if v.Defined {
+ w.Optional(v.V)
+ } else {
+ w.RawString("null")
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface.
+func (v *Optional) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ if l.IsNull() {
+ l.Skip()
+ *v = Optional{}
+ } else {
+ v.V = l.Optional()
+ v.Defined = true
+ }
+}
+
+// MarshalJSON implements a standard json marshaler interface.
+func (v Optional) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ v.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// UnmarshalJSON implements a standard json unmarshaler interface.
+func (v *Optional) UnmarshalJSON(data []byte) error {
+ l := jlexer.Lexer{Data: data}
+ v.UnmarshalEasyJSON(&l)
+ return l.Error()
+}
+
+// IsDefined returns whether the value is defined; a function is required so that it can
+// be used in an interface.
+func (v Optional) IsDefined() bool {
+ return v.Defined
+}
+
+// String implements a stringer interface using fmt.Sprint for the value.
+func (v Optional) String() string {
+ if !v.Defined {
+ return "<undefined>"
+ }
+ return fmt.Sprint(v.V)
+}
diff --git a/vendor/github.com/mailru/easyjson/opt/opts.go b/vendor/github.com/mailru/easyjson/opt/opts.go
new file mode 100644
index 000000000..3617f7f9f
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/opt/opts.go
@@ -0,0 +1,22 @@
+package opt
+
+//go:generate sed -i "s/\\+build none/generated by gotemplate/" optional/opt.go
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Int(int)
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Uint(uint)
+
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Int8(int8)
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Int16(int16)
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Int32(int32)
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Int64(int64)
+
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Uint8(uint8)
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Uint16(uint16)
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Uint32(uint32)
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Uint64(uint64)
+
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Float32(float32)
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Float64(float64)
+
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Bool(bool)
+//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" String(string)
+//go:generate sed -i "s/generated by gotemplate/+build none/" optional/opt.go
diff --git a/vendor/github.com/mailru/easyjson/parser/parser.go b/vendor/github.com/mailru/easyjson/parser/parser.go
new file mode 100644
index 000000000..5bd06e946
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/parser/parser.go
@@ -0,0 +1,97 @@
+package parser
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "os/exec"
+ "strings"
+)
+
+const structComment = "easyjson:json"
+
+type Parser struct {
+ PkgPath string
+ PkgName string
+ StructNames []string
+ AllStructs bool
+}
+
+type visitor struct {
+ *Parser
+
+ name string
+ explicit bool
+}
+
+func (p *Parser) needType(comments string) bool {
+ for _, v := range strings.Split(comments, "\n") {
+ if strings.HasPrefix(v, structComment) {
+ return true
+ }
+ }
+ return false
+}
+
+func (v *visitor) Visit(n ast.Node) (w ast.Visitor) {
+ switch n := n.(type) {
+ case *ast.Package:
+ return v
+ case *ast.File:
+ v.PkgName = n.Name.String()
+ return v
+
+ case *ast.GenDecl:
+ v.explicit = v.needType(n.Doc.Text())
+
+ if !v.explicit && !v.AllStructs {
+ return nil
+ }
+ return v
+ case *ast.TypeSpec:
+ v.name = n.Name.String()
+
+ // Allow specifying non-structs explicitly, independent of the '-all' flag.
+ if v.explicit {
+ v.StructNames = append(v.StructNames, v.name)
+ return nil
+ }
+ return v
+ case *ast.StructType:
+ v.StructNames = append(v.StructNames, v.name)
+ return nil
+ }
+ return nil
+}
+
+func (p *Parser) Parse(fname string, isDir bool) error {
+ var err error
+ if p.PkgPath, err = getPkgPath(fname, isDir); err != nil {
+ return err
+ }
+
+ fset := token.NewFileSet()
+ if isDir {
+ packages, err := parser.ParseDir(fset, fname, nil, parser.ParseComments)
+ if err != nil {
+ return err
+ }
+
+ for _, pckg := range packages {
+ ast.Walk(&visitor{Parser: p}, pckg)
+ }
+ } else {
+ f, err := parser.ParseFile(fset, fname, nil, parser.ParseComments)
+ if err != nil {
+ return err
+ }
+
+ ast.Walk(&visitor{Parser: p}, f)
+ }
+ return nil
+}
+
+func getDefaultGoPath() (string, error) {
+ output, err := exec.Command("go", "env", "GOPATH").Output()
+ return strings.TrimSpace(string(output)), err // trim the trailing newline printed by 'go env'
+}
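
A hypothetical driver (illustrative, not part of the vendored diff) showing how this Parser is meant to be used: point Parse at a file or directory, and it fills in PkgPath, PkgName and the names of structs annotated with easyjson:json (or of all structs when AllStructs is set).

package main

import (
	"fmt"
	"log"

	"github.com/mailru/easyjson/parser"
)

func main() {
	p := parser.Parser{AllStructs: false}
	// The second argument says whether fname is a directory; "data.go" is an
	// illustrative file name.
	if err := p.Parse("data.go", false); err != nil {
		log.Fatalf("parse: %v", err)
	}
	fmt.Println(p.PkgPath, p.PkgName, p.StructNames)
}
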
diff --git a/vendor/github.com/mailru/easyjson/parser/parser_unix.go b/vendor/github.com/mailru/easyjson/parser/parser_unix.go
new file mode 100644
index 000000000..09b20a2e1
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/parser/parser_unix.go
@@ -0,0 +1,42 @@
+// +build !windows
+
+package parser
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "strings"
+)
+
+func getPkgPath(fname string, isDir bool) (string, error) {
+ if !path.IsAbs(fname) {
+ pwd, err := os.Getwd()
+ if err != nil {
+ return "", err
+ }
+ fname = path.Join(pwd, fname)
+ }
+
+ gopath := os.Getenv("GOPATH")
+ if gopath == "" {
+ var err error
+ gopath, err = getDefaultGoPath()
+ if err != nil {
+ return "", fmt.Errorf("cannot determine GOPATH: %s", err)
+ }
+ }
+
+ for _, p := range strings.Split(gopath, ":") {
+ prefix := path.Join(p, "src") + "/"
+ if rel := strings.TrimPrefix(fname, prefix); rel != fname {
+ if !isDir {
+ return path.Dir(rel), nil
+ } else {
+ return path.Clean(rel), nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("file '%v' is not in GOPATH", fname)
+}
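
For illustration (assumed paths, not part of the vendored diff), the lookup above maps an absolute file name back to an import path by stripping the "src" prefix of each GOPATH entry:

// With GOPATH=/home/user/go:
// getPkgPath("/home/user/go/src/github.com/mailru/easyjson/opt.go", false)
//   -> "github.com/mailru/easyjson"  (path.Dir of the GOPATH-relative path)
// getPkgPath("/home/user/go/src/github.com/mailru/easyjson", true)
//   -> "github.com/mailru/easyjson"  (path.Clean of the GOPATH-relative path)
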
diff --git a/vendor/github.com/mailru/easyjson/parser/parser_windows.go b/vendor/github.com/mailru/easyjson/parser/parser_windows.go
new file mode 100644
index 000000000..90d3a78b5
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/parser/parser_windows.go
@@ -0,0 +1,49 @@
+package parser
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+)
+
+func normalizePath(path string) string {
+ // use lower case, as Windows file systems will almost always be case insensitive
+ return strings.ToLower(strings.Replace(path, "\\", "/", -1))
+}
+
+func getPkgPath(fname string, isDir bool) (string, error) {
+ // path.IsAbs doesn't work properly on Windows; use filepath.IsAbs instead
+ if !filepath.IsAbs(fname) {
+ pwd, err := os.Getwd()
+ if err != nil {
+ return "", err
+ }
+ fname = path.Join(pwd, fname)
+ }
+
+ fname = normalizePath(fname)
+
+ gopath := os.Getenv("GOPATH")
+ if gopath == "" {
+ var err error
+ gopath, err = getDefaultGoPath()
+ if err != nil {
+ return "", fmt.Errorf("cannot determine GOPATH: %s", err)
+ }
+ }
+
+ for _, p := range strings.Split(gopath, ";") {
+ prefix := path.Join(normalizePath(p), "src") + "/"
+ if rel := strings.TrimPrefix(fname, prefix); rel != fname {
+ if !isDir {
+ return path.Dir(rel), nil
+ } else {
+ return path.Clean(rel), nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("file '%v' is not in GOPATH", fname)
+}
diff --git a/vendor/github.com/mailru/easyjson/raw.go b/vendor/github.com/mailru/easyjson/raw.go
new file mode 100644
index 000000000..81bd002e1
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/raw.go
@@ -0,0 +1,45 @@
+package easyjson
+
+import (
+ "github.com/mailru/easyjson/jlexer"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+// RawMessage is a raw piece of JSON (number, string, bool, object, array or
+// null) that is extracted without parsing and output as is during marshaling.
+type RawMessage []byte
+
+// MarshalEasyJSON does JSON marshaling using the easyjson interface.
+func (v *RawMessage) MarshalEasyJSON(w *jwriter.Writer) {
+ if len(*v) == 0 {
+ w.RawString("null")
+ } else {
+ w.Raw(*v, nil)
+ }
+}
+
+// UnmarshalEasyJSON does JSON unmarshaling using the easyjson interface.
+func (v *RawMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ *v = RawMessage(l.Raw())
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler interface.
+func (v *RawMessage) UnmarshalJSON(data []byte) error {
+ *v = data
+ return nil
+}
+
+var nullBytes = []byte("null")
+
+// MarshalJSON implements encoding/json.Marshaler interface.
+func (v RawMessage) MarshalJSON() ([]byte, error) {
+ if len(v) == 0 {
+ return nullBytes, nil
+ }
+ return v, nil
+}
+
+// IsDefined is required for integration with easyjson's omitempty logic.
+func (v *RawMessage) IsDefined() bool {
+ return len(*v) > 0
+}
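
A minimal sketch (illustrative names, not part of the vendored diff) of the main use of RawMessage: keep one field as raw bytes and decode it later, once the surrounding data says what it contains. Because RawMessage implements encoding/json's Marshaler and Unmarshaler, it also works with the standard library:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/mailru/easyjson"
)

// Envelope and its fields are illustrative.
type Envelope struct {
	Kind    string              `json:"kind"`
	Payload easyjson.RawMessage `json:"payload"`
}

func main() {
	var e Envelope
	if err := json.Unmarshal([]byte(`{"kind":"point","payload":{"x":1,"y":2}}`), &e); err != nil {
		panic(err)
	}
	// Payload was captured verbatim; decode it now that Kind is known.
	var pt struct{ X, Y int }
	if err := json.Unmarshal(e.Payload, &pt); err != nil {
		panic(err)
	}
	fmt.Println(e.Kind, pt.X, pt.Y) // point 1 2
}
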
diff --git a/vendor/github.com/mailru/easyjson/tests/basic_test.go b/vendor/github.com/mailru/easyjson/tests/basic_test.go
new file mode 100644
index 000000000..018678402
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/tests/basic_test.go
@@ -0,0 +1,231 @@
+package tests
+
+import (
+ "reflect"
+ "testing"
+
+ "encoding/json"
+
+ "github.com/mailru/easyjson"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+type testType interface {
+ json.Marshaler
+ json.Unmarshaler
+}
+
+var testCases = []struct {
+ Decoded testType
+ Encoded string
+}{
+ {&primitiveTypesValue, primitiveTypesString},
+ {&namedPrimitiveTypesValue, namedPrimitiveTypesString},
+ {&structsValue, structsString},
+ {&omitEmptyValue, omitEmptyString},
+ {&snakeStructValue, snakeStructString},
+ {&omitEmptyDefaultValue, omitEmptyDefaultString},
+ {&optsValue, optsString},
+ {&rawValue, rawString},
+ {&stdMarshalerValue, stdMarshalerString},
+ {&userMarshalerValue, userMarshalerString},
+ {&unexportedStructValue, unexportedStructString},
+ {&excludedFieldValue, excludedFieldString},
+ {&sliceValue, sliceString},
+ {&arrayValue, arrayString},
+ {&mapsValue, mapsString},
+ {&deepNestValue, deepNestString},
+ {&IntsValue, IntsString},
+ {&mapStringStringValue, mapStringStringString},
+ {&namedTypeValue, namedTypeValueString},
+ {&mapMyIntStringValue, mapMyIntStringValueString},
+ {&mapIntStringValue, mapIntStringValueString},
+ {&mapInt32StringValue, mapInt32StringValueString},
+ {&mapInt64StringValue, mapInt64StringValueString},
+ {&mapUintStringValue, mapUintStringValueString},
+ {&mapUint32StringValue, mapUint32StringValueString},
+ {&mapUint64StringValue, mapUint64StringValueString},
+ {&mapUintptrStringValue, mapUintptrStringValueString},
+ {&intKeyedMapStructValue, intKeyedMapStructValueString},
+}
+
+func TestMarshal(t *testing.T) {
+ for i, test := range testCases {
+ data, err := test.Decoded.MarshalJSON()
+ if err != nil {
+ t.Errorf("[%d, %T] MarshalJSON() error: %v", i, test.Decoded, err)
+ }
+
+ got := string(data)
+ if got != test.Encoded {
+ t.Errorf("[%d, %T] MarshalJSON(): got \n%v\n\t\t want \n%v", i, test.Decoded, got, test.Encoded)
+ }
+ }
+}
+
+func TestUnmarshal(t *testing.T) {
+ for i, test := range testCases {
+ v1 := reflect.New(reflect.TypeOf(test.Decoded).Elem()).Interface()
+ v := v1.(testType)
+
+ err := v.UnmarshalJSON([]byte(test.Encoded))
+ if err != nil {
+ t.Errorf("[%d, %T] UnmarshalJSON() error: %v", i, test.Decoded, err)
+ }
+
+ if !reflect.DeepEqual(v, test.Decoded) {
+ t.Errorf("[%d, %T] UnmarshalJSON(): got \n%+v\n\t\t want \n%+v", i, test.Decoded, v, test.Decoded)
+ }
+ }
+}
+
+func TestRawMessageSTD(t *testing.T) {
+ type T struct {
+ F easyjson.RawMessage
+ Fnil easyjson.RawMessage
+ }
+
+ val := T{F: easyjson.RawMessage([]byte(`"test"`))}
+ str := `{"F":"test","Fnil":null}`
+
+ data, err := json.Marshal(val)
+ if err != nil {
+ t.Errorf("json.Marshal() error: %v", err)
+ }
+ got := string(data)
+ if got != str {
+ t.Errorf("json.Marshal() = %v; want %v", got, str)
+ }
+
+ wantV := T{F: easyjson.RawMessage([]byte(`"test"`)), Fnil: easyjson.RawMessage([]byte("null"))}
+ var gotV T
+
+ err = json.Unmarshal([]byte(str), &gotV)
+ if err != nil {
+ t.Errorf("json.Unmarshal() error: %v", err)
+ }
+ if !reflect.DeepEqual(gotV, wantV) {
+ t.Errorf("json.Unmarshal() = %v; want %v", gotV, wantV)
+ }
+}
+
+func TestParseNull(t *testing.T) {
+ var got, want SubStruct
+ if err := easyjson.Unmarshal([]byte("null"), &got); err != nil {
+ t.Errorf("Unmarshal() error: %v", err)
+ }
+
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("Unmarshal() = %+v; want %+v", got, want)
+ }
+}
+
+var testSpecialCases = []struct {
+ EncodedString string
+ Value string
+}{
+ {`"Username \u003cuser@example.com\u003e"`, `Username <user@example.com>`},
+ {`"Username\ufffd"`, "Username\xc5"},
+ {`"тестzтест"`, "тестzтест"},
+ {`"тест\ufffdтест"`, "тест\xc5тест"},
+ {`"绿茶"`, "绿茶"},
+ {`"绿\ufffd茶"`, "绿\xc5茶"},
+ {`"тест\u2028"`, "тест\xE2\x80\xA8"},
+ {`"\\\r\n\t\""`, "\\\r\n\t\""},
+ {`"ü"`, "ü"},
+}
+
+func TestSpecialCases(t *testing.T) {
+ for i, test := range testSpecialCases {
+ w := jwriter.Writer{}
+ w.String(test.Value)
+ got := string(w.Buffer.BuildBytes())
+ if got != test.EncodedString {
+ t.Errorf("[%d] Encoded() = %+v; want %+v", i, got, test.EncodedString)
+ }
+ }
+}
+
+func TestOverflowArray(t *testing.T) {
+ var a Arrays
+ err := easyjson.Unmarshal([]byte(arrayOverflowString), &a)
+ if err != nil {
+ t.Error(err)
+ }
+ if a != arrayValue {
+ t.Errorf("Unmarshal(%v) = %+v; want %+v", arrayOverflowString, a, arrayValue)
+ }
+}
+
+func TestUnderflowArray(t *testing.T) {
+ var a Arrays
+ err := easyjson.Unmarshal([]byte(arrayUnderflowString), &a)
+ if err != nil {
+ t.Error(err)
+ }
+ if a != arrayUnderflowValue {
+ t.Errorf("Unmarshal(%v) = %+v; want %+v", arrayUnderflowString, a, arrayUnderflowValue)
+ }
+}
+
+func TestEncodingFlags(t *testing.T) {
+ for i, test := range []struct {
+ Flags jwriter.Flags
+ In easyjson.Marshaler
+ Want string
+ }{
+ {0, EncodingFlagsTestMap{}, `{"F":null}`},
+ {0, EncodingFlagsTestSlice{}, `{"F":null}`},
+ {jwriter.NilMapAsEmpty, EncodingFlagsTestMap{}, `{"F":{}}`},
+ {jwriter.NilSliceAsEmpty, EncodingFlagsTestSlice{}, `{"F":[]}`},
+ } {
+ w := &jwriter.Writer{Flags: test.Flags}
+ test.In.MarshalEasyJSON(w)
+
+ data, err := w.BuildBytes()
+ if err != nil {
+ t.Errorf("[%v] easyjson.Marshal(%+v) error: %v", i, test.In, err)
+ }
+
+ v := string(data)
+ if v != test.Want {
+ t.Errorf("[%v] easyjson.Marshal(%+v) = %v; want %v", i, test.In, v, test.Want)
+ }
+ }
+
+}
+
+func TestNestedEasyJsonMarshal(t *testing.T) {
+ n := map[string]*NestedEasyMarshaler{
+ "Value": {},
+ "Slice1": {},
+ "Slice2": {},
+ "Map1": {},
+ "Map2": {},
+ }
+
+ ni := NestedInterfaces{
+ Value: n["Value"],
+ Slice: []interface{}{n["Slice1"], n["Slice2"]},
+ Map: map[string]interface{}{"1": n["Map1"], "2": n["Map2"]},
+ }
+ easyjson.Marshal(ni)
+
+ for k, v := range n {
+ if !v.EasilyMarshaled {
+ t.Errorf("Nested interface %s wasn't easily marshaled", k)
+ }
+ }
+}
+
+func TestUnmarshalStructWithEmbeddedPtrStruct(t *testing.T) {
+ var s = StructWithInterface{Field2: &EmbeddedStruct{}}
+ var err error
+ err = easyjson.Unmarshal([]byte(structWithInterfaceString), &s)
+ if err != nil {
+ t.Errorf("easyjson.Unmarshal() error: %v", err)
+ }
+ if !reflect.DeepEqual(s, structWithInterfaceValueFilled) {
+ t.Errorf("easyjson.Unmarshal() = %#v; want %#v", s, structWithInterfaceValueFilled)
+ }
+}
diff --git a/vendor/github.com/mailru/easyjson/tests/data.go b/vendor/github.com/mailru/easyjson/tests/data.go
new file mode 100644
index 000000000..145f093d6
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/tests/data.go
@@ -0,0 +1,759 @@
+package tests
+
+import (
+ "fmt"
+ "math"
+ "net"
+ "time"
+
+ "github.com/mailru/easyjson"
+ "github.com/mailru/easyjson/opt"
+)
+
+type PrimitiveTypes struct {
+ String string
+ Bool bool
+
+ Int int
+ Int8 int8
+ Int16 int16
+ Int32 int32
+ Int64 int64
+
+ Uint uint
+ Uint8 uint8
+ Uint16 uint16
+ Uint32 uint32
+ Uint64 uint64
+
+ IntString int `json:",string"`
+ Int8String int8 `json:",string"`
+ Int16String int16 `json:",string"`
+ Int32String int32 `json:",string"`
+ Int64String int64 `json:",string"`
+
+ UintString uint `json:",string"`
+ Uint8String uint8 `json:",string"`
+ Uint16String uint16 `json:",string"`
+ Uint32String uint32 `json:",string"`
+ Uint64String uint64 `json:",string"`
+
+ Float32 float32
+ Float64 float64
+
+ Ptr *string
+ PtrNil *string
+}
+
+var str = "bla"
+
+var primitiveTypesValue = PrimitiveTypes{
+ String: "test", Bool: true,
+
+ Int: math.MinInt32,
+ Int8: math.MinInt8,
+ Int16: math.MinInt16,
+ Int32: math.MinInt32,
+ Int64: math.MinInt64,
+
+ Uint: math.MaxUint32,
+ Uint8: math.MaxUint8,
+ Uint16: math.MaxUint16,
+ Uint32: math.MaxUint32,
+ Uint64: math.MaxUint64,
+
+ IntString: math.MinInt32,
+ Int8String: math.MinInt8,
+ Int16String: math.MinInt16,
+ Int32String: math.MinInt32,
+ Int64String: math.MinInt64,
+
+ UintString: math.MaxUint32,
+ Uint8String: math.MaxUint8,
+ Uint16String: math.MaxUint16,
+ Uint32String: math.MaxUint32,
+ Uint64String: math.MaxUint64,
+
+ Float32: 1.5,
+ Float64: math.MaxFloat64,
+
+ Ptr: &str,
+}
+
+var primitiveTypesString = "{" +
+ `"String":"test","Bool":true,` +
+
+ `"Int":` + fmt.Sprint(math.MinInt32) + `,` +
+ `"Int8":` + fmt.Sprint(math.MinInt8) + `,` +
+ `"Int16":` + fmt.Sprint(math.MinInt16) + `,` +
+ `"Int32":` + fmt.Sprint(math.MinInt32) + `,` +
+ `"Int64":` + fmt.Sprint(int64(math.MinInt64)) + `,` +
+
+ `"Uint":` + fmt.Sprint(uint32(math.MaxUint32)) + `,` +
+ `"Uint8":` + fmt.Sprint(math.MaxUint8) + `,` +
+ `"Uint16":` + fmt.Sprint(math.MaxUint16) + `,` +
+ `"Uint32":` + fmt.Sprint(uint32(math.MaxUint32)) + `,` +
+ `"Uint64":` + fmt.Sprint(uint64(math.MaxUint64)) + `,` +
+
+ `"IntString":"` + fmt.Sprint(math.MinInt32) + `",` +
+ `"Int8String":"` + fmt.Sprint(math.MinInt8) + `",` +
+ `"Int16String":"` + fmt.Sprint(math.MinInt16) + `",` +
+ `"Int32String":"` + fmt.Sprint(math.MinInt32) + `",` +
+ `"Int64String":"` + fmt.Sprint(int64(math.MinInt64)) + `",` +
+
+ `"UintString":"` + fmt.Sprint(uint32(math.MaxUint32)) + `",` +
+ `"Uint8String":"` + fmt.Sprint(math.MaxUint8) + `",` +
+ `"Uint16String":"` + fmt.Sprint(math.MaxUint16) + `",` +
+ `"Uint32String":"` + fmt.Sprint(uint32(math.MaxUint32)) + `",` +
+ `"Uint64String":"` + fmt.Sprint(uint64(math.MaxUint64)) + `",` +
+
+ `"Float32":` + fmt.Sprint(1.5) + `,` +
+ `"Float64":` + fmt.Sprint(math.MaxFloat64) + `,` +
+
+ `"Ptr":"bla",` +
+ `"PtrNil":null` +
+
+ "}"
+
+type (
+ NamedString string
+ NamedBool bool
+
+ NamedInt int
+ NamedInt8 int8
+ NamedInt16 int16
+ NamedInt32 int32
+ NamedInt64 int64
+
+ NamedUint uint
+ NamedUint8 uint8
+ NamedUint16 uint16
+ NamedUint32 uint32
+ NamedUint64 uint64
+
+ NamedFloat32 float32
+ NamedFloat64 float64
+
+ NamedStrPtr *string
+)
+
+type NamedPrimitiveTypes struct {
+ String NamedString
+ Bool NamedBool
+
+ Int NamedInt
+ Int8 NamedInt8
+ Int16 NamedInt16
+ Int32 NamedInt32
+ Int64 NamedInt64
+
+ Uint NamedUint
+ Uint8 NamedUint8
+ Uint16 NamedUint16
+ Uint32 NamedUint32
+ Uint64 NamedUint64
+
+ Float32 NamedFloat32
+ Float64 NamedFloat64
+
+ Ptr NamedStrPtr
+ PtrNil NamedStrPtr
+}
+
+var namedPrimitiveTypesValue = NamedPrimitiveTypes{
+ String: "test",
+ Bool: true,
+
+ Int: math.MinInt32,
+ Int8: math.MinInt8,
+ Int16: math.MinInt16,
+ Int32: math.MinInt32,
+ Int64: math.MinInt64,
+
+ Uint: math.MaxUint32,
+ Uint8: math.MaxUint8,
+ Uint16: math.MaxUint16,
+ Uint32: math.MaxUint32,
+ Uint64: math.MaxUint64,
+
+ Float32: 1.5,
+ Float64: math.MaxFloat64,
+
+ Ptr: NamedStrPtr(&str),
+}
+
+var namedPrimitiveTypesString = "{" +
+ `"String":"test",` +
+ `"Bool":true,` +
+
+ `"Int":` + fmt.Sprint(math.MinInt32) + `,` +
+ `"Int8":` + fmt.Sprint(math.MinInt8) + `,` +
+ `"Int16":` + fmt.Sprint(math.MinInt16) + `,` +
+ `"Int32":` + fmt.Sprint(math.MinInt32) + `,` +
+ `"Int64":` + fmt.Sprint(int64(math.MinInt64)) + `,` +
+
+ `"Uint":` + fmt.Sprint(uint32(math.MaxUint32)) + `,` +
+ `"Uint8":` + fmt.Sprint(math.MaxUint8) + `,` +
+ `"Uint16":` + fmt.Sprint(math.MaxUint16) + `,` +
+ `"Uint32":` + fmt.Sprint(uint32(math.MaxUint32)) + `,` +
+ `"Uint64":` + fmt.Sprint(uint64(math.MaxUint64)) + `,` +
+
+ `"Float32":` + fmt.Sprint(1.5) + `,` +
+ `"Float64":` + fmt.Sprint(math.MaxFloat64) + `,` +
+
+ `"Ptr":"bla",` +
+ `"PtrNil":null` +
+ "}"
+
+type SubStruct struct {
+ Value string
+ Value2 string
+ unexported bool
+}
+
+type SubP struct {
+ V string
+}
+
+type SubStructAlias SubStruct
+
+type Structs struct {
+ SubStruct
+ *SubP
+
+ Value2 int
+
+ Sub1 SubStruct `json:"substruct"`
+ Sub2 *SubStruct
+ SubNil *SubStruct
+
+ SubSlice []SubStruct
+ SubSliceNil []SubStruct
+
+ SubPtrSlice []*SubStruct
+ SubPtrSliceNil []*SubStruct
+
+ SubA1 SubStructAlias
+ SubA2 *SubStructAlias
+
+ Anonymous struct {
+ V string
+ I int
+ }
+ Anonymous1 *struct {
+ V string
+ }
+
+ AnonymousSlice []struct{ V int }
+ AnonymousPtrSlice []*struct{ V int }
+
+ Slice []string
+
+ unexported bool
+}
+
+var structsValue = Structs{
+ SubStruct: SubStruct{Value: "test"},
+ SubP: &SubP{V: "subp"},
+
+ Value2: 5,
+
+ Sub1: SubStruct{Value: "test1", Value2: "v"},
+ Sub2: &SubStruct{Value: "test2", Value2: "v2"},
+
+ SubSlice: []SubStruct{
+ {Value: "s1"},
+ {Value: "s2"},
+ },
+
+ SubPtrSlice: []*SubStruct{
+ {Value: "p1"},
+ {Value: "p2"},
+ },
+
+ SubA1: SubStructAlias{Value: "test3", Value2: "v3"},
+ SubA2: &SubStructAlias{Value: "test4", Value2: "v4"},
+
+ Anonymous: struct {
+ V string
+ I int
+ }{V: "bla", I: 5},
+
+ Anonymous1: &struct {
+ V string
+ }{V: "bla1"},
+
+ AnonymousSlice: []struct{ V int }{{1}, {2}},
+ AnonymousPtrSlice: []*struct{ V int }{{3}, {4}},
+
+ Slice: []string{"test5", "test6"},
+}
+
+var structsString = "{" +
+ `"Value2":5,` +
+
+ `"substruct":{"Value":"test1","Value2":"v"},` +
+ `"Sub2":{"Value":"test2","Value2":"v2"},` +
+ `"SubNil":null,` +
+
+ `"SubSlice":[{"Value":"s1","Value2":""},{"Value":"s2","Value2":""}],` +
+ `"SubSliceNil":null,` +
+
+ `"SubPtrSlice":[{"Value":"p1","Value2":""},{"Value":"p2","Value2":""}],` +
+ `"SubPtrSliceNil":null,` +
+
+ `"SubA1":{"Value":"test3","Value2":"v3"},` +
+ `"SubA2":{"Value":"test4","Value2":"v4"},` +
+
+ `"Anonymous":{"V":"bla","I":5},` +
+ `"Anonymous1":{"V":"bla1"},` +
+
+ `"AnonymousSlice":[{"V":1},{"V":2}],` +
+ `"AnonymousPtrSlice":[{"V":3},{"V":4}],` +
+
+ `"Slice":["test5","test6"],` +
+
+ // Embedded fields go last.
+ `"V":"subp",` +
+ `"Value":"test"` +
+ "}"
+
+type OmitEmpty struct {
+ // NOTE: first field is empty to test comma printing.
+
+ StrE, StrNE string `json:",omitempty"`
+ PtrE, PtrNE *string `json:",omitempty"`
+
+ IntNE int `json:"intField,omitempty"`
+ IntE int `json:",omitempty"`
+
+ // NOTE: omitempty has no effect on non-pointer struct fields.
+ SubE, SubNE SubStruct `json:",omitempty"`
+ SubPE, SubPNE *SubStruct `json:",omitempty"`
+}
+
+var omitEmptyValue = OmitEmpty{
+ StrNE: "str",
+ PtrNE: &str,
+ IntNE: 6,
+ SubNE: SubStruct{Value: "1", Value2: "2"},
+ SubPNE: &SubStruct{Value: "3", Value2: "4"},
+}
+
+var omitEmptyString = "{" +
+ `"StrNE":"str",` +
+ `"PtrNE":"bla",` +
+ `"intField":6,` +
+ `"SubE":{"Value":"","Value2":""},` +
+ `"SubNE":{"Value":"1","Value2":"2"},` +
+ `"SubPNE":{"Value":"3","Value2":"4"}` +
+ "}"
+
+type Opts struct {
+ StrNull opt.String
+ StrEmpty opt.String
+ Str opt.String
+ StrOmitempty opt.String `json:",omitempty"`
+
+ IntNull opt.Int
+ IntZero opt.Int
+ Int opt.Int
+}
+
+var optsValue = Opts{
+ StrEmpty: opt.OString(""),
+ Str: opt.OString("test"),
+
+ IntZero: opt.OInt(0),
+ Int: opt.OInt(5),
+}
+
+var optsString = `{` +
+ `"StrNull":null,` +
+ `"StrEmpty":"",` +
+ `"Str":"test",` +
+ `"IntNull":null,` +
+ `"IntZero":0,` +
+ `"Int":5` +
+ `}`
+
+type Raw struct {
+ Field easyjson.RawMessage
+ Field2 string
+}
+
+var rawValue = Raw{
+ Field: []byte(`{"a" : "b"}`),
+ Field2: "test",
+}
+
+var rawString = `{` +
+ `"Field":{"a" : "b"},` +
+ `"Field2":"test"` +
+ `}`
+
+type StdMarshaler struct {
+ T time.Time
+ IP net.IP
+}
+
+var stdMarshalerValue = StdMarshaler{
+ T: time.Date(2016, 01, 02, 14, 15, 10, 0, time.UTC),
+ IP: net.IPv4(192, 168, 0, 1),
+}
+var stdMarshalerString = `{` +
+ `"T":"2016-01-02T14:15:10Z",` +
+ `"IP":"192.168.0.1"` +
+ `}`
+
+type UserMarshaler struct {
+ V vMarshaler
+ T tMarshaler
+}
+
+type vMarshaler net.IP
+
+func (v vMarshaler) MarshalJSON() ([]byte, error) {
+ return []byte(`"0::0"`), nil
+}
+
+func (v *vMarshaler) UnmarshalJSON([]byte) error {
+ *v = vMarshaler(net.IPv6zero)
+ return nil
+}
+
+type tMarshaler net.IP
+
+func (v tMarshaler) MarshalText() ([]byte, error) {
+ return []byte(`[0::0]`), nil
+}
+
+func (v *tMarshaler) UnmarshalText([]byte) error {
+ *v = tMarshaler(net.IPv6zero)
+ return nil
+}
+
+var userMarshalerValue = UserMarshaler{
+ V: vMarshaler(net.IPv6zero),
+ T: tMarshaler(net.IPv6zero),
+}
+var userMarshalerString = `{` +
+ `"V":"0::0",` +
+ `"T":"[0::0]"` +
+ `}`
+
+type unexportedStruct struct {
+ Value string
+}
+
+var unexportedStructValue = unexportedStruct{"test"}
+var unexportedStructString = `{"Value":"test"}`
+
+type ExcludedField struct {
+ Process bool `json:"process"`
+ DoNotProcess bool `json:"-"`
+ DoNotProcess1 bool `json:"-"`
+}
+
+var excludedFieldValue = ExcludedField{
+ Process: true,
+ DoNotProcess: false,
+ DoNotProcess1: false,
+}
+var excludedFieldString = `{"process":true}`
+
+type Slices struct {
+ ByteSlice []byte
+ EmptyByteSlice []byte
+ NilByteSlice []byte
+ IntSlice []int
+ EmptyIntSlice []int
+ NilIntSlice []int
+}
+
+var sliceValue = Slices{
+ ByteSlice: []byte("abc"),
+ EmptyByteSlice: []byte{},
+ NilByteSlice: []byte(nil),
+ IntSlice: []int{1, 2, 3, 4, 5},
+ EmptyIntSlice: []int{},
+ NilIntSlice: []int(nil),
+}
+
+var sliceString = `{` +
+ `"ByteSlice":"YWJj",` +
+ `"EmptyByteSlice":"",` +
+ `"NilByteSlice":null,` +
+ `"IntSlice":[1,2,3,4,5],` +
+ `"EmptyIntSlice":[],` +
+ `"NilIntSlice":null` +
+ `}`
+
+type Arrays struct {
+ ByteArray [3]byte
+ EmptyByteArray [0]byte
+ IntArray [5]int
+ EmptyIntArray [0]int
+}
+
+var arrayValue = Arrays{
+ ByteArray: [3]byte{'a', 'b', 'c'},
+ EmptyByteArray: [0]byte{},
+ IntArray: [5]int{1, 2, 3, 4, 5},
+ EmptyIntArray: [0]int{},
+}
+
+var arrayString = `{` +
+ `"ByteArray":"YWJj",` +
+ `"EmptyByteArray":"",` +
+ `"IntArray":[1,2,3,4,5],` +
+ `"EmptyIntArray":[]` +
+ `}`
+
+var arrayOverflowString = `{` +
+ `"ByteArray":"YWJjbnNk",` +
+ `"EmptyByteArray":"YWJj",` +
+ `"IntArray":[1,2,3,4,5,6],` +
+ `"EmptyIntArray":[7,8]` +
+ `}`
+
+var arrayUnderflowValue = Arrays{
+ ByteArray: [3]byte{'x', 0, 0},
+ EmptyByteArray: [0]byte{},
+ IntArray: [5]int{1, 2, 0, 0, 0},
+ EmptyIntArray: [0]int{},
+}
+
+var arrayUnderflowString = `{` +
+ `"ByteArray":"eA==",` +
+ `"IntArray":[1,2]` +
+ `}`
+
+type Str string
+
+type Maps struct {
+ Map map[string]string
+ InterfaceMap map[string]interface{}
+ NilMap map[string]string
+
+ CustomMap map[Str]Str
+}
+
+var mapsValue = Maps{
+ Map: map[string]string{"A": "b"}, // only one item since map iteration is randomized
+ InterfaceMap: map[string]interface{}{"G": float64(1)},
+
+ CustomMap: map[Str]Str{"c": "d"},
+}
+
+var mapsString = `{` +
+ `"Map":{"A":"b"},` +
+ `"InterfaceMap":{"G":1},` +
+ `"NilMap":null,` +
+ `"CustomMap":{"c":"d"}` +
+ `}`
+
+type NamedSlice []Str
+type NamedMap map[Str]Str
+
+type DeepNest struct {
+ SliceMap map[Str][]Str
+ SliceMap1 map[Str][]Str
+ SliceMap2 map[Str][]Str
+ NamedSliceMap map[Str]NamedSlice
+ NamedMapMap map[Str]NamedMap
+ MapSlice []map[Str]Str
+ NamedSliceSlice []NamedSlice
+ NamedMapSlice []NamedMap
+ NamedStringSlice []NamedString
+}
+
+var deepNestValue = DeepNest{
+ SliceMap: map[Str][]Str{
+ "testSliceMap": []Str{
+ "0",
+ "1",
+ },
+ },
+ SliceMap1: map[Str][]Str{
+ "testSliceMap1": []Str(nil),
+ },
+ SliceMap2: map[Str][]Str{
+ "testSliceMap2": []Str{},
+ },
+ NamedSliceMap: map[Str]NamedSlice{
+ "testNamedSliceMap": NamedSlice{
+ "2",
+ "3",
+ },
+ },
+ NamedMapMap: map[Str]NamedMap{
+ "testNamedMapMap": NamedMap{
+ "key1": "value1",
+ },
+ },
+ MapSlice: []map[Str]Str{
+ map[Str]Str{
+ "testMapSlice": "someValue",
+ },
+ },
+ NamedSliceSlice: []NamedSlice{
+ NamedSlice{
+ "someValue1",
+ "someValue2",
+ },
+ NamedSlice{
+ "someValue3",
+ "someValue4",
+ },
+ },
+ NamedMapSlice: []NamedMap{
+ NamedMap{
+ "key2": "value2",
+ },
+ NamedMap{
+ "key3": "value3",
+ },
+ },
+ NamedStringSlice: []NamedString{
+ "value4", "value5",
+ },
+}
+
+var deepNestString = `{` +
+ `"SliceMap":{` +
+ `"testSliceMap":["0","1"]` +
+ `},` +
+ `"SliceMap1":{` +
+ `"testSliceMap1":null` +
+ `},` +
+ `"SliceMap2":{` +
+ `"testSliceMap2":[]` +
+ `},` +
+ `"NamedSliceMap":{` +
+ `"testNamedSliceMap":["2","3"]` +
+ `},` +
+ `"NamedMapMap":{` +
+ `"testNamedMapMap":{"key1":"value1"}` +
+ `},` +
+ `"MapSlice":[` +
+ `{"testMapSlice":"someValue"}` +
+ `],` +
+ `"NamedSliceSlice":[` +
+ `["someValue1","someValue2"],` +
+ `["someValue3","someValue4"]` +
+ `],` +
+ `"NamedMapSlice":[` +
+ `{"key2":"value2"},` +
+ `{"key3":"value3"}` +
+ `],` +
+ `"NamedStringSlice":["value4","value5"]` +
+ `}`
+
+//easyjson:json
+type Ints []int
+
+var IntsValue = Ints{1, 2, 3, 4, 5}
+
+var IntsString = `[1,2,3,4,5]`
+
+//easyjson:json
+type MapStringString map[string]string
+
+var mapStringStringValue = MapStringString{"a": "b"}
+
+var mapStringStringString = `{"a":"b"}`
+
+type RequiredOptionalStruct struct {
+ FirstName string `json:"first_name,required"`
+ Lastname string `json:"last_name"`
+}
+
+//easyjson:json
+type EncodingFlagsTestMap struct {
+ F map[string]string
+}
+
+//easyjson:json
+type EncodingFlagsTestSlice struct {
+ F []string
+}
+
+type StructWithInterface struct {
+ Field1 int `json:"f1"`
+ Field2 interface{} `json:"f2"`
+ Field3 string `json:"f3"`
+}
+
+type EmbeddedStruct struct {
+ Field1 int `json:"f1"`
+ Field2 string `json:"f2"`
+}
+
+var structWithInterfaceString = `{"f1":1,"f2":{"f1":11,"f2":"22"},"f3":"3"}`
+var structWithInterfaceValueFilled = StructWithInterface{1, &EmbeddedStruct{11, "22"}, "3"}
+
+//easyjson:json
+type MapIntString map[int]string
+
+var mapIntStringValue = MapIntString{3: "hi"}
+var mapIntStringValueString = `{"3":"hi"}`
+
+//easyjson:json
+type MapInt32String map[int32]string
+
+var mapInt32StringValue = MapInt32String{-354634382: "life"}
+var mapInt32StringValueString = `{"-354634382":"life"}`
+
+//easyjson:json
+type MapInt64String map[int64]string
+
+var mapInt64StringValue = MapInt64String{-3546343826724305832: "life"}
+var mapInt64StringValueString = `{"-3546343826724305832":"life"}`
+
+//easyjson:json
+type MapUintString map[uint]string
+
+var mapUintStringValue = MapUintString{42: "life"}
+var mapUintStringValueString = `{"42":"life"}`
+
+//easyjson:json
+type MapUint32String map[uint32]string
+
+var mapUint32StringValue = MapUint32String{354634382: "life"}
+var mapUint32StringValueString = `{"354634382":"life"}`
+
+//easyjson:json
+type MapUint64String map[uint64]string
+
+var mapUint64StringValue = MapUint64String{3546343826724305832: "life"}
+var mapUint64StringValueString = `{"3546343826724305832":"life"}`
+
+//easyjson:json
+type MapUintptrString map[uintptr]string
+
+var mapUintptrStringValue = MapUintptrString{272679208: "obj"}
+var mapUintptrStringValueString = `{"272679208":"obj"}`
+
+type MyInt int
+
+//easyjson:json
+type MapMyIntString map[MyInt]string
+
+var mapMyIntStringValue = MapMyIntString{MyInt(42): "life"}
+var mapMyIntStringValueString = `{"42":"life"}`
+
+//easyjson:json
+type IntKeyedMapStruct struct {
+ Foo MapMyIntString `json:"foo"`
+ Bar map[int16]MapUint32String `json:"bar"`
+}
+
+var intKeyedMapStructValue = IntKeyedMapStruct{
+ Foo: mapMyIntStringValue,
+ Bar: map[int16]MapUint32String{32: mapUint32StringValue},
+}
+var intKeyedMapStructValueString = `{` +
+ `"foo":{"42":"life"},` +
+ `"bar":{"32":{"354634382":"life"}}` +
+ `}`
diff --git a/vendor/github.com/mailru/easyjson/tests/errors.go b/vendor/github.com/mailru/easyjson/tests/errors.go
new file mode 100644
index 000000000..14360fcc2
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/tests/errors.go
@@ -0,0 +1,26 @@
+package tests
+
+//easyjson:json
+type ErrorIntSlice []int
+
+//easyjson:json
+type ErrorBoolSlice []bool
+
+//easyjson:json
+type ErrorUintSlice []uint
+
+//easyjson:json
+type ErrorStruct struct {
+ Int int `json:"int"`
+ String string `json:"string"`
+ Slice []int `json:"slice"`
+ IntSlice []int `json:"int_slice"`
+}
+
+type ErrorNestedStruct struct {
+ ErrorStruct ErrorStruct `json:"error_struct"`
+ Int int `json:"int"`
+}
+
+//easyjson:json
+type ErrorIntMap map[uint32]string
diff --git a/vendor/github.com/mailru/easyjson/tests/errors_test.go b/vendor/github.com/mailru/easyjson/tests/errors_test.go
new file mode 100644
index 000000000..40fa33544
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/tests/errors_test.go
@@ -0,0 +1,285 @@
+package tests
+
+import (
+ "testing"
+
+ "github.com/mailru/easyjson/jlexer"
+)
+
+func TestMultipleErrorsInt(t *testing.T) {
+ for i, test := range []struct {
+ Data []byte
+ Offsets []int
+ }{
+ {
+ Data: []byte(`[1, 2, 3, "4", "5"]`),
+ Offsets: []int{10, 15},
+ },
+ {
+ Data: []byte(`[1, {"2":"3"}, 3, "4"]`),
+ Offsets: []int{4, 18},
+ },
+ {
+ Data: []byte(`[1, "2", "3", "4", "5", "6"]`),
+ Offsets: []int{4, 9, 14, 19, 24},
+ },
+ {
+ Data: []byte(`[1, 2, 3, 4, "5"]`),
+ Offsets: []int{13},
+ },
+ {
+ Data: []byte(`[{"1": "2"}]`),
+ Offsets: []int{1},
+ },
+ } {
+ l := jlexer.Lexer{
+ Data: test.Data,
+ UseMultipleErrors: true,
+ }
+
+ var v ErrorIntSlice
+
+ v.UnmarshalEasyJSON(&l)
+
+ errors := l.GetNonFatalErrors()
+
+ if len(errors) != len(test.Offsets) {
+ t.Errorf("[%d] TestMultipleErrorsInt(): errornum: want: %d, got %d", i, len(test.Offsets), len(errors))
+ return
+ }
+
+ for ii, e := range errors {
+ if e.Offset != test.Offsets[ii] {
+ t.Errorf("[%d] TestMultipleErrorsInt(): offset[%d]: want %d, got %d", i, ii, test.Offsets[ii], e.Offset)
+ }
+ }
+ }
+}
+
+func TestMultipleErrorsBool(t *testing.T) {
+ for i, test := range []struct {
+ Data []byte
+ Offsets []int
+ }{
+ {
+ Data: []byte(`[true, false, true, false]`),
+ },
+ {
+ Data: []byte(`["test", "value", "lol", "1"]`),
+ Offsets: []int{1, 9, 18, 25},
+ },
+ {
+ Data: []byte(`[true, 42, {"a":"b", "c":"d"}, false]`),
+ Offsets: []int{7, 11},
+ },
+ } {
+ l := jlexer.Lexer{
+ Data: test.Data,
+ UseMultipleErrors: true,
+ }
+
+ var v ErrorBoolSlice
+ v.UnmarshalEasyJSON(&l)
+
+ errors := l.GetNonFatalErrors()
+
+ if len(errors) != len(test.Offsets) {
+ t.Errorf("[%d] TestMultipleErrorsBool(): errornum: want: %d, got %d", i, len(test.Offsets), len(errors))
+ return
+ }
+ for ii, e := range errors {
+ if e.Offset != test.Offsets[ii] {
+ t.Errorf("[%d] TestMultipleErrorsBool(): offset[%d]: want %d, got %d", i, ii, test.Offsets[ii], e.Offset)
+ }
+ }
+ }
+}
+
+func TestMultipleErrorsUint(t *testing.T) {
+ for i, test := range []struct {
+ Data []byte
+ Offsets []int
+ }{
+ {
+ Data: []byte(`[42, 42, 42]`),
+ },
+ {
+ Data: []byte(`[17, "42", 32]`),
+ Offsets: []int{5},
+ },
+ {
+ Data: []byte(`["zz", "zz"]`),
+ Offsets: []int{1, 7},
+ },
+ {
+ Data: []byte(`[{}, 42]`),
+ Offsets: []int{1},
+ },
+ } {
+ l := jlexer.Lexer{
+ Data: test.Data,
+ UseMultipleErrors: true,
+ }
+
+ var v ErrorUintSlice
+ v.UnmarshalEasyJSON(&l)
+
+ errors := l.GetNonFatalErrors()
+
+ if len(errors) != len(test.Offsets) {
+ t.Errorf("[%d] TestMultipleErrorsUint(): errornum: want: %d, got %d", i, len(test.Offsets), len(errors))
+ return
+ }
+ for ii, e := range errors {
+ if e.Offset != test.Offsets[ii] {
+ t.Errorf("[%d] TestMultipleErrorsUint(): offset[%d]: want %d, got %d", i, ii, test.Offsets[ii], e.Offset)
+ }
+ }
+ }
+}
+
+func TestMultipleErrorsStruct(t *testing.T) {
+ for i, test := range []struct {
+ Data []byte
+ Offsets []int
+ }{
+ {
+ Data: []byte(`{"string": "test", "slice":[42, 42, 42], "int_slice":[1, 2, 3]}`),
+ },
+ {
+ Data: []byte(`{"string": {"test": "test"}, "slice":[42, 42, 42], "int_slice":["1", 2, 3]}`),
+ Offsets: []int{11, 64},
+ },
+ {
+ Data: []byte(`{"slice": [42, 42], "string": {"test": "test"}, "int_slice":["1", "2", 3]}`),
+ Offsets: []int{30, 61, 66},
+ },
+ {
+ Data: []byte(`{"string": "test", "slice": {}}`),
+ Offsets: []int{28},
+ },
+ {
+ Data: []byte(`{"slice":5, "string" : "test"}`),
+ Offsets: []int{9},
+ },
+ {
+ Data: []byte(`{"slice" : "test", "string" : "test"}`),
+ Offsets: []int{11},
+ },
+ {
+ Data: []byte(`{"slice": "", "string" : {}, "int":{}}`),
+ Offsets: []int{10, 25, 35},
+ },
+ } {
+ l := jlexer.Lexer{
+ Data: test.Data,
+ UseMultipleErrors: true,
+ }
+ var v ErrorStruct
+ v.UnmarshalEasyJSON(&l)
+
+ errors := l.GetNonFatalErrors()
+
+ if len(errors) != len(test.Offsets) {
+ t.Errorf("[%d] TestMultipleErrorsStruct(): errornum: want: %d, got %d", i, len(test.Offsets), len(errors))
+ return
+ }
+ for ii, e := range errors {
+ if e.Offset != test.Offsets[ii] {
+ t.Errorf("[%d] TestMultipleErrorsStruct(): offset[%d]: want %d, got %d", i, ii, test.Offsets[ii], e.Offset)
+ }
+ }
+ }
+}
+
+func TestMultipleErrorsNestedStruct(t *testing.T) {
+ for i, test := range []struct {
+ Data []byte
+ Offsets []int
+ }{
+ {
+ Data: []byte(`{"error_struct":{}}`),
+ },
+ {
+ Data: []byte(`{"error_struct":5}`),
+ Offsets: []int{16},
+ },
+ {
+ Data: []byte(`{"error_struct":[]}`),
+ Offsets: []int{16},
+ },
+ {
+ Data: []byte(`{"error_struct":{"int":{}}}`),
+ Offsets: []int{23},
+ },
+ {
+ Data: []byte(`{"error_struct":{"int_slice":{}}, "int":4}`),
+ Offsets: []int{29},
+ },
+ {
+ Data: []byte(`{"error_struct":{"int_slice":["1", 2, "3"]}, "int":[]}`),
+ Offsets: []int{30, 38, 51},
+ },
+ } {
+ l := jlexer.Lexer{
+ Data: test.Data,
+ UseMultipleErrors: true,
+ }
+ var v ErrorNestedStruct
+ v.UnmarshalEasyJSON(&l)
+
+ errors := l.GetNonFatalErrors()
+
+ if len(errors) != len(test.Offsets) {
+ t.Errorf("[%d] TestMultipleErrorsNestedStruct(): errornum: want: %d, got %d", i, len(test.Offsets), len(errors))
+ return
+ }
+ for ii, e := range errors {
+ if e.Offset != test.Offsets[ii] {
+ t.Errorf("[%d] TestMultipleErrorsNestedStruct(): offset[%d]: want %d, got %d", i, ii, test.Offsets[ii], e.Offset)
+ }
+ }
+ }
+}
+
+func TestMultipleErrorsIntMap(t *testing.T) {
+ for i, test := range []struct {
+ Data []byte
+ Offsets []int
+ }{
+ {
+ Data: []byte(`{"a":"NumErr"}`),
+ Offsets: []int{1},
+ },
+ {
+ Data: []byte(`{"":"ErrSyntax"}`),
+ Offsets: []int{1},
+ },
+ {
+ Data: []byte(`{"a":"NumErr","33147483647":"ErrRange","-1":"ErrRange"}`),
+ Offsets: []int{1, 14, 39},
+ },
+ } {
+ l := jlexer.Lexer{
+ Data: test.Data,
+ UseMultipleErrors: true,
+ }
+
+ var v ErrorIntMap
+
+ v.UnmarshalEasyJSON(&l)
+
+ errors := l.GetNonFatalErrors()
+
+ if len(errors) != len(test.Offsets) {
+ t.Errorf("[%d] TestMultipleErrorsInt(): errornum: want: %d, got %d", i, len(test.Offsets), len(errors))
+ return
+ }
+
+ for ii, e := range errors {
+ if e.Offset != test.Offsets[ii] {
+ t.Errorf("[%d] TestMultipleErrorsInt(): offset[%d]: want %d, got %d", i, ii, test.Offsets[ii], e.Offset)
+ }
+ }
+ }
+}
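
A short sketch (hypothetical helper, not part of the vendored diff) of the multiple-errors mode these tests exercise: with UseMultipleErrors set, the lexer records each non-fatal type error together with its byte offset instead of stopping at the first one. It assumes the easyjson-generated UnmarshalEasyJSON for ErrorIntSlice, as in this package:

package tests

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

// collectIntErrors is a hypothetical helper for illustration only.
func collectIntErrors(data []byte) {
	l := jlexer.Lexer{
		Data:              data,
		UseMultipleErrors: true, // collect non-fatal errors instead of failing fast
	}
	var v ErrorIntSlice
	v.UnmarshalEasyJSON(&l)
	for _, e := range l.GetNonFatalErrors() {
		fmt.Printf("offset %d: %v\n", e.Offset, e)
	}
}
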
diff --git a/vendor/github.com/mailru/easyjson/tests/named_type.go b/vendor/github.com/mailru/easyjson/tests/named_type.go
new file mode 100644
index 000000000..0ff8dfeb3
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/tests/named_type.go
@@ -0,0 +1,22 @@
+package tests
+
+//easyjson:json
+type NamedType struct {
+ Inner struct {
+ // easyjson mistakenly names the type of this field 'tests.MyString' in the generated output;
+ // something about a named type inside an anonymous type triggers this bug.
+ Field MyString `tag:"value"`
+ Field2 int "tag:\"value with ` in it\""
+ }
+}
+
+type MyString string
+
+var namedTypeValue NamedType
+
+func init() {
+ namedTypeValue.Inner.Field = "test"
+ namedTypeValue.Inner.Field2 = 123
+}
+
+var namedTypeValueString = `{"Inner":{"Field":"test","Field2":123}}`
diff --git a/vendor/github.com/mailru/easyjson/tests/nested_easy.go b/vendor/github.com/mailru/easyjson/tests/nested_easy.go
new file mode 100644
index 000000000..6309a49f9
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/tests/nested_easy.go
@@ -0,0 +1,25 @@
+package tests
+
+import (
+ "github.com/mailru/easyjson"
+ "github.com/mailru/easyjson/jwriter"
+)
+
+//easyjson:json
+type NestedInterfaces struct {
+ Value interface{}
+ Slice []interface{}
+ Map map[string]interface{}
+}
+
+type NestedEasyMarshaler struct {
+ EasilyMarshaled bool
+}
+
+var _ easyjson.Marshaler = &NestedEasyMarshaler{}
+
+func (i *NestedEasyMarshaler) MarshalEasyJSON(w *jwriter.Writer) {
+ // We use this method only to indicate that the easyjson.Marshaler
+ // interface was actually used while encoding.
+ i.EasilyMarshaled = true
+}
\ No newline at end of file
diff --git a/vendor/github.com/mailru/easyjson/tests/nothing.go b/vendor/github.com/mailru/easyjson/tests/nothing.go
new file mode 100644
index 000000000..35334f5f5
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/tests/nothing.go
@@ -0,0 +1,3 @@
+package tests
+
+// No structs in this file
diff --git a/vendor/github.com/mailru/easyjson/tests/omitempty.go b/vendor/github.com/mailru/easyjson/tests/omitempty.go
new file mode 100644
index 000000000..ede5eb95a
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/tests/omitempty.go
@@ -0,0 +1,12 @@
+package tests
+
+//easyjson:json
+type OmitEmptyDefault struct {
+ Field string
+ Str string
+ Str1 string `json:"s,!omitempty"`
+ Str2 string `json:",!omitempty"`
+}
+
+var omitEmptyDefaultValue = OmitEmptyDefault{Field: "test"}
+var omitEmptyDefaultString = `{"Field":"test","s":"","Str2":""}`
diff --git a/vendor/github.com/mailru/easyjson/tests/opt_test.go b/vendor/github.com/mailru/easyjson/tests/opt_test.go
new file mode 100644
index 000000000..bdd32aa4a
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/tests/opt_test.go
@@ -0,0 +1,70 @@
+package tests
+
+import (
+ "math"
+ "reflect"
+ "testing"
+
+ "encoding/json"
+
+ "github.com/mailru/easyjson/opt"
+)
+
+// This struct type must NOT have a generated marshaler
+type OptsVanilla struct {
+ Int opt.Int
+ Uint opt.Uint
+
+ Int8 opt.Int8
+ Int16 opt.Int16
+ Int32 opt.Int32
+ Int64 opt.Int64
+
+ Uint8 opt.Uint8
+ Uint16 opt.Uint16
+ Uint32 opt.Uint32
+ Uint64 opt.Uint64
+
+ Float32 opt.Float32
+ Float64 opt.Float64
+
+ Bool opt.Bool
+ String opt.String
+}
+
+var optsVanillaValue = OptsVanilla{
+ Int: opt.OInt(-123),
+ Uint: opt.OUint(123),
+
+ Int8: opt.OInt8(math.MaxInt8),
+ Int16: opt.OInt16(math.MaxInt16),
+ Int32: opt.OInt32(math.MaxInt32),
+ Int64: opt.OInt64(math.MaxInt64),
+
+ Uint8: opt.OUint8(math.MaxUint8),
+ Uint16: opt.OUint16(math.MaxUint16),
+ Uint32: opt.OUint32(math.MaxUint32),
+ Uint64: opt.OUint64(math.MaxUint64),
+
+ Float32: opt.OFloat32(math.MaxFloat32),
+ Float64: opt.OFloat64(math.MaxFloat64),
+
+ Bool: opt.OBool(true),
+ String: opt.OString("foo"),
+}
+
+func TestOptsVanilla(t *testing.T) {
+ data, err := json.Marshal(optsVanillaValue)
+ if err != nil {
+ t.Errorf("Failed to marshal vanilla opts: %v", err)
+ }
+
+ var ov OptsVanilla
+ if err := json.Unmarshal(data, &ov); err != nil {
+ t.Errorf("Failed to unmarshal vanilla opts: %v", err)
+ }
+
+ if !reflect.DeepEqual(optsVanillaValue, ov) {
+ t.Errorf("Vanilla opts unmarshal returned invalid value %+v, want %+v", ov, optsVanillaValue)
+ }
+}
diff --git a/vendor/github.com/mailru/easyjson/tests/required_test.go b/vendor/github.com/mailru/easyjson/tests/required_test.go
new file mode 100644
index 000000000..8cc743d8c
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/tests/required_test.go
@@ -0,0 +1,28 @@
+package tests
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestRequiredField(t *testing.T) {
+ cases := []struct{ json, errorMessage string }{
+ {`{"first_name":"Foo", "last_name": "Bar"}`, ""},
+ {`{"last_name":"Bar"}`, "key 'first_name' is required"},
+ {"{}", "key 'first_name' is required"},
+ }
+
+ for _, tc := range cases {
+ var v RequiredOptionalStruct
+ err := v.UnmarshalJSON([]byte(tc.json))
+ if tc.errorMessage == "" {
+ if err != nil {
+ t.Errorf("%s. UnmarshalJSON didn`t expect error: %v", tc.json, err)
+ }
+ } else {
+ if fmt.Sprintf("%v", err) != tc.errorMessage {
+ t.Errorf("%s. UnmarshalJSON expected error: %v. got: %v", tc.json, tc.errorMessage, err)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/mailru/easyjson/tests/snake.go b/vendor/github.com/mailru/easyjson/tests/snake.go
new file mode 100644
index 000000000..9b64f8612
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/tests/snake.go
@@ -0,0 +1,10 @@
+package tests
+
+//easyjson:json
+type SnakeStruct struct {
+ WeirdHTTPStuff bool
+ CustomNamedField string `json:"cUsToM"`
+}
+
+var snakeStructValue SnakeStruct
+var snakeStructString = `{"weird_http_stuff":false,"cUsToM":""}`
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml b/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml
index f1309c9f8..5db258039 100644
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml
@@ -1,2 +1,8 @@
language: go
+go:
+ - 1.5
+ - 1.6
+ - tip
+
+script: make -f Makefile.TRAVIS
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS b/vendor/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS
new file mode 100644
index 000000000..24f9649e2
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS
@@ -0,0 +1,15 @@
+all: build cover test vet
+
+build:
+ go build -v ./...
+
+cover: test
+ $(MAKE) -C pbutil cover
+
+test: build
+ go test -v ./...
+
+vet: build
+ go vet -v ./...
+
+.PHONY: build cover test vet
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
new file mode 100644
index 000000000..e16fb946b
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
@@ -0,0 +1 @@
+cover.dat
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
new file mode 100644
index 000000000..81be21437
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
@@ -0,0 +1,7 @@
+all:
+
+cover:
+ go test -cover -v -coverprofile=cover.dat ./...
+ go tool cover -func cover.dat
+
+.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go
index 5c463722d..a793c8856 100644
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go
@@ -18,14 +18,15 @@ import (
"bytes"
"testing"
- . "github.com/golang/protobuf/proto"
- . "github.com/golang/protobuf/proto/testdata"
+ "github.com/golang/protobuf/proto"
+
+ . "github.com/matttproud/golang_protobuf_extensions/testdata"
)
func TestWriteDelimited(t *testing.T) {
t.Parallel()
for _, test := range []struct {
- msg Message
+ msg proto.Message
buf []byte
n int
err error
@@ -42,7 +43,7 @@ func TestWriteDelimited(t *testing.T) {
},
{
msg: &Strings{
- StringField: String(`This is my gigantic, unhappy string. It exceeds
+ StringField: proto.String(`This is my gigantic, unhappy string. It exceeds
the encoding size of a single byte varint. We are using it to fuzz test the
correctness of the header decoding mechanisms, which may prove problematic.
I expect it may. Let's hope you enjoy testing as much as we do.`),
@@ -82,7 +83,7 @@ func TestReadDelimited(t *testing.T) {
t.Parallel()
for _, test := range []struct {
buf []byte
- msg Message
+ msg proto.Message
n int
err error
}{
@@ -116,7 +117,7 @@ func TestReadDelimited(t *testing.T) {
106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32,
109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46},
msg: &Strings{
- StringField: String(`This is my gigantic, unhappy string. It exceeds
+ StringField: proto.String(`This is my gigantic, unhappy string. It exceeds
the encoding size of a single byte varint. We are using it to fuzz test the
correctness of the header decoding mechanisms, which may prove problematic.
I expect it may. Let's hope you enjoy testing as much as we do.`),
@@ -124,12 +125,12 @@ I expect it may. Let's hope you enjoy testing as much as we do.`),
n: 271,
},
} {
- msg := Clone(test.msg)
+ msg := proto.Clone(test.msg)
msg.Reset()
if n, err := ReadDelimited(bytes.NewBuffer(test.buf), msg); n != test.n || err != test.err {
t.Fatalf("ReadDelimited(%v, msg) = %v, %v; want %v, %v", test.buf, n, err, test.n, test.err)
}
- if !Equal(msg, test.msg) {
+ if !proto.Equal(msg, test.msg) {
t.Fatalf("ReadDelimited(%v, msg); msg = %v; want %v", test.buf, msg, test.msg)
}
}
@@ -137,12 +138,12 @@ I expect it may. Let's hope you enjoy testing as much as we do.`),
func TestEndToEndValid(t *testing.T) {
t.Parallel()
- for _, test := range [][]Message{
+ for _, test := range [][]proto.Message{
{&Empty{}},
{&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}},
{&GoEnum{Foo: FOO_FOO1.Enum()}},
{&Strings{
- StringField: String(`This is my gigantic, unhappy string. It exceeds
+ StringField: proto.String(`This is my gigantic, unhappy string. It exceeds
the encoding size of a single byte varint. We are using it to fuzz test the
correctness of the header decoding mechanisms, which may prove problematic.
I expect it may. Let's hope you enjoy testing as much as we do.`),
@@ -161,12 +162,12 @@ I expect it may. Let's hope you enjoy testing as much as we do.`),
}
var read int
for i, msg := range test {
- out := Clone(msg)
+ out := proto.Clone(msg)
out.Reset()
n, _ := ReadDelimited(&buf, out)
// Decide to do EOF checking?
read += n
- if !Equal(out, msg) {
+ if !proto.Equal(out, msg) {
t.Fatalf("out = %v; want %v[%d] = %#v", out, test, i, msg)
}
}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go
deleted file mode 100644
index d6d9b2559..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// http://github.com/golang/protobuf/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package pbutil
-
-import (
- . "github.com/golang/protobuf/proto"
- . "github.com/golang/protobuf/proto/testdata"
-)
-
-// FROM https://github.com/golang/protobuf/blob/master/proto/all_test.go.
-
-func initGoTestField() *GoTestField {
- f := new(GoTestField)
- f.Label = String("label")
- f.Type = String("type")
- return f
-}
-
-// These are all structurally equivalent but the tag numbers differ.
-// (It's remarkable that required, optional, and repeated all have
-// 8 letters.)
-func initGoTest_RequiredGroup() *GoTest_RequiredGroup {
- return &GoTest_RequiredGroup{
- RequiredField: String("required"),
- }
-}
-
-func initGoTest_OptionalGroup() *GoTest_OptionalGroup {
- return &GoTest_OptionalGroup{
- RequiredField: String("optional"),
- }
-}
-
-func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup {
- return &GoTest_RepeatedGroup{
- RequiredField: String("repeated"),
- }
-}
-
-func initGoTest(setdefaults bool) *GoTest {
- pb := new(GoTest)
- if setdefaults {
- pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted)
- pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted)
- pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted)
- pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted)
- pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted)
- pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted)
- pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted)
- pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted)
- pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted)
- pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted)
- pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted
- pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted)
- pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted)
- }
-
- pb.Kind = GoTest_TIME.Enum()
- pb.RequiredField = initGoTestField()
- pb.F_BoolRequired = Bool(true)
- pb.F_Int32Required = Int32(3)
- pb.F_Int64Required = Int64(6)
- pb.F_Fixed32Required = Uint32(32)
- pb.F_Fixed64Required = Uint64(64)
- pb.F_Uint32Required = Uint32(3232)
- pb.F_Uint64Required = Uint64(6464)
- pb.F_FloatRequired = Float32(3232)
- pb.F_DoubleRequired = Float64(6464)
- pb.F_StringRequired = String("string")
- pb.F_BytesRequired = []byte("bytes")
- pb.F_Sint32Required = Int32(-32)
- pb.F_Sint64Required = Int64(-64)
- pb.Requiredgroup = initGoTest_RequiredGroup()
-
- return pb
-}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY b/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY
new file mode 100644
index 000000000..0c1f84246
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY
@@ -0,0 +1,4 @@
+test.pb.go and test.proto are third-party data.
+
+SOURCE: https://github.com/golang/protobuf
+REVISION: bf531ff1a004f24ee53329dfd5ce0b41bfdc17df
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go b/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go
new file mode 100644
index 000000000..772adcb62
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go
@@ -0,0 +1,4029 @@
+// Code generated by protoc-gen-go.
+// source: test.proto
+// DO NOT EDIT!
+
+/*
+Package testdata is a generated protocol buffer package.
+
+It is generated from these files:
+ test.proto
+
+It has these top-level messages:
+ GoEnum
+ GoTestField
+ GoTest
+ GoSkipTest
+ NonPackedTest
+ PackedTest
+ MaxTag
+ OldMessage
+ NewMessage
+ InnerMessage
+ OtherMessage
+ RequiredInnerMessage
+ MyMessage
+ Ext
+ ComplexExtension
+ DefaultsMessage
+ MyMessageSet
+ Empty
+ MessageList
+ Strings
+ Defaults
+ SubDefaults
+ RepeatedEnum
+ MoreRepeated
+ GroupOld
+ GroupNew
+ FloatingPoint
+ MessageWithMap
+ Oneof
+ Communique
+*/
+package testdata
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+const _ = proto.ProtoPackageIsVersion1
+
+type FOO int32
+
+const (
+ FOO_FOO1 FOO = 1
+)
+
+var FOO_name = map[int32]string{
+ 1: "FOO1",
+}
+var FOO_value = map[string]int32{
+ "FOO1": 1,
+}
+
+func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+}
+func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+}
+func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+}
+func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+// An enum, for completeness.
+type GoTest_KIND int32
+
+const (
+ GoTest_VOID GoTest_KIND = 0
+ // Basic types
+ GoTest_BOOL GoTest_KIND = 1
+ GoTest_BYTES GoTest_KIND = 2
+ GoTest_FINGERPRINT GoTest_KIND = 3
+ GoTest_FLOAT GoTest_KIND = 4
+ GoTest_INT GoTest_KIND = 5
+ GoTest_STRING GoTest_KIND = 6
+ GoTest_TIME GoTest_KIND = 7
+ // Groupings
+ GoTest_TUPLE GoTest_KIND = 8
+ GoTest_ARRAY GoTest_KIND = 9
+ GoTest_MAP GoTest_KIND = 10
+ // Table types
+ GoTest_TABLE GoTest_KIND = 11
+ // Functions
+ GoTest_FUNCTION GoTest_KIND = 12
+)
+
+var GoTest_KIND_name = map[int32]string{
+ 0: "VOID",
+ 1: "BOOL",
+ 2: "BYTES",
+ 3: "FINGERPRINT",
+ 4: "FLOAT",
+ 5: "INT",
+ 6: "STRING",
+ 7: "TIME",
+ 8: "TUPLE",
+ 9: "ARRAY",
+ 10: "MAP",
+ 11: "TABLE",
+ 12: "FUNCTION",
+}
+var GoTest_KIND_value = map[string]int32{
+ "VOID": 0,
+ "BOOL": 1,
+ "BYTES": 2,
+ "FINGERPRINT": 3,
+ "FLOAT": 4,
+ "INT": 5,
+ "STRING": 6,
+ "TIME": 7,
+ "TUPLE": 8,
+ "ARRAY": 9,
+ "MAP": 10,
+ "TABLE": 11,
+ "FUNCTION": 12,
+}
+
+func (x GoTest_KIND) Enum() *GoTest_KIND {
+ p := new(GoTest_KIND)
+ *p = x
+ return p
+}
+func (x GoTest_KIND) String() string {
+ return proto.EnumName(GoTest_KIND_name, int32(x))
+}
+func (x *GoTest_KIND) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND")
+ if err != nil {
+ return err
+ }
+ *x = GoTest_KIND(value)
+ return nil
+}
+func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+
+type MyMessage_Color int32
+
+const (
+ MyMessage_RED MyMessage_Color = 0
+ MyMessage_GREEN MyMessage_Color = 1
+ MyMessage_BLUE MyMessage_Color = 2
+)
+
+var MyMessage_Color_name = map[int32]string{
+ 0: "RED",
+ 1: "GREEN",
+ 2: "BLUE",
+}
+var MyMessage_Color_value = map[string]int32{
+ "RED": 0,
+ "GREEN": 1,
+ "BLUE": 2,
+}
+
+func (x MyMessage_Color) Enum() *MyMessage_Color {
+ p := new(MyMessage_Color)
+ *p = x
+ return p
+}
+func (x MyMessage_Color) String() string {
+ return proto.EnumName(MyMessage_Color_name, int32(x))
+}
+func (x *MyMessage_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color")
+ if err != nil {
+ return err
+ }
+ *x = MyMessage_Color(value)
+ return nil
+}
+func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} }
+
+type DefaultsMessage_DefaultsEnum int32
+
+const (
+ DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0
+ DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1
+ DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2
+)
+
+var DefaultsMessage_DefaultsEnum_name = map[int32]string{
+ 0: "ZERO",
+ 1: "ONE",
+ 2: "TWO",
+}
+var DefaultsMessage_DefaultsEnum_value = map[string]int32{
+ "ZERO": 0,
+ "ONE": 1,
+ "TWO": 2,
+}
+
+func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum {
+ p := new(DefaultsMessage_DefaultsEnum)
+ *p = x
+ return p
+}
+func (x DefaultsMessage_DefaultsEnum) String() string {
+ return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x))
+}
+func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum")
+ if err != nil {
+ return err
+ }
+ *x = DefaultsMessage_DefaultsEnum(value)
+ return nil
+}
+func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{15, 0}
+}
+
+type Defaults_Color int32
+
+const (
+ Defaults_RED Defaults_Color = 0
+ Defaults_GREEN Defaults_Color = 1
+ Defaults_BLUE Defaults_Color = 2
+)
+
+var Defaults_Color_name = map[int32]string{
+ 0: "RED",
+ 1: "GREEN",
+ 2: "BLUE",
+}
+var Defaults_Color_value = map[string]int32{
+ "RED": 0,
+ "GREEN": 1,
+ "BLUE": 2,
+}
+
+func (x Defaults_Color) Enum() *Defaults_Color {
+ p := new(Defaults_Color)
+ *p = x
+ return p
+}
+func (x Defaults_Color) String() string {
+ return proto.EnumName(Defaults_Color_name, int32(x))
+}
+func (x *Defaults_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color")
+ if err != nil {
+ return err
+ }
+ *x = Defaults_Color(value)
+ return nil
+}
+func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{20, 0} }
+
+type RepeatedEnum_Color int32
+
+const (
+ RepeatedEnum_RED RepeatedEnum_Color = 1
+)
+
+var RepeatedEnum_Color_name = map[int32]string{
+ 1: "RED",
+}
+var RepeatedEnum_Color_value = map[string]int32{
+ "RED": 1,
+}
+
+func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color {
+ p := new(RepeatedEnum_Color)
+ *p = x
+ return p
+}
+func (x RepeatedEnum_Color) String() string {
+ return proto.EnumName(RepeatedEnum_Color_name, int32(x))
+}
+func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color")
+ if err != nil {
+ return err
+ }
+ *x = RepeatedEnum_Color(value)
+ return nil
+}
+func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{22, 0} }
+
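+// Illustrative usage sketch, not part of the upstream generated file: how the
+// enum helpers above behave. Enum() returns a pointer to a copy of the value,
+// String() goes through proto.EnumName, and UnmarshalJSON accepts the quoted
+// name form.
+func exampleEnumHelpers() {
+	c := MyMessage_GREEN.Enum() // *MyMessage_Color pointing at a copy of GREEN
+	_ = c.String()              // "GREEN", via proto.EnumName
+	var d MyMessage_Color
+	_ = d.UnmarshalJSON([]byte(`"BLUE"`)) // d now holds MyMessage_BLUE
+}
+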
+type GoEnum struct {
+ Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoEnum) Reset() { *m = GoEnum{} }
+func (m *GoEnum) String() string { return proto.CompactTextString(m) }
+func (*GoEnum) ProtoMessage() {}
+func (*GoEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *GoEnum) GetFoo() FOO {
+ if m != nil && m.Foo != nil {
+ return *m.Foo
+ }
+ return FOO_FOO1
+}
+
+type GoTestField struct {
+ Label *string `protobuf:"bytes,1,req,name=Label,json=label" json:"Label,omitempty"`
+ Type *string `protobuf:"bytes,2,req,name=Type,json=type" json:"Type,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTestField) Reset() { *m = GoTestField{} }
+func (m *GoTestField) String() string { return proto.CompactTextString(m) }
+func (*GoTestField) ProtoMessage() {}
+func (*GoTestField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *GoTestField) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+}
+
+func (m *GoTestField) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
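+// Illustrative usage sketch, not part of the upstream generated file: every
+// generated getter is safe on a nil receiver and on unset optional fields,
+// returning the zero value instead of panicking.
+func exampleNilSafeGetters() {
+	var m *GoTestField // nil message
+	_ = m.GetLabel()   // "": the m != nil check handles the nil receiver
+	m = &GoTestField{} // non-nil message, unset fields
+	_ = m.GetType()    // "": an unset *string yields the zero value
+}
+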
+type GoTest struct {
+ // Some typical parameters
+ Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,json=kind,enum=testdata.GoTest_KIND" json:"Kind,omitempty"`
+ Table *string `protobuf:"bytes,2,opt,name=Table,json=table" json:"Table,omitempty"`
+ Param *int32 `protobuf:"varint,3,opt,name=Param,json=param" json:"Param,omitempty"`
+ // Required, repeated and optional foreign fields.
+ RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"`
+ RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField,json=repeatedField" json:"RepeatedField,omitempty"`
+ OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField,json=optionalField" json:"OptionalField,omitempty"`
+ // Required fields of all basic types
+ F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=fBoolRequired" json:"F_Bool_required,omitempty"`
+ F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=fInt32Required" json:"F_Int32_required,omitempty"`
+ F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=fInt64Required" json:"F_Int64_required,omitempty"`
+ F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=fFixed32Required" json:"F_Fixed32_required,omitempty"`
+ F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=fFixed64Required" json:"F_Fixed64_required,omitempty"`
+ F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=fUint32Required" json:"F_Uint32_required,omitempty"`
+ F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=fUint64Required" json:"F_Uint64_required,omitempty"`
+ F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=fFloatRequired" json:"F_Float_required,omitempty"`
+ F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=fDoubleRequired" json:"F_Double_required,omitempty"`
+ F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=fStringRequired" json:"F_String_required,omitempty"`
+ F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=fBytesRequired" json:"F_Bytes_required,omitempty"`
+ F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=fSint32Required" json:"F_Sint32_required,omitempty"`
+ F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=fSint64Required" json:"F_Sint64_required,omitempty"`
+ // Repeated fields of all basic types
+ F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=fBoolRepeated" json:"F_Bool_repeated,omitempty"`
+ F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=fInt32Repeated" json:"F_Int32_repeated,omitempty"`
+ F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=fInt64Repeated" json:"F_Int64_repeated,omitempty"`
+ F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=fFixed32Repeated" json:"F_Fixed32_repeated,omitempty"`
+ F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=fFixed64Repeated" json:"F_Fixed64_repeated,omitempty"`
+ F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=fUint32Repeated" json:"F_Uint32_repeated,omitempty"`
+ F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=fUint64Repeated" json:"F_Uint64_repeated,omitempty"`
+ F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=fFloatRepeated" json:"F_Float_repeated,omitempty"`
+ F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=fDoubleRepeated" json:"F_Double_repeated,omitempty"`
+ F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=fStringRepeated" json:"F_String_repeated,omitempty"`
+ F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=fBytesRepeated" json:"F_Bytes_repeated,omitempty"`
+ F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=fSint32Repeated" json:"F_Sint32_repeated,omitempty"`
+ F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=fSint64Repeated" json:"F_Sint64_repeated,omitempty"`
+ // Optional fields of all basic types
+ F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=fBoolOptional" json:"F_Bool_optional,omitempty"`
+ F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=fInt32Optional" json:"F_Int32_optional,omitempty"`
+ F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=fInt64Optional" json:"F_Int64_optional,omitempty"`
+ F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=fFixed32Optional" json:"F_Fixed32_optional,omitempty"`
+ F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=fFixed64Optional" json:"F_Fixed64_optional,omitempty"`
+ F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=fUint32Optional" json:"F_Uint32_optional,omitempty"`
+ F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=fUint64Optional" json:"F_Uint64_optional,omitempty"`
+ F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=fFloatOptional" json:"F_Float_optional,omitempty"`
+ F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=fDoubleOptional" json:"F_Double_optional,omitempty"`
+ F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=fStringOptional" json:"F_String_optional,omitempty"`
+ F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=fBytesOptional" json:"F_Bytes_optional,omitempty"`
+ F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=fSint32Optional" json:"F_Sint32_optional,omitempty"`
+ F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=fSint64Optional" json:"F_Sint64_optional,omitempty"`
+ // Default-valued fields of all basic types
+ F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=fBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"`
+ F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=fInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"`
+ F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=fInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"`
+ F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=fFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"`
+ F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=fFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"`
+ F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=fUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"`
+ F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=fUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"`
+ F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=fFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"`
+ F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=fDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"`
+ F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=fStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"`
+ F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=fBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"`
+ F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=fSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"`
+ F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=fSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"`
+ // Packed repeated fields (no string or bytes).
+ F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=fBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"`
+ F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=fInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"`
+ F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=fInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"`
+ F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=fFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"`
+ F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=fFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"`
+ F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=fUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"`
+ F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=fUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"`
+ F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=fFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"`
+ F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=fDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"`
+ F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=fSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"`
+ F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=fSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"`
+ Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"`
+ Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"`
+ Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest) Reset() { *m = GoTest{} }
+func (m *GoTest) String() string { return proto.CompactTextString(m) }
+func (*GoTest) ProtoMessage() {}
+func (*GoTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+const Default_GoTest_F_BoolDefaulted bool = true
+const Default_GoTest_F_Int32Defaulted int32 = 32
+const Default_GoTest_F_Int64Defaulted int64 = 64
+const Default_GoTest_F_Fixed32Defaulted uint32 = 320
+const Default_GoTest_F_Fixed64Defaulted uint64 = 640
+const Default_GoTest_F_Uint32Defaulted uint32 = 3200
+const Default_GoTest_F_Uint64Defaulted uint64 = 6400
+const Default_GoTest_F_FloatDefaulted float32 = 314159
+const Default_GoTest_F_DoubleDefaulted float64 = 271828
+const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n"
+
+var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose")
+
+const Default_GoTest_F_Sint32Defaulted int32 = -32
+const Default_GoTest_F_Sint64Defaulted int64 = -64
+
+func (m *GoTest) GetKind() GoTest_KIND {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return GoTest_VOID
+}
+
+func (m *GoTest) GetTable() string {
+ if m != nil && m.Table != nil {
+ return *m.Table
+ }
+ return ""
+}
+
+func (m *GoTest) GetParam() int32 {
+ if m != nil && m.Param != nil {
+ return *m.Param
+ }
+ return 0
+}
+
+func (m *GoTest) GetRequiredField() *GoTestField {
+ if m != nil {
+ return m.RequiredField
+ }
+ return nil
+}
+
+func (m *GoTest) GetRepeatedField() []*GoTestField {
+ if m != nil {
+ return m.RepeatedField
+ }
+ return nil
+}
+
+func (m *GoTest) GetOptionalField() *GoTestField {
+ if m != nil {
+ return m.OptionalField
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BoolRequired() bool {
+ if m != nil && m.F_BoolRequired != nil {
+ return *m.F_BoolRequired
+ }
+ return false
+}
+
+func (m *GoTest) GetF_Int32Required() int32 {
+ if m != nil && m.F_Int32Required != nil {
+ return *m.F_Int32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Int64Required() int64 {
+ if m != nil && m.F_Int64Required != nil {
+ return *m.F_Int64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed32Required() uint32 {
+ if m != nil && m.F_Fixed32Required != nil {
+ return *m.F_Fixed32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed64Required() uint64 {
+ if m != nil && m.F_Fixed64Required != nil {
+ return *m.F_Fixed64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint32Required() uint32 {
+ if m != nil && m.F_Uint32Required != nil {
+ return *m.F_Uint32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint64Required() uint64 {
+ if m != nil && m.F_Uint64Required != nil {
+ return *m.F_Uint64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_FloatRequired() float32 {
+ if m != nil && m.F_FloatRequired != nil {
+ return *m.F_FloatRequired
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_DoubleRequired() float64 {
+ if m != nil && m.F_DoubleRequired != nil {
+ return *m.F_DoubleRequired
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_StringRequired() string {
+ if m != nil && m.F_StringRequired != nil {
+ return *m.F_StringRequired
+ }
+ return ""
+}
+
+func (m *GoTest) GetF_BytesRequired() []byte {
+ if m != nil {
+ return m.F_BytesRequired
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Required() int32 {
+ if m != nil && m.F_Sint32Required != nil {
+ return *m.F_Sint32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Sint64Required() int64 {
+ if m != nil && m.F_Sint64Required != nil {
+ return *m.F_Sint64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_BoolRepeated() []bool {
+ if m != nil {
+ return m.F_BoolRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int32Repeated() []int32 {
+ if m != nil {
+ return m.F_Int32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int64Repeated() []int64 {
+ if m != nil {
+ return m.F_Int64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed32Repeated() []uint32 {
+ if m != nil {
+ return m.F_Fixed32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed64Repeated() []uint64 {
+ if m != nil {
+ return m.F_Fixed64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint32Repeated() []uint32 {
+ if m != nil {
+ return m.F_Uint32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint64Repeated() []uint64 {
+ if m != nil {
+ return m.F_Uint64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_FloatRepeated() []float32 {
+ if m != nil {
+ return m.F_FloatRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_DoubleRepeated() []float64 {
+ if m != nil {
+ return m.F_DoubleRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_StringRepeated() []string {
+ if m != nil {
+ return m.F_StringRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BytesRepeated() [][]byte {
+ if m != nil {
+ return m.F_BytesRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Repeated() []int32 {
+ if m != nil {
+ return m.F_Sint32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint64Repeated() []int64 {
+ if m != nil {
+ return m.F_Sint64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BoolOptional() bool {
+ if m != nil && m.F_BoolOptional != nil {
+ return *m.F_BoolOptional
+ }
+ return false
+}
+
+func (m *GoTest) GetF_Int32Optional() int32 {
+ if m != nil && m.F_Int32Optional != nil {
+ return *m.F_Int32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Int64Optional() int64 {
+ if m != nil && m.F_Int64Optional != nil {
+ return *m.F_Int64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed32Optional() uint32 {
+ if m != nil && m.F_Fixed32Optional != nil {
+ return *m.F_Fixed32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed64Optional() uint64 {
+ if m != nil && m.F_Fixed64Optional != nil {
+ return *m.F_Fixed64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint32Optional() uint32 {
+ if m != nil && m.F_Uint32Optional != nil {
+ return *m.F_Uint32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint64Optional() uint64 {
+ if m != nil && m.F_Uint64Optional != nil {
+ return *m.F_Uint64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_FloatOptional() float32 {
+ if m != nil && m.F_FloatOptional != nil {
+ return *m.F_FloatOptional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_DoubleOptional() float64 {
+ if m != nil && m.F_DoubleOptional != nil {
+ return *m.F_DoubleOptional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_StringOptional() string {
+ if m != nil && m.F_StringOptional != nil {
+ return *m.F_StringOptional
+ }
+ return ""
+}
+
+func (m *GoTest) GetF_BytesOptional() []byte {
+ if m != nil {
+ return m.F_BytesOptional
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Optional() int32 {
+ if m != nil && m.F_Sint32Optional != nil {
+ return *m.F_Sint32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Sint64Optional() int64 {
+ if m != nil && m.F_Sint64Optional != nil {
+ return *m.F_Sint64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_BoolDefaulted() bool {
+ if m != nil && m.F_BoolDefaulted != nil {
+ return *m.F_BoolDefaulted
+ }
+ return Default_GoTest_F_BoolDefaulted
+}
+
+func (m *GoTest) GetF_Int32Defaulted() int32 {
+ if m != nil && m.F_Int32Defaulted != nil {
+ return *m.F_Int32Defaulted
+ }
+ return Default_GoTest_F_Int32Defaulted
+}
+
+func (m *GoTest) GetF_Int64Defaulted() int64 {
+ if m != nil && m.F_Int64Defaulted != nil {
+ return *m.F_Int64Defaulted
+ }
+ return Default_GoTest_F_Int64Defaulted
+}
+
+func (m *GoTest) GetF_Fixed32Defaulted() uint32 {
+ if m != nil && m.F_Fixed32Defaulted != nil {
+ return *m.F_Fixed32Defaulted
+ }
+ return Default_GoTest_F_Fixed32Defaulted
+}
+
+func (m *GoTest) GetF_Fixed64Defaulted() uint64 {
+ if m != nil && m.F_Fixed64Defaulted != nil {
+ return *m.F_Fixed64Defaulted
+ }
+ return Default_GoTest_F_Fixed64Defaulted
+}
+
+func (m *GoTest) GetF_Uint32Defaulted() uint32 {
+ if m != nil && m.F_Uint32Defaulted != nil {
+ return *m.F_Uint32Defaulted
+ }
+ return Default_GoTest_F_Uint32Defaulted
+}
+
+func (m *GoTest) GetF_Uint64Defaulted() uint64 {
+ if m != nil && m.F_Uint64Defaulted != nil {
+ return *m.F_Uint64Defaulted
+ }
+ return Default_GoTest_F_Uint64Defaulted
+}
+
+func (m *GoTest) GetF_FloatDefaulted() float32 {
+ if m != nil && m.F_FloatDefaulted != nil {
+ return *m.F_FloatDefaulted
+ }
+ return Default_GoTest_F_FloatDefaulted
+}
+
+func (m *GoTest) GetF_DoubleDefaulted() float64 {
+ if m != nil && m.F_DoubleDefaulted != nil {
+ return *m.F_DoubleDefaulted
+ }
+ return Default_GoTest_F_DoubleDefaulted
+}
+
+func (m *GoTest) GetF_StringDefaulted() string {
+ if m != nil && m.F_StringDefaulted != nil {
+ return *m.F_StringDefaulted
+ }
+ return Default_GoTest_F_StringDefaulted
+}
+
+func (m *GoTest) GetF_BytesDefaulted() []byte {
+ if m != nil && m.F_BytesDefaulted != nil {
+ return m.F_BytesDefaulted
+ }
+ return append([]byte(nil), Default_GoTest_F_BytesDefaulted...)
+}
+
+func (m *GoTest) GetF_Sint32Defaulted() int32 {
+ if m != nil && m.F_Sint32Defaulted != nil {
+ return *m.F_Sint32Defaulted
+ }
+ return Default_GoTest_F_Sint32Defaulted
+}
+
+func (m *GoTest) GetF_Sint64Defaulted() int64 {
+ if m != nil && m.F_Sint64Defaulted != nil {
+ return *m.F_Sint64Defaulted
+ }
+ return Default_GoTest_F_Sint64Defaulted
+}
+
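+// Illustrative usage sketch, not part of the upstream generated file: on an
+// unset field the defaulted getters fall back to the Default_GoTest_* values
+// declared above, and GetF_BytesDefaulted hands back a fresh copy so callers
+// cannot mutate the shared default slice.
+func exampleGoTestDefaults() {
+	m := &GoTest{}
+	_ = m.GetF_Int32Defaulted()  // 32, from Default_GoTest_F_Int32Defaulted
+	b := m.GetF_BytesDefaulted() // copy of []byte("Bignose")
+	b[0] = 'X'                   // safe: the default itself is untouched
+}
+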
+func (m *GoTest) GetF_BoolRepeatedPacked() []bool {
+ if m != nil {
+ return m.F_BoolRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int32RepeatedPacked() []int32 {
+ if m != nil {
+ return m.F_Int32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int64RepeatedPacked() []int64 {
+ if m != nil {
+ return m.F_Int64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 {
+ if m != nil {
+ return m.F_Fixed32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 {
+ if m != nil {
+ return m.F_Fixed64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 {
+ if m != nil {
+ return m.F_Uint32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 {
+ if m != nil {
+ return m.F_Uint64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_FloatRepeatedPacked() []float32 {
+ if m != nil {
+ return m.F_FloatRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 {
+ if m != nil {
+ return m.F_DoubleRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 {
+ if m != nil {
+ return m.F_Sint32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 {
+ if m != nil {
+ return m.F_Sint64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup {
+ if m != nil {
+ return m.Requiredgroup
+ }
+ return nil
+}
+
+func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup {
+ if m != nil {
+ return m.Repeatedgroup
+ }
+ return nil
+}
+
+func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+}
+
+// Required, repeated, and optional groups.
+type GoTest_RequiredGroup struct {
+ RequiredField *string `protobuf:"bytes,71,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} }
+func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_RequiredGroup) ProtoMessage() {}
+func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+
+func (m *GoTest_RequiredGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+type GoTest_RepeatedGroup struct {
+ RequiredField *string `protobuf:"bytes,81,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} }
+func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_RepeatedGroup) ProtoMessage() {}
+func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} }
+
+func (m *GoTest_RepeatedGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+type GoTest_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,91,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} }
+func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_OptionalGroup) ProtoMessage() {}
+func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} }
+
+func (m *GoTest_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+// For testing skipping of unrecognized fields.
+// Field numbers here are all large, larger than the tag numbers in GoTestField,
+// the message used in the corresponding test.
+type GoSkipTest struct {
+ SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"`
+ SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"`
+ SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"`
+ SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"`
+ Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoSkipTest) Reset() { *m = GoSkipTest{} }
+func (m *GoSkipTest) String() string { return proto.CompactTextString(m) }
+func (*GoSkipTest) ProtoMessage() {}
+func (*GoSkipTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *GoSkipTest) GetSkipInt32() int32 {
+ if m != nil && m.SkipInt32 != nil {
+ return *m.SkipInt32
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipFixed32() uint32 {
+ if m != nil && m.SkipFixed32 != nil {
+ return *m.SkipFixed32
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipFixed64() uint64 {
+ if m != nil && m.SkipFixed64 != nil {
+ return *m.SkipFixed64
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipString() string {
+ if m != nil && m.SkipString != nil {
+ return *m.SkipString
+ }
+ return ""
+}
+
+func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup {
+ if m != nil {
+ return m.Skipgroup
+ }
+ return nil
+}
+
+type GoSkipTest_SkipGroup struct {
+ GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"`
+ GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} }
+func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) }
+func (*GoSkipTest_SkipGroup) ProtoMessage() {}
+func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
+
+func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 {
+ if m != nil && m.GroupInt32 != nil {
+ return *m.GroupInt32
+ }
+ return 0
+}
+
+func (m *GoSkipTest_SkipGroup) GetGroupString() string {
+ if m != nil && m.GroupString != nil {
+ return *m.GroupString
+ }
+ return ""
+}
+
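+// Illustrative usage sketch, not part of the upstream generated file: a
+// decoder skips tags it does not recognize and keeps their bytes in
+// XXX_unrecognized, so unknown fields survive decoding. Errors elided.
+func exampleSkipUnknown() {
+	buf, _ := proto.Marshal(&NonPackedTest{A: []int32{42}})
+	var e Empty
+	_ = proto.Unmarshal(buf, &e)    // tag 1 is unknown to Empty
+	_ = len(e.XXX_unrecognized) > 0 // true: the bytes were preserved
+}
+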
+// For testing packed/non-packed decoder switching.
+// A serialized instance of one should be deserializable as the other.
+type NonPackedTest struct {
+ A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NonPackedTest) Reset() { *m = NonPackedTest{} }
+func (m *NonPackedTest) String() string { return proto.CompactTextString(m) }
+func (*NonPackedTest) ProtoMessage() {}
+func (*NonPackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *NonPackedTest) GetA() []int32 {
+ if m != nil {
+ return m.A
+ }
+ return nil
+}
+
+type PackedTest struct {
+ B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PackedTest) Reset() { *m = PackedTest{} }
+func (m *PackedTest) String() string { return proto.CompactTextString(m) }
+func (*PackedTest) ProtoMessage() {}
+func (*PackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *PackedTest) GetB() []int32 {
+ if m != nil {
+ return m.B
+ }
+ return nil
+}
+
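+// Illustrative usage sketch, not part of the upstream generated file: the
+// decoder accepts both encodings of a repeated scalar field, so bytes
+// serialized from NonPackedTest decode into PackedTest and vice versa.
+// Errors elided.
+func examplePackedSwitch() {
+	buf, _ := proto.Marshal(&NonPackedTest{A: []int32{1, 2, 3}})
+	var p PackedTest
+	_ = proto.Unmarshal(buf, &p) // p.B is []int32{1, 2, 3}
+}
+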
+type MaxTag struct {
+ // Maximum possible tag number.
+ LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MaxTag) Reset() { *m = MaxTag{} }
+func (m *MaxTag) String() string { return proto.CompactTextString(m) }
+func (*MaxTag) ProtoMessage() {}
+func (*MaxTag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *MaxTag) GetLastField() string {
+ if m != nil && m.LastField != nil {
+ return *m.LastField
+ }
+ return ""
+}
+
+type OldMessage struct {
+ Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
+ Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OldMessage) Reset() { *m = OldMessage{} }
+func (m *OldMessage) String() string { return proto.CompactTextString(m) }
+func (*OldMessage) ProtoMessage() {}
+func (*OldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *OldMessage) GetNested() *OldMessage_Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+func (m *OldMessage) GetNum() int32 {
+ if m != nil && m.Num != nil {
+ return *m.Num
+ }
+ return 0
+}
+
+type OldMessage_Nested struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} }
+func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) }
+func (*OldMessage_Nested) ProtoMessage() {}
+func (*OldMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} }
+
+func (m *OldMessage_Nested) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// NewMessage is wire compatible with OldMessage;
+// imagine it as a future version.
+type NewMessage struct {
+ Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
+ // This is an int32 in OldMessage.
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NewMessage) Reset() { *m = NewMessage{} }
+func (m *NewMessage) String() string { return proto.CompactTextString(m) }
+func (*NewMessage) ProtoMessage() {}
+func (*NewMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *NewMessage) GetNested() *NewMessage_Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+func (m *NewMessage) GetNum() int64 {
+ if m != nil && m.Num != nil {
+ return *m.Num
+ }
+ return 0
+}
+
+type NewMessage_Nested struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} }
+func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) }
+func (*NewMessage_Nested) ProtoMessage() {}
+func (*NewMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} }
+
+func (m *NewMessage_Nested) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *NewMessage_Nested) GetFoodGroup() string {
+ if m != nil && m.FoodGroup != nil {
+ return *m.FoodGroup
+ }
+ return ""
+}
+
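+// Illustrative usage sketch, not part of the upstream generated file: num
+// widened from int32 to int64 stays wire compatible for values that fit, so
+// a serialized NewMessage decodes as an OldMessage. Errors elided.
+func exampleWireCompat() {
+	buf, _ := proto.Marshal(&NewMessage{Num: proto.Int64(7)})
+	var old OldMessage
+	_ = proto.Unmarshal(buf, &old) // old.GetNum() == 7
+}
+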
+type InnerMessage struct {
+ Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"`
+ Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"`
+ Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InnerMessage) Reset() { *m = InnerMessage{} }
+func (m *InnerMessage) String() string { return proto.CompactTextString(m) }
+func (*InnerMessage) ProtoMessage() {}
+func (*InnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+const Default_InnerMessage_Port int32 = 4000
+
+func (m *InnerMessage) GetHost() string {
+ if m != nil && m.Host != nil {
+ return *m.Host
+ }
+ return ""
+}
+
+func (m *InnerMessage) GetPort() int32 {
+ if m != nil && m.Port != nil {
+ return *m.Port
+ }
+ return Default_InnerMessage_Port
+}
+
+func (m *InnerMessage) GetConnected() bool {
+ if m != nil && m.Connected != nil {
+ return *m.Connected
+ }
+ return false
+}
+
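+// Illustrative usage sketch, not part of the upstream generated file: an
+// optional field with a declared default reports that default while unset.
+func exampleInnerMessagePort() {
+	m := &InnerMessage{Host: proto.String("example.com")}
+	_ = m.GetPort() // 4000, from Default_InnerMessage_Port
+}
+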
+type OtherMessage struct {
+ Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"`
+ Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OtherMessage) Reset() { *m = OtherMessage{} }
+func (m *OtherMessage) String() string { return proto.CompactTextString(m) }
+func (*OtherMessage) ProtoMessage() {}
+func (*OtherMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+var extRange_OtherMessage = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_OtherMessage
+}
+func (m *OtherMessage) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+func (m *OtherMessage) GetKey() int64 {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return 0
+}
+
+func (m *OtherMessage) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *OtherMessage) GetWeight() float32 {
+ if m != nil && m.Weight != nil {
+ return *m.Weight
+ }
+ return 0
+}
+
+func (m *OtherMessage) GetInner() *InnerMessage {
+ if m != nil {
+ return m.Inner
+ }
+ return nil
+}
+
+type RequiredInnerMessage struct {
+ LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} }
+func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) }
+func (*RequiredInnerMessage) ProtoMessage() {}
+func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage {
+ if m != nil {
+ return m.LeoFinallyWonAnOscar
+ }
+ return nil
+}
+
+type MyMessage struct {
+ Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"`
+ Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
+ Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"`
+ Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"`
+ Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"`
+ Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"`
+ WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"`
+ RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"`
+ Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"`
+ Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"`
+ // This field becomes [][]byte in the generated code.
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"`
+ Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessage) Reset() { *m = MyMessage{} }
+func (m *MyMessage) String() string { return proto.CompactTextString(m) }
+func (*MyMessage) ProtoMessage() {}
+func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+var extRange_MyMessage = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MyMessage
+}
+func (m *MyMessage) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+func (m *MyMessage) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *MyMessage) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MyMessage) GetQuote() string {
+ if m != nil && m.Quote != nil {
+ return *m.Quote
+ }
+ return ""
+}
+
+func (m *MyMessage) GetPet() []string {
+ if m != nil {
+ return m.Pet
+ }
+ return nil
+}
+
+func (m *MyMessage) GetInner() *InnerMessage {
+ if m != nil {
+ return m.Inner
+ }
+ return nil
+}
+
+func (m *MyMessage) GetOthers() []*OtherMessage {
+ if m != nil {
+ return m.Others
+ }
+ return nil
+}
+
+func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage {
+ if m != nil {
+ return m.WeMustGoDeeper
+ }
+ return nil
+}
+
+func (m *MyMessage) GetRepInner() []*InnerMessage {
+ if m != nil {
+ return m.RepInner
+ }
+ return nil
+}
+
+func (m *MyMessage) GetBikeshed() MyMessage_Color {
+ if m != nil && m.Bikeshed != nil {
+ return *m.Bikeshed
+ }
+ return MyMessage_RED
+}
+
+func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup {
+ if m != nil {
+ return m.Somegroup
+ }
+ return nil
+}
+
+func (m *MyMessage) GetRepBytes() [][]byte {
+ if m != nil {
+ return m.RepBytes
+ }
+ return nil
+}
+
+func (m *MyMessage) GetBigfloat() float64 {
+ if m != nil && m.Bigfloat != nil {
+ return *m.Bigfloat
+ }
+ return 0
+}
+
+type MyMessage_SomeGroup struct {
+ GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} }
+func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) }
+func (*MyMessage_SomeGroup) ProtoMessage() {}
+func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} }
+
+func (m *MyMessage_SomeGroup) GetGroupField() int32 {
+ if m != nil && m.GroupField != nil {
+ return *m.GroupField
+ }
+ return 0
+}
+
+type Ext struct {
+ Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Ext) Reset() { *m = Ext{} }
+func (m *Ext) String() string { return proto.CompactTextString(m) }
+func (*Ext) ProtoMessage() {}
+func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+func (m *Ext) GetData() string {
+ if m != nil && m.Data != nil {
+ return *m.Data
+ }
+ return ""
+}
+
+var E_Ext_More = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*Ext)(nil),
+ Field: 103,
+ Name: "testdata.Ext.more",
+ Tag: "bytes,103,opt,name=more",
+}
+
+var E_Ext_Text = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 104,
+ Name: "testdata.Ext.text",
+ Tag: "bytes,104,opt,name=text",
+}
+
+var E_Ext_Number = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 105,
+ Name: "testdata.Ext.number",
+ Tag: "varint,105,opt,name=number",
+}
+
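+// Illustrative usage sketch, not part of the upstream generated file: the
+// ExtensionDesc values above are the handles passed to the proto package's
+// extension API. Errors elided.
+func exampleExtensions() {
+	m := &MyMessage{Count: proto.Int32(1)}
+	_ = proto.SetExtension(m, E_Ext_More, &Ext{Data: proto.String("hi")})
+	v, _ := proto.GetExtension(m, E_Ext_More)
+	_ = v.(*Ext) // the concrete type matches ExtensionType
+}
+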
+type ComplexExtension struct {
+ First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"`
+ Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"`
+ Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ComplexExtension) Reset() { *m = ComplexExtension{} }
+func (m *ComplexExtension) String() string { return proto.CompactTextString(m) }
+func (*ComplexExtension) ProtoMessage() {}
+func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *ComplexExtension) GetFirst() int32 {
+ if m != nil && m.First != nil {
+ return *m.First
+ }
+ return 0
+}
+
+func (m *ComplexExtension) GetSecond() int32 {
+ if m != nil && m.Second != nil {
+ return *m.Second
+ }
+ return 0
+}
+
+func (m *ComplexExtension) GetThird() []int32 {
+ if m != nil {
+ return m.Third
+ }
+ return nil
+}
+
+type DefaultsMessage struct {
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} }
+func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) }
+func (*DefaultsMessage) ProtoMessage() {}
+func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+var extRange_DefaultsMessage = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_DefaultsMessage
+}
+func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+type MyMessageSet struct {
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessageSet) Reset() { *m = MyMessageSet{} }
+func (m *MyMessageSet) String() string { return proto.CompactTextString(m) }
+func (*MyMessageSet) ProtoMessage() {}
+func (*MyMessageSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+func (m *MyMessageSet) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(m.ExtensionMap())
+}
+func (m *MyMessageSet) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
+}
+func (m *MyMessageSet) MarshalJSON() ([]byte, error) {
+ return proto.MarshalMessageSetJSON(m.XXX_extensions)
+}
+func (m *MyMessageSet) UnmarshalJSON(buf []byte) error {
+ return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)
+}
+
+// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*MyMessageSet)(nil)
+var _ proto.Unmarshaler = (*MyMessageSet)(nil)
+
+var extRange_MyMessageSet = []proto.ExtensionRange{
+ {100, 2147483646},
+}
+
+func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MyMessageSet
+}
+func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
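+// Illustrative usage sketch, not part of the upstream generated file: the
+// Marshal and Unmarshal overrides above route MyMessageSet through the
+// legacy MessageSet wire format, which the proto.Marshaler and
+// proto.Unmarshaler assertions guarantee at compile time. Errors elided.
+func exampleMessageSet() {
+	m := &MyMessageSet{}
+	buf, _ := m.Marshal() // MessageSet wire format via the override
+	_ = (&MyMessageSet{}).Unmarshal(buf)
+}
+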
+type Empty struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Empty) Reset() { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage() {}
+func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+type MessageList struct {
+ Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageList) Reset() { *m = MessageList{} }
+func (m *MessageList) String() string { return proto.CompactTextString(m) }
+func (*MessageList) ProtoMessage() {}
+func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
+func (m *MessageList) GetMessage() []*MessageList_Message {
+ if m != nil {
+ return m.Message
+ }
+ return nil
+}
+
+type MessageList_Message struct {
+ Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"`
+ Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageList_Message) Reset() { *m = MessageList_Message{} }
+func (m *MessageList_Message) String() string { return proto.CompactTextString(m) }
+func (*MessageList_Message) ProtoMessage() {}
+func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} }
+
+func (m *MessageList_Message) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MessageList_Message) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+type Strings struct {
+ StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"`
+ BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Strings) Reset() { *m = Strings{} }
+func (m *Strings) String() string { return proto.CompactTextString(m) }
+func (*Strings) ProtoMessage() {}
+func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+
+func (m *Strings) GetStringField() string {
+ if m != nil && m.StringField != nil {
+ return *m.StringField
+ }
+ return ""
+}
+
+func (m *Strings) GetBytesField() []byte {
+ if m != nil {
+ return m.BytesField
+ }
+ return nil
+}
+
+type Defaults struct {
+ // Default-valued fields of all basic types.
+ // Same as GoTest, but copied here to make testing easier.
+ F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,def=1" json:"F_Bool,omitempty"`
+ F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,def=32" json:"F_Int32,omitempty"`
+ F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=fInt64,def=64" json:"F_Int64,omitempty"`
+ F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,def=320" json:"F_Fixed32,omitempty"`
+ F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,def=640" json:"F_Fixed64,omitempty"`
+ F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,def=3200" json:"F_Uint32,omitempty"`
+ F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,def=6400" json:"F_Uint64,omitempty"`
+ F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,def=314159" json:"F_Float,omitempty"`
+ F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,def=271828" json:"F_Double,omitempty"`
+ F_String *string `protobuf:"bytes,10,opt,name=F_String,json=fString,def=hello, \"world!\"\n" json:"F_String,omitempty"`
+ F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,def=Bignose" json:"F_Bytes,omitempty"`
+ F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,def=-32" json:"F_Sint32,omitempty"`
+ F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,def=-64" json:"F_Sint64,omitempty"`
+ F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"`
+ // More fields with crazy defaults.
+ F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=fPinf,def=inf" json:"F_Pinf,omitempty"`
+ F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=fNinf,def=-inf" json:"F_Ninf,omitempty"`
+ F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=fNan,def=nan" json:"F_Nan,omitempty"`
+ // Sub-message.
+ Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"`
+ // Redundant but explicit defaults.
+ StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Defaults) Reset() { *m = Defaults{} }
+func (m *Defaults) String() string { return proto.CompactTextString(m) }
+func (*Defaults) ProtoMessage() {}
+func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+const Default_Defaults_F_Bool bool = true
+const Default_Defaults_F_Int32 int32 = 32
+const Default_Defaults_F_Int64 int64 = 64
+const Default_Defaults_F_Fixed32 uint32 = 320
+const Default_Defaults_F_Fixed64 uint64 = 640
+const Default_Defaults_F_Uint32 uint32 = 3200
+const Default_Defaults_F_Uint64 uint64 = 6400
+const Default_Defaults_F_Float float32 = 314159
+const Default_Defaults_F_Double float64 = 271828
+const Default_Defaults_F_String string = "hello, \"world!\"\n"
+
+var Default_Defaults_F_Bytes []byte = []byte("Bignose")
+
+const Default_Defaults_F_Sint32 int32 = -32
+const Default_Defaults_F_Sint64 int64 = -64
+const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN
+
+var Default_Defaults_F_Pinf float32 = float32(math.Inf(1))
+var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1))
+var Default_Defaults_F_Nan float32 = float32(math.NaN())
+
+func (m *Defaults) GetF_Bool() bool {
+ if m != nil && m.F_Bool != nil {
+ return *m.F_Bool
+ }
+ return Default_Defaults_F_Bool
+}
+
+func (m *Defaults) GetF_Int32() int32 {
+ if m != nil && m.F_Int32 != nil {
+ return *m.F_Int32
+ }
+ return Default_Defaults_F_Int32
+}
+
+func (m *Defaults) GetF_Int64() int64 {
+ if m != nil && m.F_Int64 != nil {
+ return *m.F_Int64
+ }
+ return Default_Defaults_F_Int64
+}
+
+func (m *Defaults) GetF_Fixed32() uint32 {
+ if m != nil && m.F_Fixed32 != nil {
+ return *m.F_Fixed32
+ }
+ return Default_Defaults_F_Fixed32
+}
+
+func (m *Defaults) GetF_Fixed64() uint64 {
+ if m != nil && m.F_Fixed64 != nil {
+ return *m.F_Fixed64
+ }
+ return Default_Defaults_F_Fixed64
+}
+
+func (m *Defaults) GetF_Uint32() uint32 {
+ if m != nil && m.F_Uint32 != nil {
+ return *m.F_Uint32
+ }
+ return Default_Defaults_F_Uint32
+}
+
+func (m *Defaults) GetF_Uint64() uint64 {
+ if m != nil && m.F_Uint64 != nil {
+ return *m.F_Uint64
+ }
+ return Default_Defaults_F_Uint64
+}
+
+func (m *Defaults) GetF_Float() float32 {
+ if m != nil && m.F_Float != nil {
+ return *m.F_Float
+ }
+ return Default_Defaults_F_Float
+}
+
+func (m *Defaults) GetF_Double() float64 {
+ if m != nil && m.F_Double != nil {
+ return *m.F_Double
+ }
+ return Default_Defaults_F_Double
+}
+
+func (m *Defaults) GetF_String() string {
+ if m != nil && m.F_String != nil {
+ return *m.F_String
+ }
+ return Default_Defaults_F_String
+}
+
+func (m *Defaults) GetF_Bytes() []byte {
+ if m != nil && m.F_Bytes != nil {
+ return m.F_Bytes
+ }
+ return append([]byte(nil), Default_Defaults_F_Bytes...)
+}
+
+func (m *Defaults) GetF_Sint32() int32 {
+ if m != nil && m.F_Sint32 != nil {
+ return *m.F_Sint32
+ }
+ return Default_Defaults_F_Sint32
+}
+
+func (m *Defaults) GetF_Sint64() int64 {
+ if m != nil && m.F_Sint64 != nil {
+ return *m.F_Sint64
+ }
+ return Default_Defaults_F_Sint64
+}
+
+func (m *Defaults) GetF_Enum() Defaults_Color {
+ if m != nil && m.F_Enum != nil {
+ return *m.F_Enum
+ }
+ return Default_Defaults_F_Enum
+}
+
+func (m *Defaults) GetF_Pinf() float32 {
+ if m != nil && m.F_Pinf != nil {
+ return *m.F_Pinf
+ }
+ return Default_Defaults_F_Pinf
+}
+
+func (m *Defaults) GetF_Ninf() float32 {
+ if m != nil && m.F_Ninf != nil {
+ return *m.F_Ninf
+ }
+ return Default_Defaults_F_Ninf
+}
+
+func (m *Defaults) GetF_Nan() float32 {
+ if m != nil && m.F_Nan != nil {
+ return *m.F_Nan
+ }
+ return Default_Defaults_F_Nan
+}
+
+func (m *Defaults) GetSub() *SubDefaults {
+ if m != nil {
+ return m.Sub
+ }
+ return nil
+}
+
+func (m *Defaults) GetStrZero() string {
+ if m != nil && m.StrZero != nil {
+ return *m.StrZero
+ }
+ return ""
+}
+
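+// Illustrative usage sketch, not part of the upstream generated file: the
+// non-finite float defaults above come back verbatim from the getters, and
+// the enum default is the declared Defaults_GREEN rather than the zero value.
+func exampleFloatDefaults() {
+	m := &Defaults{}
+	_ = math.IsInf(float64(m.GetF_Pinf()), 1) // true
+	_ = math.IsNaN(float64(m.GetF_Nan()))     // true
+	_ = m.GetF_Enum()                         // Defaults_GREEN
+}
+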
+type SubDefaults struct {
+ N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SubDefaults) Reset() { *m = SubDefaults{} }
+func (m *SubDefaults) String() string { return proto.CompactTextString(m) }
+func (*SubDefaults) ProtoMessage() {}
+func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+
+const Default_SubDefaults_N int64 = 7
+
+func (m *SubDefaults) GetN() int64 {
+ if m != nil && m.N != nil {
+ return *m.N
+ }
+ return Default_SubDefaults_N
+}
+
+type RepeatedEnum struct {
+ Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} }
+func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) }
+func (*RepeatedEnum) ProtoMessage() {}
+func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color {
+ if m != nil {
+ return m.Color
+ }
+ return nil
+}
+
+type MoreRepeated struct {
+ Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"`
+ BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"`
+ Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"`
+ IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"`
+ Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"`
+ Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"`
+ Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MoreRepeated) Reset() { *m = MoreRepeated{} }
+func (m *MoreRepeated) String() string { return proto.CompactTextString(m) }
+func (*MoreRepeated) ProtoMessage() {}
+func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+func (m *MoreRepeated) GetBools() []bool {
+ if m != nil {
+ return m.Bools
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetBoolsPacked() []bool {
+ if m != nil {
+ return m.BoolsPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetInts() []int32 {
+ if m != nil {
+ return m.Ints
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetIntsPacked() []int32 {
+ if m != nil {
+ return m.IntsPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetInt64SPacked() []int64 {
+ if m != nil {
+ return m.Int64SPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetStrings() []string {
+ if m != nil {
+ return m.Strings
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetFixeds() []uint32 {
+ if m != nil {
+ return m.Fixeds
+ }
+ return nil
+}
+
+type GroupOld struct {
+ G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupOld) Reset() { *m = GroupOld{} }
+func (m *GroupOld) String() string { return proto.CompactTextString(m) }
+func (*GroupOld) ProtoMessage() {}
+func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+
+func (m *GroupOld) GetG() *GroupOld_G {
+ if m != nil {
+ return m.G
+ }
+ return nil
+}
+
+type GroupOld_G struct {
+ X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupOld_G) Reset() { *m = GroupOld_G{} }
+func (m *GroupOld_G) String() string { return proto.CompactTextString(m) }
+func (*GroupOld_G) ProtoMessage() {}
+func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24, 0} }
+
+func (m *GroupOld_G) GetX() int32 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+type GroupNew struct {
+ G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupNew) Reset() { *m = GroupNew{} }
+func (m *GroupNew) String() string { return proto.CompactTextString(m) }
+func (*GroupNew) ProtoMessage() {}
+func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+
+func (m *GroupNew) GetG() *GroupNew_G {
+ if m != nil {
+ return m.G
+ }
+ return nil
+}
+
+type GroupNew_G struct {
+ X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
+ Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupNew_G) Reset() { *m = GroupNew_G{} }
+func (m *GroupNew_G) String() string { return proto.CompactTextString(m) }
+func (*GroupNew_G) ProtoMessage() {}
+func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} }
+
+func (m *GroupNew_G) GetX() int32 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+func (m *GroupNew_G) GetY() int32 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
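+// Illustrative usage sketch, not part of the upstream generated file:
+// GroupNew only adds field y inside the group, so bytes written as GroupOld
+// still decode as GroupNew. Errors elided.
+func exampleGroupCompat() {
+	buf, _ := proto.Marshal(&GroupOld{G: &GroupOld_G{X: proto.Int32(2)}})
+	var g GroupNew
+	_ = proto.Unmarshal(buf, &g) // g.GetG().GetX() == 2
+}
+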
+type FloatingPoint struct {
+ F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FloatingPoint) Reset() { *m = FloatingPoint{} }
+func (m *FloatingPoint) String() string { return proto.CompactTextString(m) }
+func (*FloatingPoint) ProtoMessage() {}
+func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+
+func (m *FloatingPoint) GetF() float64 {
+ if m != nil && m.F != nil {
+ return *m.F
+ }
+ return 0
+}
+
+type MessageWithMap struct {
+ NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
+func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
+func (*MessageWithMap) ProtoMessage() {}
+func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+
+func (m *MessageWithMap) GetNameMapping() map[int32]string {
+ if m != nil {
+ return m.NameMapping
+ }
+ return nil
+}
+
+func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint {
+ if m != nil {
+ return m.MsgMapping
+ }
+ return nil
+}
+
+func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
+ if m != nil {
+ return m.ByteMapping
+ }
+ return nil
+}
+
+func (m *MessageWithMap) GetStrToStr() map[string]string {
+ if m != nil {
+ return m.StrToStr
+ }
+ return nil
+}
+
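+// Oneof shows how a proto2 oneof is generated: each oneof becomes a single
+// interface-typed field (Union and Tormato here) with one wrapper struct per
+// case. At most one wrapper is stored at a time, and the per-case getters
+// below type-switch on the interface, returning the zero value when a
+// different case (or none) is set.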
+type Oneof struct {
+ // Types that are valid to be assigned to Union:
+ // *Oneof_F_Bool
+ // *Oneof_F_Int32
+ // *Oneof_F_Int64
+ // *Oneof_F_Fixed32
+ // *Oneof_F_Fixed64
+ // *Oneof_F_Uint32
+ // *Oneof_F_Uint64
+ // *Oneof_F_Float
+ // *Oneof_F_Double
+ // *Oneof_F_String
+ // *Oneof_F_Bytes
+ // *Oneof_F_Sint32
+ // *Oneof_F_Sint64
+ // *Oneof_F_Enum
+ // *Oneof_F_Message
+ // *Oneof_FGroup
+ // *Oneof_F_Largest_Tag
+ Union isOneof_Union `protobuf_oneof:"union"`
+ // Types that are valid to be assigned to Tormato:
+ // *Oneof_Value
+ Tormato isOneof_Tormato `protobuf_oneof:"tormato"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Oneof) Reset() { *m = Oneof{} }
+func (m *Oneof) String() string { return proto.CompactTextString(m) }
+func (*Oneof) ProtoMessage() {}
+func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+
+type isOneof_Union interface {
+ isOneof_Union()
+}
+type isOneof_Tormato interface {
+ isOneof_Tormato()
+}
+
+type Oneof_F_Bool struct {
+ F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,oneof"`
+}
+type Oneof_F_Int32 struct {
+ F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,oneof"`
+}
+type Oneof_F_Int64 struct {
+ F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=fInt64,oneof"`
+}
+type Oneof_F_Fixed32 struct {
+ F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,oneof"`
+}
+type Oneof_F_Fixed64 struct {
+ F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,oneof"`
+}
+type Oneof_F_Uint32 struct {
+ F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,oneof"`
+}
+type Oneof_F_Uint64 struct {
+ F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,oneof"`
+}
+type Oneof_F_Float struct {
+ F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,oneof"`
+}
+type Oneof_F_Double struct {
+ F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,oneof"`
+}
+type Oneof_F_String struct {
+ F_String string `protobuf:"bytes,10,opt,name=F_String,json=fString,oneof"`
+}
+type Oneof_F_Bytes struct {
+ F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,oneof"`
+}
+type Oneof_F_Sint32 struct {
+ F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,oneof"`
+}
+type Oneof_F_Sint64 struct {
+ F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,oneof"`
+}
+type Oneof_F_Enum struct {
+ F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.MyMessage_Color,oneof"`
+}
+type Oneof_F_Message struct {
+ F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=fMessage,oneof"`
+}
+type Oneof_FGroup struct {
+ FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"`
+}
+type Oneof_F_Largest_Tag struct {
+ F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=fLargestTag,oneof"`
+}
+type Oneof_Value struct {
+ Value int32 `protobuf:"varint,100,opt,name=value,oneof"`
+}
+
+func (*Oneof_F_Bool) isOneof_Union() {}
+func (*Oneof_F_Int32) isOneof_Union() {}
+func (*Oneof_F_Int64) isOneof_Union() {}
+func (*Oneof_F_Fixed32) isOneof_Union() {}
+func (*Oneof_F_Fixed64) isOneof_Union() {}
+func (*Oneof_F_Uint32) isOneof_Union() {}
+func (*Oneof_F_Uint64) isOneof_Union() {}
+func (*Oneof_F_Float) isOneof_Union() {}
+func (*Oneof_F_Double) isOneof_Union() {}
+func (*Oneof_F_String) isOneof_Union() {}
+func (*Oneof_F_Bytes) isOneof_Union() {}
+func (*Oneof_F_Sint32) isOneof_Union() {}
+func (*Oneof_F_Sint64) isOneof_Union() {}
+func (*Oneof_F_Enum) isOneof_Union() {}
+func (*Oneof_F_Message) isOneof_Union() {}
+func (*Oneof_FGroup) isOneof_Union() {}
+func (*Oneof_F_Largest_Tag) isOneof_Union() {}
+func (*Oneof_Value) isOneof_Tormato() {}
+
+func (m *Oneof) GetUnion() isOneof_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+}
+func (m *Oneof) GetTormato() isOneof_Tormato {
+ if m != nil {
+ return m.Tormato
+ }
+ return nil
+}
+
+func (m *Oneof) GetF_Bool() bool {
+ if x, ok := m.GetUnion().(*Oneof_F_Bool); ok {
+ return x.F_Bool
+ }
+ return false
+}
+
+func (m *Oneof) GetF_Int32() int32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Int32); ok {
+ return x.F_Int32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Int64() int64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Int64); ok {
+ return x.F_Int64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Fixed32() uint32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok {
+ return x.F_Fixed32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Fixed64() uint64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok {
+ return x.F_Fixed64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Uint32() uint32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok {
+ return x.F_Uint32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Uint64() uint64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok {
+ return x.F_Uint64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Float() float32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Float); ok {
+ return x.F_Float
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Double() float64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Double); ok {
+ return x.F_Double
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_String() string {
+ if x, ok := m.GetUnion().(*Oneof_F_String); ok {
+ return x.F_String
+ }
+ return ""
+}
+
+func (m *Oneof) GetF_Bytes() []byte {
+ if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok {
+ return x.F_Bytes
+ }
+ return nil
+}
+
+func (m *Oneof) GetF_Sint32() int32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok {
+ return x.F_Sint32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Sint64() int64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok {
+ return x.F_Sint64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Enum() MyMessage_Color {
+ if x, ok := m.GetUnion().(*Oneof_F_Enum); ok {
+ return x.F_Enum
+ }
+ return MyMessage_RED
+}
+
+func (m *Oneof) GetF_Message() *GoTestField {
+ if x, ok := m.GetUnion().(*Oneof_F_Message); ok {
+ return x.F_Message
+ }
+ return nil
+}
+
+func (m *Oneof) GetFGroup() *Oneof_F_Group {
+ if x, ok := m.GetUnion().(*Oneof_FGroup); ok {
+ return x.FGroup
+ }
+ return nil
+}
+
+func (m *Oneof) GetF_Largest_Tag() int32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok {
+ return x.F_Largest_Tag
+ }
+ return 0
+}
+
+func (m *Oneof) GetValue() int32 {
+ if x, ok := m.GetTormato().(*Oneof_Value); ok {
+ return x.Value
+ }
+ return 0
+}
+
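+// A minimal usage sketch for the getters above: selecting a case stores its
+// wrapper in the interface field, and only the matching getter sees it.
+//
+//	m := &Oneof{Union: &Oneof_F_Int32{F_Int32: 7}}
+//	_ = m.GetF_Int32() // 7
+//	_ = m.GetF_Bool()  // false: getters for the other cases return zero values
+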
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{
+ (*Oneof_F_Bool)(nil),
+ (*Oneof_F_Int32)(nil),
+ (*Oneof_F_Int64)(nil),
+ (*Oneof_F_Fixed32)(nil),
+ (*Oneof_F_Fixed64)(nil),
+ (*Oneof_F_Uint32)(nil),
+ (*Oneof_F_Uint64)(nil),
+ (*Oneof_F_Float)(nil),
+ (*Oneof_F_Double)(nil),
+ (*Oneof_F_String)(nil),
+ (*Oneof_F_Bytes)(nil),
+ (*Oneof_F_Sint32)(nil),
+ (*Oneof_F_Sint64)(nil),
+ (*Oneof_F_Enum)(nil),
+ (*Oneof_F_Message)(nil),
+ (*Oneof_FGroup)(nil),
+ (*Oneof_F_Largest_Tag)(nil),
+ (*Oneof_Value)(nil),
+ }
+}
+
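+// The marshaler below emits, for whichever case is set, a key varint of
+// (field_number<<3 | wire_type) followed by the payload in that wire format;
+// for F_Bool that key is 1<<3|0 = 0x08. Message cases are length-delimited
+// via EncodeMessage, while the group case is bracketed by start-group and
+// end-group keys instead of a length prefix.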
+func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Oneof)
+ // union
+ switch x := m.Union.(type) {
+ case *Oneof_F_Bool:
+ t := uint64(0)
+ if x.F_Bool {
+ t = 1
+ }
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *Oneof_F_Int32:
+ b.EncodeVarint(2<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Int32))
+ case *Oneof_F_Int64:
+ b.EncodeVarint(3<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Int64))
+ case *Oneof_F_Fixed32:
+ b.EncodeVarint(4<<3 | proto.WireFixed32)
+ b.EncodeFixed32(uint64(x.F_Fixed32))
+ case *Oneof_F_Fixed64:
+ b.EncodeVarint(5<<3 | proto.WireFixed64)
+ b.EncodeFixed64(uint64(x.F_Fixed64))
+ case *Oneof_F_Uint32:
+ b.EncodeVarint(6<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Uint32))
+ case *Oneof_F_Uint64:
+ b.EncodeVarint(7<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Uint64))
+ case *Oneof_F_Float:
+ b.EncodeVarint(8<<3 | proto.WireFixed32)
+ b.EncodeFixed32(uint64(math.Float32bits(x.F_Float)))
+ case *Oneof_F_Double:
+ b.EncodeVarint(9<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.F_Double))
+ case *Oneof_F_String:
+ b.EncodeVarint(10<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.F_String)
+ case *Oneof_F_Bytes:
+ b.EncodeVarint(11<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.F_Bytes)
+ case *Oneof_F_Sint32:
+ b.EncodeVarint(12<<3 | proto.WireVarint)
+ b.EncodeZigzag32(uint64(x.F_Sint32))
+ case *Oneof_F_Sint64:
+ b.EncodeVarint(13<<3 | proto.WireVarint)
+ b.EncodeZigzag64(uint64(x.F_Sint64))
+ case *Oneof_F_Enum:
+ b.EncodeVarint(14<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Enum))
+ case *Oneof_F_Message:
+ b.EncodeVarint(15<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.F_Message); err != nil {
+ return err
+ }
+ case *Oneof_FGroup:
+ b.EncodeVarint(16<<3 | proto.WireStartGroup)
+ if err := b.Marshal(x.FGroup); err != nil {
+ return err
+ }
+ b.EncodeVarint(16<<3 | proto.WireEndGroup)
+ case *Oneof_F_Largest_Tag:
+ b.EncodeVarint(536870911<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Largest_Tag))
+ case nil:
+ default:
+ return fmt.Errorf("Oneof.Union has unexpected type %T", x)
+ }
+ // tormato
+ switch x := m.Tormato.(type) {
+ case *Oneof_Value:
+ b.EncodeVarint(100<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Value))
+ case nil:
+ default:
+ return fmt.Errorf("Oneof.Tormato has unexpected type %T", x)
+ }
+ return nil
+}
+
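+// The unmarshaler receives one already-decoded key at a time. Returning
+// (true, err) claims the tag for this oneof; (false, nil) tells the caller
+// the tag belongs elsewhere. A tag that arrives with the wrong wire type is
+// rejected with ErrInternalBadWireType rather than skipped.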
+func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Oneof)
+ switch tag {
+ case 1: // union.F_Bool
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Bool{x != 0}
+ return true, err
+ case 2: // union.F_Int32
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Int32{int32(x)}
+ return true, err
+ case 3: // union.F_Int64
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Int64{int64(x)}
+ return true, err
+ case 4: // union.F_Fixed32
+ if wire != proto.WireFixed32 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed32()
+ m.Union = &Oneof_F_Fixed32{uint32(x)}
+ return true, err
+ case 5: // union.F_Fixed64
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Union = &Oneof_F_Fixed64{x}
+ return true, err
+ case 6: // union.F_Uint32
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Uint32{uint32(x)}
+ return true, err
+ case 7: // union.F_Uint64
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Uint64{x}
+ return true, err
+ case 8: // union.F_Float
+ if wire != proto.WireFixed32 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed32()
+ m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))}
+ return true, err
+ case 9: // union.F_Double
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Union = &Oneof_F_Double{math.Float64frombits(x)}
+ return true, err
+ case 10: // union.F_String
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Union = &Oneof_F_String{x}
+ return true, err
+ case 11: // union.F_Bytes
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Union = &Oneof_F_Bytes{x}
+ return true, err
+ case 12: // union.F_Sint32
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeZigzag32()
+ m.Union = &Oneof_F_Sint32{int32(x)}
+ return true, err
+ case 13: // union.F_Sint64
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeZigzag64()
+ m.Union = &Oneof_F_Sint64{int64(x)}
+ return true, err
+ case 14: // union.F_Enum
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Enum{MyMessage_Color(x)}
+ return true, err
+ case 15: // union.F_Message
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(GoTestField)
+ err := b.DecodeMessage(msg)
+ m.Union = &Oneof_F_Message{msg}
+ return true, err
+ case 16: // union.f_group
+ if wire != proto.WireStartGroup {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Oneof_F_Group)
+ err := b.DecodeGroup(msg)
+ m.Union = &Oneof_FGroup{msg}
+ return true, err
+ case 536870911: // union.F_Largest_Tag
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Largest_Tag{int32(x)}
+ return true, err
+ case 100: // tormato.value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Tormato = &Oneof_Value{int32(x)}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
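+// The sizer mirrors the marshaler byte for byte: each case counts the key
+// varint plus its payload, with fixed32/fixed64 contributing a constant 4 or
+// 8 bytes and the sint cases sized after the zigzag mapping (so F_Sint32 set
+// to -1 maps to 1 and costs a single byte).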
+func _Oneof_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Oneof)
+ // union
+ switch x := m.Union.(type) {
+ case *Oneof_F_Bool:
+ n += proto.SizeVarint(1<<3 | proto.WireVarint)
+ n += 1
+ case *Oneof_F_Int32:
+ n += proto.SizeVarint(2<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Int32))
+ case *Oneof_F_Int64:
+ n += proto.SizeVarint(3<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Int64))
+ case *Oneof_F_Fixed32:
+ n += proto.SizeVarint(4<<3 | proto.WireFixed32)
+ n += 4
+ case *Oneof_F_Fixed64:
+ n += proto.SizeVarint(5<<3 | proto.WireFixed64)
+ n += 8
+ case *Oneof_F_Uint32:
+ n += proto.SizeVarint(6<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Uint32))
+ case *Oneof_F_Uint64:
+ n += proto.SizeVarint(7<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Uint64))
+ case *Oneof_F_Float:
+ n += proto.SizeVarint(8<<3 | proto.WireFixed32)
+ n += 4
+ case *Oneof_F_Double:
+ n += proto.SizeVarint(9<<3 | proto.WireFixed64)
+ n += 8
+ case *Oneof_F_String:
+ n += proto.SizeVarint(10<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.F_String)))
+ n += len(x.F_String)
+ case *Oneof_F_Bytes:
+ n += proto.SizeVarint(11<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.F_Bytes)))
+ n += len(x.F_Bytes)
+ case *Oneof_F_Sint32:
+ n += proto.SizeVarint(12<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31))))
+ case *Oneof_F_Sint64:
+ n += proto.SizeVarint(13<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63))))
+ case *Oneof_F_Enum:
+ n += proto.SizeVarint(14<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Enum))
+ case *Oneof_F_Message:
+ s := proto.Size(x.F_Message)
+ n += proto.SizeVarint(15<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Oneof_FGroup:
+ n += proto.SizeVarint(16<<3 | proto.WireStartGroup)
+ n += proto.Size(x.FGroup)
+ n += proto.SizeVarint(16<<3 | proto.WireEndGroup)
+ case *Oneof_F_Largest_Tag:
+ n += proto.SizeVarint(536870911<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Largest_Tag))
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ // tormato
+ switch x := m.Tormato.(type) {
+ case *Oneof_Value:
+ n += proto.SizeVarint(100<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Value))
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type Oneof_F_Group struct {
+ X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} }
+func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) }
+func (*Oneof_F_Group) ProtoMessage() {}
+func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28, 0} }
+
+func (m *Oneof_F_Group) GetX() int32 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+type Communique struct {
+ MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"`
+ // This is a oneof, called "union".
+ //
+ // Types that are valid to be assigned to Union:
+ // *Communique_Number
+ // *Communique_Name
+ // *Communique_Data
+ // *Communique_TempC
+ // *Communique_Col
+ // *Communique_Msg
+ Union isCommunique_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Communique) Reset() { *m = Communique{} }
+func (m *Communique) String() string { return proto.CompactTextString(m) }
+func (*Communique) ProtoMessage() {}
+func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+
+type isCommunique_Union interface {
+ isCommunique_Union()
+}
+
+type Communique_Number struct {
+ Number int32 `protobuf:"varint,5,opt,name=number,oneof"`
+}
+type Communique_Name struct {
+ Name string `protobuf:"bytes,6,opt,name=name,oneof"`
+}
+type Communique_Data struct {
+ Data []byte `protobuf:"bytes,7,opt,name=data,oneof"`
+}
+type Communique_TempC struct {
+ TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"`
+}
+type Communique_Col struct {
+ Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"`
+}
+type Communique_Msg struct {
+ Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"`
+}
+
+func (*Communique_Number) isCommunique_Union() {}
+func (*Communique_Name) isCommunique_Union() {}
+func (*Communique_Data) isCommunique_Union() {}
+func (*Communique_TempC) isCommunique_Union() {}
+func (*Communique_Col) isCommunique_Union() {}
+func (*Communique_Msg) isCommunique_Union() {}
+
+func (m *Communique) GetUnion() isCommunique_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+}
+
+func (m *Communique) GetMakeMeCry() bool {
+ if m != nil && m.MakeMeCry != nil {
+ return *m.MakeMeCry
+ }
+ return false
+}
+
+func (m *Communique) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Communique_Number); ok {
+ return x.Number
+ }
+ return 0
+}
+
+func (m *Communique) GetName() string {
+ if x, ok := m.GetUnion().(*Communique_Name); ok {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Communique) GetData() []byte {
+ if x, ok := m.GetUnion().(*Communique_Data); ok {
+ return x.Data
+ }
+ return nil
+}
+
+func (m *Communique) GetTempC() float64 {
+ if x, ok := m.GetUnion().(*Communique_TempC); ok {
+ return x.TempC
+ }
+ return 0
+}
+
+func (m *Communique) GetCol() MyMessage_Color {
+ if x, ok := m.GetUnion().(*Communique_Col); ok {
+ return x.Col
+ }
+ return MyMessage_RED
+}
+
+func (m *Communique) GetMsg() *Strings {
+ if x, ok := m.GetUnion().(*Communique_Msg); ok {
+ return x.Msg
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{
+ (*Communique_Number)(nil),
+ (*Communique_Name)(nil),
+ (*Communique_Data)(nil),
+ (*Communique_TempC)(nil),
+ (*Communique_Col)(nil),
+ (*Communique_Msg)(nil),
+ }
+}
+
+func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Communique)
+ // union
+ switch x := m.Union.(type) {
+ case *Communique_Number:
+ b.EncodeVarint(5<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Number))
+ case *Communique_Name:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.Name)
+ case *Communique_Data:
+ b.EncodeVarint(7<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.Data)
+ case *Communique_TempC:
+ b.EncodeVarint(8<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.TempC))
+ case *Communique_Col:
+ b.EncodeVarint(9<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Col))
+ case *Communique_Msg:
+ b.EncodeVarint(10<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Msg); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Communique.Union has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Communique)
+ switch tag {
+ case 5: // union.number
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Number{int32(x)}
+ return true, err
+ case 6: // union.name
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Union = &Communique_Name{x}
+ return true, err
+ case 7: // union.data
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Union = &Communique_Data{x}
+ return true, err
+ case 8: // union.temp_c
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Union = &Communique_TempC{math.Float64frombits(x)}
+ return true, err
+ case 9: // union.col
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Col{MyMessage_Color(x)}
+ return true, err
+ case 10: // union.msg
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Strings)
+ err := b.DecodeMessage(msg)
+ m.Union = &Communique_Msg{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Communique_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Communique)
+ // union
+ switch x := m.Union.(type) {
+ case *Communique_Number:
+ n += proto.SizeVarint(5<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Number))
+ case *Communique_Name:
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Name)))
+ n += len(x.Name)
+ case *Communique_Data:
+ n += proto.SizeVarint(7<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Data)))
+ n += len(x.Data)
+ case *Communique_TempC:
+ n += proto.SizeVarint(8<<3 | proto.WireFixed64)
+ n += 8
+ case *Communique_Col:
+ n += proto.SizeVarint(9<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Col))
+ case *Communique_Msg:
+ s := proto.Size(x.Msg)
+ n += proto.SizeVarint(10<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
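+// Everything from here down is extension machinery. Each ExtensionDesc binds
+// the extended message type, the Go type of the extension value, the field
+// number, and an encoding tag in the same format as the struct field tags,
+// which is what lets the proto package marshal extensions by number.
+//
+// A minimal usage sketch for the first descriptor:
+//
+//	m := &MyMessage{}
+//	if err := proto.SetExtension(m, E_Greeting, []string{"hello"}); err != nil {
+//		// handle err
+//	}
+//	v, err := proto.GetExtension(m, E_Greeting) // v.([]string) on success
+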
+var E_Greeting = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: ([]string)(nil),
+ Field: 106,
+ Name: "testdata.greeting",
+ Tag: "bytes,106,rep,name=greeting",
+}
+
+var E_Complex = &proto.ExtensionDesc{
+ ExtendedType: (*OtherMessage)(nil),
+ ExtensionType: (*ComplexExtension)(nil),
+ Field: 200,
+ Name: "testdata.complex",
+ Tag: "bytes,200,opt,name=complex",
+}
+
+var E_RComplex = &proto.ExtensionDesc{
+ ExtendedType: (*OtherMessage)(nil),
+ ExtensionType: ([]*ComplexExtension)(nil),
+ Field: 201,
+ Name: "testdata.r_complex",
+ Tag: "bytes,201,rep,name=r_complex,json=rComplex",
+}
+
+var E_NoDefaultDouble = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float64)(nil),
+ Field: 101,
+ Name: "testdata.no_default_double",
+ Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble",
+}
+
+var E_NoDefaultFloat = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float32)(nil),
+ Field: 102,
+ Name: "testdata.no_default_float",
+ Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat",
+}
+
+var E_NoDefaultInt32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 103,
+ Name: "testdata.no_default_int32",
+ Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32",
+}
+
+var E_NoDefaultInt64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 104,
+ Name: "testdata.no_default_int64",
+ Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64",
+}
+
+var E_NoDefaultUint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 105,
+ Name: "testdata.no_default_uint32",
+ Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32",
+}
+
+var E_NoDefaultUint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 106,
+ Name: "testdata.no_default_uint64",
+ Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64",
+}
+
+var E_NoDefaultSint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 107,
+ Name: "testdata.no_default_sint32",
+ Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32",
+}
+
+var E_NoDefaultSint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 108,
+ Name: "testdata.no_default_sint64",
+ Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64",
+}
+
+var E_NoDefaultFixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 109,
+ Name: "testdata.no_default_fixed32",
+ Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32",
+}
+
+var E_NoDefaultFixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 110,
+ Name: "testdata.no_default_fixed64",
+ Tag: "fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64",
+}
+
+var E_NoDefaultSfixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 111,
+ Name: "testdata.no_default_sfixed32",
+ Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32",
+}
+
+var E_NoDefaultSfixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 112,
+ Name: "testdata.no_default_sfixed64",
+ Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64",
+}
+
+var E_NoDefaultBool = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 113,
+ Name: "testdata.no_default_bool",
+ Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool",
+}
+
+var E_NoDefaultString = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 114,
+ Name: "testdata.no_default_string",
+ Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString",
+}
+
+var E_NoDefaultBytes = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: ([]byte)(nil),
+ Field: 115,
+ Name: "testdata.no_default_bytes",
+ Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes",
+}
+
+var E_NoDefaultEnum = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil),
+ Field: 116,
+ Name: "testdata.no_default_enum",
+ Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum",
+}
+
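+// The descriptors below differ from the no_default_* set above only in the
+// def=... clause at the end of each tag; GetExtension falls back to that
+// default when the extension is absent from the message.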
+var E_DefaultDouble = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float64)(nil),
+ Field: 201,
+ Name: "testdata.default_double",
+ Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415",
+}
+
+var E_DefaultFloat = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float32)(nil),
+ Field: 202,
+ Name: "testdata.default_float",
+ Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14",
+}
+
+var E_DefaultInt32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 203,
+ Name: "testdata.default_int32",
+ Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42",
+}
+
+var E_DefaultInt64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 204,
+ Name: "testdata.default_int64",
+ Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43",
+}
+
+var E_DefaultUint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 205,
+ Name: "testdata.default_uint32",
+ Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44",
+}
+
+var E_DefaultUint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 206,
+ Name: "testdata.default_uint64",
+ Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45",
+}
+
+var E_DefaultSint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 207,
+ Name: "testdata.default_sint32",
+ Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46",
+}
+
+var E_DefaultSint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 208,
+ Name: "testdata.default_sint64",
+ Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47",
+}
+
+var E_DefaultFixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 209,
+ Name: "testdata.default_fixed32",
+ Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48",
+}
+
+var E_DefaultFixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 210,
+ Name: "testdata.default_fixed64",
+ Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49",
+}
+
+var E_DefaultSfixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 211,
+ Name: "testdata.default_sfixed32",
+ Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50",
+}
+
+var E_DefaultSfixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 212,
+ Name: "testdata.default_sfixed64",
+ Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51",
+}
+
+var E_DefaultBool = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 213,
+ Name: "testdata.default_bool",
+ Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1",
+}
+
+var E_DefaultString = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 214,
+ Name: "testdata.default_string",
+ Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string",
+}
+
+var E_DefaultBytes = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: ([]byte)(nil),
+ Field: 215,
+ Name: "testdata.default_bytes",
+ Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes",
+}
+
+var E_DefaultEnum = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil),
+ Field: 216,
+ Name: "testdata.default_enum",
+ Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1",
+}
+
+var E_X201 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 201,
+ Name: "testdata.x201",
+ Tag: "bytes,201,opt,name=x201",
+}
+
+var E_X202 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 202,
+ Name: "testdata.x202",
+ Tag: "bytes,202,opt,name=x202",
+}
+
+var E_X203 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 203,
+ Name: "testdata.x203",
+ Tag: "bytes,203,opt,name=x203",
+}
+
+var E_X204 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 204,
+ Name: "testdata.x204",
+ Tag: "bytes,204,opt,name=x204",
+}
+
+var E_X205 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 205,
+ Name: "testdata.x205",
+ Tag: "bytes,205,opt,name=x205",
+}
+
+var E_X206 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 206,
+ Name: "testdata.x206",
+ Tag: "bytes,206,opt,name=x206",
+}
+
+var E_X207 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 207,
+ Name: "testdata.x207",
+ Tag: "bytes,207,opt,name=x207",
+}
+
+var E_X208 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 208,
+ Name: "testdata.x208",
+ Tag: "bytes,208,opt,name=x208",
+}
+
+var E_X209 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 209,
+ Name: "testdata.x209",
+ Tag: "bytes,209,opt,name=x209",
+}
+
+var E_X210 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 210,
+ Name: "testdata.x210",
+ Tag: "bytes,210,opt,name=x210",
+}
+
+var E_X211 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 211,
+ Name: "testdata.x211",
+ Tag: "bytes,211,opt,name=x211",
+}
+
+var E_X212 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 212,
+ Name: "testdata.x212",
+ Tag: "bytes,212,opt,name=x212",
+}
+
+var E_X213 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 213,
+ Name: "testdata.x213",
+ Tag: "bytes,213,opt,name=x213",
+}
+
+var E_X214 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 214,
+ Name: "testdata.x214",
+ Tag: "bytes,214,opt,name=x214",
+}
+
+var E_X215 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 215,
+ Name: "testdata.x215",
+ Tag: "bytes,215,opt,name=x215",
+}
+
+var E_X216 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 216,
+ Name: "testdata.x216",
+ Tag: "bytes,216,opt,name=x216",
+}
+
+var E_X217 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 217,
+ Name: "testdata.x217",
+ Tag: "bytes,217,opt,name=x217",
+}
+
+var E_X218 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 218,
+ Name: "testdata.x218",
+ Tag: "bytes,218,opt,name=x218",
+}
+
+var E_X219 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 219,
+ Name: "testdata.x219",
+ Tag: "bytes,219,opt,name=x219",
+}
+
+var E_X220 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 220,
+ Name: "testdata.x220",
+ Tag: "bytes,220,opt,name=x220",
+}
+
+var E_X221 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 221,
+ Name: "testdata.x221",
+ Tag: "bytes,221,opt,name=x221",
+}
+
+var E_X222 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 222,
+ Name: "testdata.x222",
+ Tag: "bytes,222,opt,name=x222",
+}
+
+var E_X223 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 223,
+ Name: "testdata.x223",
+ Tag: "bytes,223,opt,name=x223",
+}
+
+var E_X224 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 224,
+ Name: "testdata.x224",
+ Tag: "bytes,224,opt,name=x224",
+}
+
+var E_X225 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 225,
+ Name: "testdata.x225",
+ Tag: "bytes,225,opt,name=x225",
+}
+
+var E_X226 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 226,
+ Name: "testdata.x226",
+ Tag: "bytes,226,opt,name=x226",
+}
+
+var E_X227 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 227,
+ Name: "testdata.x227",
+ Tag: "bytes,227,opt,name=x227",
+}
+
+var E_X228 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 228,
+ Name: "testdata.x228",
+ Tag: "bytes,228,opt,name=x228",
+}
+
+var E_X229 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 229,
+ Name: "testdata.x229",
+ Tag: "bytes,229,opt,name=x229",
+}
+
+var E_X230 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 230,
+ Name: "testdata.x230",
+ Tag: "bytes,230,opt,name=x230",
+}
+
+var E_X231 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 231,
+ Name: "testdata.x231",
+ Tag: "bytes,231,opt,name=x231",
+}
+
+var E_X232 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 232,
+ Name: "testdata.x232",
+ Tag: "bytes,232,opt,name=x232",
+}
+
+var E_X233 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 233,
+ Name: "testdata.x233",
+ Tag: "bytes,233,opt,name=x233",
+}
+
+var E_X234 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 234,
+ Name: "testdata.x234",
+ Tag: "bytes,234,opt,name=x234",
+}
+
+var E_X235 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 235,
+ Name: "testdata.x235",
+ Tag: "bytes,235,opt,name=x235",
+}
+
+var E_X236 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 236,
+ Name: "testdata.x236",
+ Tag: "bytes,236,opt,name=x236",
+}
+
+var E_X237 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 237,
+ Name: "testdata.x237",
+ Tag: "bytes,237,opt,name=x237",
+}
+
+var E_X238 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 238,
+ Name: "testdata.x238",
+ Tag: "bytes,238,opt,name=x238",
+}
+
+var E_X239 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 239,
+ Name: "testdata.x239",
+ Tag: "bytes,239,opt,name=x239",
+}
+
+var E_X240 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 240,
+ Name: "testdata.x240",
+ Tag: "bytes,240,opt,name=x240",
+}
+
+var E_X241 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 241,
+ Name: "testdata.x241",
+ Tag: "bytes,241,opt,name=x241",
+}
+
+var E_X242 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 242,
+ Name: "testdata.x242",
+ Tag: "bytes,242,opt,name=x242",
+}
+
+var E_X243 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 243,
+ Name: "testdata.x243",
+ Tag: "bytes,243,opt,name=x243",
+}
+
+var E_X244 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 244,
+ Name: "testdata.x244",
+ Tag: "bytes,244,opt,name=x244",
+}
+
+var E_X245 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 245,
+ Name: "testdata.x245",
+ Tag: "bytes,245,opt,name=x245",
+}
+
+var E_X246 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 246,
+ Name: "testdata.x246",
+ Tag: "bytes,246,opt,name=x246",
+}
+
+var E_X247 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 247,
+ Name: "testdata.x247",
+ Tag: "bytes,247,opt,name=x247",
+}
+
+var E_X248 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 248,
+ Name: "testdata.x248",
+ Tag: "bytes,248,opt,name=x248",
+}
+
+var E_X249 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 249,
+ Name: "testdata.x249",
+ Tag: "bytes,249,opt,name=x249",
+}
+
+var E_X250 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 250,
+ Name: "testdata.x250",
+ Tag: "bytes,250,opt,name=x250",
+}
+
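+// init registers every generated message type, enum, and extension with the
+// proto package under its fully-qualified "testdata.*" name so that each can
+// be looked up by name and number at runtime.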
+func init() {
+ proto.RegisterType((*GoEnum)(nil), "testdata.GoEnum")
+ proto.RegisterType((*GoTestField)(nil), "testdata.GoTestField")
+ proto.RegisterType((*GoTest)(nil), "testdata.GoTest")
+ proto.RegisterType((*GoTest_RequiredGroup)(nil), "testdata.GoTest.RequiredGroup")
+ proto.RegisterType((*GoTest_RepeatedGroup)(nil), "testdata.GoTest.RepeatedGroup")
+ proto.RegisterType((*GoTest_OptionalGroup)(nil), "testdata.GoTest.OptionalGroup")
+ proto.RegisterType((*GoSkipTest)(nil), "testdata.GoSkipTest")
+ proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "testdata.GoSkipTest.SkipGroup")
+ proto.RegisterType((*NonPackedTest)(nil), "testdata.NonPackedTest")
+ proto.RegisterType((*PackedTest)(nil), "testdata.PackedTest")
+ proto.RegisterType((*MaxTag)(nil), "testdata.MaxTag")
+ proto.RegisterType((*OldMessage)(nil), "testdata.OldMessage")
+ proto.RegisterType((*OldMessage_Nested)(nil), "testdata.OldMessage.Nested")
+ proto.RegisterType((*NewMessage)(nil), "testdata.NewMessage")
+ proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested")
+ proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage")
+ proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage")
+ proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage")
+ proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage")
+ proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup")
+ proto.RegisterType((*Ext)(nil), "testdata.Ext")
+ proto.RegisterType((*ComplexExtension)(nil), "testdata.ComplexExtension")
+ proto.RegisterType((*DefaultsMessage)(nil), "testdata.DefaultsMessage")
+ proto.RegisterType((*MyMessageSet)(nil), "testdata.MyMessageSet")
+ proto.RegisterType((*Empty)(nil), "testdata.Empty")
+ proto.RegisterType((*MessageList)(nil), "testdata.MessageList")
+ proto.RegisterType((*MessageList_Message)(nil), "testdata.MessageList.Message")
+ proto.RegisterType((*Strings)(nil), "testdata.Strings")
+ proto.RegisterType((*Defaults)(nil), "testdata.Defaults")
+ proto.RegisterType((*SubDefaults)(nil), "testdata.SubDefaults")
+ proto.RegisterType((*RepeatedEnum)(nil), "testdata.RepeatedEnum")
+ proto.RegisterType((*MoreRepeated)(nil), "testdata.MoreRepeated")
+ proto.RegisterType((*GroupOld)(nil), "testdata.GroupOld")
+ proto.RegisterType((*GroupOld_G)(nil), "testdata.GroupOld.G")
+ proto.RegisterType((*GroupNew)(nil), "testdata.GroupNew")
+ proto.RegisterType((*GroupNew_G)(nil), "testdata.GroupNew.G")
+ proto.RegisterType((*FloatingPoint)(nil), "testdata.FloatingPoint")
+ proto.RegisterType((*MessageWithMap)(nil), "testdata.MessageWithMap")
+ proto.RegisterType((*Oneof)(nil), "testdata.Oneof")
+ proto.RegisterType((*Oneof_F_Group)(nil), "testdata.Oneof.F_Group")
+ proto.RegisterType((*Communique)(nil), "testdata.Communique")
+ proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value)
+ proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value)
+ proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value)
+ proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value)
+ proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value)
+ proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value)
+ proto.RegisterExtension(E_Ext_More)
+ proto.RegisterExtension(E_Ext_Text)
+ proto.RegisterExtension(E_Ext_Number)
+ proto.RegisterExtension(E_Greeting)
+ proto.RegisterExtension(E_Complex)
+ proto.RegisterExtension(E_RComplex)
+ proto.RegisterExtension(E_NoDefaultDouble)
+ proto.RegisterExtension(E_NoDefaultFloat)
+ proto.RegisterExtension(E_NoDefaultInt32)
+ proto.RegisterExtension(E_NoDefaultInt64)
+ proto.RegisterExtension(E_NoDefaultUint32)
+ proto.RegisterExtension(E_NoDefaultUint64)
+ proto.RegisterExtension(E_NoDefaultSint32)
+ proto.RegisterExtension(E_NoDefaultSint64)
+ proto.RegisterExtension(E_NoDefaultFixed32)
+ proto.RegisterExtension(E_NoDefaultFixed64)
+ proto.RegisterExtension(E_NoDefaultSfixed32)
+ proto.RegisterExtension(E_NoDefaultSfixed64)
+ proto.RegisterExtension(E_NoDefaultBool)
+ proto.RegisterExtension(E_NoDefaultString)
+ proto.RegisterExtension(E_NoDefaultBytes)
+ proto.RegisterExtension(E_NoDefaultEnum)
+ proto.RegisterExtension(E_DefaultDouble)
+ proto.RegisterExtension(E_DefaultFloat)
+ proto.RegisterExtension(E_DefaultInt32)
+ proto.RegisterExtension(E_DefaultInt64)
+ proto.RegisterExtension(E_DefaultUint32)
+ proto.RegisterExtension(E_DefaultUint64)
+ proto.RegisterExtension(E_DefaultSint32)
+ proto.RegisterExtension(E_DefaultSint64)
+ proto.RegisterExtension(E_DefaultFixed32)
+ proto.RegisterExtension(E_DefaultFixed64)
+ proto.RegisterExtension(E_DefaultSfixed32)
+ proto.RegisterExtension(E_DefaultSfixed64)
+ proto.RegisterExtension(E_DefaultBool)
+ proto.RegisterExtension(E_DefaultString)
+ proto.RegisterExtension(E_DefaultBytes)
+ proto.RegisterExtension(E_DefaultEnum)
+ proto.RegisterExtension(E_X201)
+ proto.RegisterExtension(E_X202)
+ proto.RegisterExtension(E_X203)
+ proto.RegisterExtension(E_X204)
+ proto.RegisterExtension(E_X205)
+ proto.RegisterExtension(E_X206)
+ proto.RegisterExtension(E_X207)
+ proto.RegisterExtension(E_X208)
+ proto.RegisterExtension(E_X209)
+ proto.RegisterExtension(E_X210)
+ proto.RegisterExtension(E_X211)
+ proto.RegisterExtension(E_X212)
+ proto.RegisterExtension(E_X213)
+ proto.RegisterExtension(E_X214)
+ proto.RegisterExtension(E_X215)
+ proto.RegisterExtension(E_X216)
+ proto.RegisterExtension(E_X217)
+ proto.RegisterExtension(E_X218)
+ proto.RegisterExtension(E_X219)
+ proto.RegisterExtension(E_X220)
+ proto.RegisterExtension(E_X221)
+ proto.RegisterExtension(E_X222)
+ proto.RegisterExtension(E_X223)
+ proto.RegisterExtension(E_X224)
+ proto.RegisterExtension(E_X225)
+ proto.RegisterExtension(E_X226)
+ proto.RegisterExtension(E_X227)
+ proto.RegisterExtension(E_X228)
+ proto.RegisterExtension(E_X229)
+ proto.RegisterExtension(E_X230)
+ proto.RegisterExtension(E_X231)
+ proto.RegisterExtension(E_X232)
+ proto.RegisterExtension(E_X233)
+ proto.RegisterExtension(E_X234)
+ proto.RegisterExtension(E_X235)
+ proto.RegisterExtension(E_X236)
+ proto.RegisterExtension(E_X237)
+ proto.RegisterExtension(E_X238)
+ proto.RegisterExtension(E_X239)
+ proto.RegisterExtension(E_X240)
+ proto.RegisterExtension(E_X241)
+ proto.RegisterExtension(E_X242)
+ proto.RegisterExtension(E_X243)
+ proto.RegisterExtension(E_X244)
+ proto.RegisterExtension(E_X245)
+ proto.RegisterExtension(E_X246)
+ proto.RegisterExtension(E_X247)
+ proto.RegisterExtension(E_X248)
+ proto.RegisterExtension(E_X249)
+ proto.RegisterExtension(E_X250)
+}
+
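+// fileDescriptor0 is the gzip-compressed FileDescriptorProto of the source
+// .proto file. The Descriptor methods above all return this blob plus an
+// index path locating their message (and nested messages) within it.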
+var fileDescriptor0 = []byte{
+ // 4407 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x5a, 0x59, 0x77, 0xdb, 0x48,
+ 0x76, 0x36, 0xc0, 0xfd, 0x92, 0x12, 0xa1, 0xb2, 0xda, 0x4d, 0x4b, 0x5e, 0x60, 0xce, 0x74, 0x37,
+ 0xbd, 0x69, 0x24, 0x10, 0xa2, 0x6d, 0xba, 0xd3, 0xe7, 0x78, 0xa1, 0x64, 0x9d, 0xb1, 0x44, 0x05,
+ 0x52, 0x77, 0x9f, 0xe9, 0x3c, 0xf0, 0x50, 0x22, 0x48, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52,
+ 0xf2, 0xd2, 0x2f, 0xc9, 0x6b, 0xb6, 0x97, 0xbc, 0xe6, 0x29, 0x4f, 0x49, 0xce, 0xc9, 0x9f, 0x48,
+ 0xba, 0x7b, 0xd6, 0x9e, 0x35, 0xeb, 0x64, 0x5f, 0x26, 0xfb, 0x36, 0x93, 0xe4, 0xa5, 0xe7, 0xd4,
+ 0xad, 0x02, 0x50, 0x00, 0x09, 0x48, 0x7e, 0x12, 0x51, 0xf5, 0x7d, 0xb7, 0x6e, 0x15, 0xbe, 0xba,
+ 0xb7, 0x6e, 0x41, 0x00, 0x8e, 0x39, 0x71, 0x56, 0x46, 0x63, 0xdb, 0xb1, 0x49, 0x96, 0xfe, 0xee,
+ 0xb4, 0x9d, 0x76, 0xf9, 0x3a, 0xa4, 0x37, 0xed, 0x86, 0x75, 0x34, 0x24, 0x57, 0x21, 0xd1, 0xb5,
+ 0xed, 0x92, 0xa4, 0xca, 0x95, 0x79, 0x6d, 0x6e, 0xc5, 0x45, 0xac, 0x6c, 0x34, 0x9b, 0x06, 0xed,
+ 0x29, 0xdf, 0x81, 0xfc, 0xa6, 0xbd, 0x6f, 0x4e, 0x9c, 0x8d, 0xbe, 0x39, 0xe8, 0x90, 0x45, 0x48,
+ 0x3d, 0x6d, 0x1f, 0x98, 0x03, 0x64, 0xe4, 0x8c, 0xd4, 0x80, 0x3e, 0x10, 0x02, 0xc9, 0xfd, 0x93,
+ 0x91, 0x59, 0x92, 0xb1, 0x31, 0xe9, 0x9c, 0x8c, 0xcc, 0xf2, 0xaf, 0x5c, 0xa1, 0x83, 0x50, 0x26,
+ 0xb9, 0x0e, 0xc9, 0x2f, 0xf7, 0xad, 0x0e, 0x1f, 0xe5, 0x35, 0x7f, 0x14, 0xd6, 0xbf, 0xf2, 0xe5,
+ 0xad, 0x9d, 0xc7, 0x46, 0xf2, 0x79, 0xdf, 0x42, 0xfb, 0xfb, 0xed, 0x83, 0x01, 0x35, 0x25, 0x51,
+ 0xfb, 0x0e, 0x7d, 0xa0, 0xad, 0xbb, 0xed, 0x71, 0x7b, 0x58, 0x4a, 0xa8, 0x52, 0x25, 0x65, 0xa4,
+ 0x46, 0xf4, 0x81, 0xdc, 0x87, 0x39, 0xc3, 0x7c, 0x71, 0xd4, 0x1f, 0x9b, 0x1d, 0x74, 0xae, 0x94,
+ 0x54, 0xe5, 0x4a, 0x7e, 0xda, 0x3e, 0x76, 0x1a, 0x73, 0x63, 0x11, 0xcb, 0xc8, 0x23, 0xb3, 0xed,
+ 0xb8, 0xe4, 0x94, 0x9a, 0x88, 0x25, 0x0b, 0x58, 0x4a, 0x6e, 0x8e, 0x9c, 0xbe, 0x6d, 0xb5, 0x07,
+ 0x8c, 0x9c, 0x56, 0xa5, 0x18, 0xb2, 0x2d, 0x62, 0xc9, 0x9b, 0x50, 0xdc, 0x68, 0x3d, 0xb4, 0xed,
+ 0x41, 0xcb, 0xf5, 0xa8, 0x04, 0xaa, 0x5c, 0xc9, 0x1a, 0x73, 0x5d, 0xda, 0xea, 0x4e, 0x89, 0x54,
+ 0x40, 0xd9, 0x68, 0x6d, 0x59, 0x4e, 0x55, 0xf3, 0x81, 0x79, 0x55, 0xae, 0xa4, 0x8c, 0xf9, 0x2e,
+ 0x36, 0x4f, 0x21, 0x6b, 0xba, 0x8f, 0x2c, 0xa8, 0x72, 0x25, 0xc1, 0x90, 0x35, 0xdd, 0x43, 0xde,
+ 0x02, 0xb2, 0xd1, 0xda, 0xe8, 0x1f, 0x9b, 0x1d, 0xd1, 0xea, 0x9c, 0x2a, 0x57, 0x32, 0x86, 0xd2,
+ 0xe5, 0x1d, 0x33, 0xd0, 0xa2, 0xe5, 0x79, 0x55, 0xae, 0xa4, 0x5d, 0xb4, 0x60, 0xfb, 0x06, 0x2c,
+ 0x6c, 0xb4, 0xde, 0xed, 0x07, 0x1d, 0x2e, 0xaa, 0x72, 0x65, 0xce, 0x28, 0x76, 0x59, 0xfb, 0x34,
+ 0x56, 0x34, 0xac, 0xa8, 0x72, 0x25, 0xc9, 0xb1, 0x82, 0x5d, 0x9c, 0xdd, 0xc6, 0xc0, 0x6e, 0x3b,
+ 0x3e, 0x74, 0x41, 0x95, 0x2b, 0xb2, 0x31, 0xdf, 0xc5, 0xe6, 0xa0, 0xd5, 0xc7, 0xf6, 0xd1, 0xc1,
+ 0xc0, 0xf4, 0xa1, 0x44, 0x95, 0x2b, 0x92, 0x51, 0xec, 0xb2, 0xf6, 0x20, 0x76, 0xcf, 0x19, 0xf7,
+ 0xad, 0x9e, 0x8f, 0x3d, 0x8f, 0xfa, 0x2d, 0x76, 0x59, 0x7b, 0xd0, 0x83, 0x87, 0x27, 0x8e, 0x39,
+ 0xf1, 0xa1, 0xa6, 0x2a, 0x57, 0x0a, 0xc6, 0x7c, 0x17, 0x9b, 0x43, 0x56, 0x43, 0x6b, 0xd0, 0x55,
+ 0xe5, 0xca, 0x02, 0xb5, 0x3a, 0x63, 0x0d, 0xf6, 0x42, 0x6b, 0xd0, 0x53, 0xe5, 0x0a, 0xe1, 0x58,
+ 0x61, 0x0d, 0x44, 0xcd, 0x30, 0x21, 0x96, 0x16, 0xd5, 0x84, 0xa0, 0x19, 0xd6, 0x18, 0xd4, 0x0c,
+ 0x07, 0xbe, 0xa6, 0x26, 0x44, 0xcd, 0x84, 0x90, 0x38, 0x38, 0x47, 0x5e, 0x50, 0x13, 0xa2, 0x66,
+ 0x38, 0x32, 0xa4, 0x19, 0x8e, 0x7d, 0x5d, 0x4d, 0x04, 0x35, 0x33, 0x85, 0x16, 0x2d, 0x97, 0xd4,
+ 0x44, 0x50, 0x33, 0x1c, 0x1d, 0xd4, 0x0c, 0x07, 0x5f, 0x54, 0x13, 0x01, 0xcd, 0x84, 0xb1, 0xa2,
+ 0xe1, 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x38, 0x3b, 0x57, 0x33, 0x1c, 0xba, 0xac, 0x26, 0x44, 0xcd,
+ 0x88, 0x56, 0x3d, 0xcd, 0x70, 0xe8, 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x88, 0xf5, 0x34, 0xc3, 0xb1,
+ 0x97, 0xd5, 0x44, 0x40, 0x33, 0x1c, 0x7b, 0x5d, 0xd4, 0x0c, 0x87, 0x7e, 0x2c, 0xa9, 0x09, 0x51,
+ 0x34, 0x1c, 0x7a, 0x33, 0x20, 0x1a, 0x8e, 0xfd, 0x84, 0x62, 0x45, 0xd5, 0x84, 0xc1, 0xe2, 0x2a,
+ 0x7c, 0x4a, 0xc1, 0xa2, 0x6c, 0x38, 0xd8, 0x97, 0x8d, 0x1b, 0x82, 0x4a, 0x57, 0x54, 0xc9, 0x93,
+ 0x8d, 0x1b, 0xc3, 0x44, 0xd9, 0x78, 0xc0, 0xab, 0x18, 0x6a, 0xb9, 0x6c, 0xa6, 0x90, 0x35, 0xdd,
+ 0x47, 0xaa, 0xaa, 0xe4, 0xcb, 0xc6, 0x43, 0x06, 0x64, 0xe3, 0x61, 0xaf, 0xa9, 0x92, 0x28, 0x9b,
+ 0x19, 0x68, 0xd1, 0x72, 0x59, 0x95, 0x44, 0xd9, 0x78, 0x68, 0x51, 0x36, 0x1e, 0xf8, 0x0b, 0xaa,
+ 0x24, 0xc8, 0x66, 0x1a, 0x2b, 0x1a, 0xfe, 0xa2, 0x2a, 0x09, 0xb2, 0x09, 0xce, 0x8e, 0xc9, 0xc6,
+ 0x83, 0xbe, 0xa1, 0x4a, 0xbe, 0x6c, 0x82, 0x56, 0xb9, 0x6c, 0x3c, 0xe8, 0x9b, 0xaa, 0x24, 0xc8,
+ 0x26, 0x88, 0xe5, 0xb2, 0xf1, 0xb0, 0x6f, 0x61, 0x7e, 0x73, 0x65, 0xe3, 0x61, 0x05, 0xd9, 0x78,
+ 0xd0, 0xdf, 0xa1, 0xb9, 0xd0, 0x93, 0x8d, 0x07, 0x15, 0x65, 0xe3, 0x61, 0x7f, 0x97, 0x62, 0x7d,
+ 0xd9, 0x4c, 0x83, 0xc5, 0x55, 0xf8, 0x3d, 0x0a, 0xf6, 0x65, 0xe3, 0x81, 0x57, 0xd0, 0x09, 0x2a,
+ 0x9b, 0x8e, 0xd9, 0x6d, 0x1f, 0x0d, 0xa8, 0xc4, 0x2a, 0x54, 0x37, 0xf5, 0xa4, 0x33, 0x3e, 0x32,
+ 0xa9, 0x27, 0xb6, 0x3d, 0x78, 0xec, 0xf6, 0x91, 0x15, 0x6a, 0x9c, 0xc9, 0xc7, 0x27, 0x5c, 0xa7,
+ 0xfa, 0xa9, 0xcb, 0x55, 0xcd, 0x28, 0x32, 0x0d, 0x4d, 0xe3, 0x6b, 0xba, 0x80, 0xbf, 0x41, 0x55,
+ 0x54, 0x97, 0x6b, 0x3a, 0xc3, 0xd7, 0x74, 0x1f, 0x5f, 0x85, 0xf3, 0xbe, 0x94, 0x7c, 0xc6, 0x4d,
+ 0xaa, 0xa5, 0x7a, 0xa2, 0xaa, 0xad, 0x1a, 0x0b, 0xae, 0xa0, 0x66, 0x91, 0x02, 0xc3, 0xdc, 0xa2,
+ 0x92, 0xaa, 0x27, 0x6a, 0xba, 0x47, 0x12, 0x47, 0xd2, 0xa8, 0x0c, 0xb9, 0xb0, 0x7c, 0xce, 0x6d,
+ 0xaa, 0xac, 0x7a, 0xb2, 0xaa, 0xad, 0xae, 0x1a, 0x0a, 0xd7, 0xd7, 0x0c, 0x4e, 0x60, 0x9c, 0x15,
+ 0xaa, 0xb0, 0x7a, 0xb2, 0xa6, 0x7b, 0x9c, 0xe0, 0x38, 0x0b, 0xae, 0xd0, 0x7c, 0xca, 0x97, 0xa8,
+ 0xd2, 0xea, 0xe9, 0xea, 0x9a, 0xbe, 0xb6, 0x7e, 0xcf, 0x28, 0x32, 0xc5, 0xf9, 0x1c, 0x9d, 0x8e,
+ 0xc3, 0x25, 0xe7, 0x93, 0x56, 0xa9, 0xe6, 0xea, 0x69, 0xed, 0xce, 0xda, 0x5d, 0xed, 0xae, 0xa1,
+ 0x70, 0xed, 0xf9, 0xac, 0x77, 0x28, 0x8b, 0x8b, 0xcf, 0x67, 0xad, 0x51, 0xf5, 0xd5, 0x95, 0x67,
+ 0xe6, 0x60, 0x60, 0xdf, 0x52, 0xcb, 0x2f, 0xed, 0xf1, 0xa0, 0x73, 0xad, 0x0c, 0x86, 0xc2, 0xf5,
+ 0x28, 0x8e, 0xba, 0xe0, 0x0a, 0xd2, 0xa7, 0xff, 0x1a, 0x3d, 0x87, 0x15, 0xea, 0x99, 0x87, 0xfd,
+ 0x9e, 0x65, 0x4f, 0x4c, 0xa3, 0xc8, 0xa4, 0x19, 0x5a, 0x93, 0xbd, 0xf0, 0x3a, 0xfe, 0x3a, 0xa5,
+ 0x2d, 0xd4, 0x13, 0xb7, 0xab, 0x1a, 0x1d, 0x69, 0xd6, 0x3a, 0xee, 0x85, 0xd7, 0xf1, 0x37, 0x28,
+ 0x87, 0xd4, 0x13, 0xb7, 0x6b, 0x3a, 0xe7, 0x88, 0xeb, 0x78, 0x07, 0x2e, 0x84, 0xf2, 0x62, 0x6b,
+ 0xd4, 0x3e, 0x7c, 0x6e, 0x76, 0x4a, 0x1a, 0x4d, 0x8f, 0x0f, 0x65, 0x45, 0x32, 0xce, 0x07, 0x52,
+ 0xe4, 0x2e, 0x76, 0x93, 0x7b, 0xf0, 0x7a, 0x38, 0x51, 0xba, 0xcc, 0x2a, 0xcd, 0x97, 0xc8, 0x5c,
+ 0x0c, 0xe6, 0xcc, 0x10, 0x55, 0x08, 0xc0, 0x2e, 0x55, 0xa7, 0x09, 0xd4, 0xa7, 0xfa, 0x91, 0x98,
+ 0x53, 0x7f, 0x06, 0x2e, 0x4e, 0xa7, 0x52, 0x97, 0xbc, 0x4e, 0x33, 0x2a, 0x92, 0x2f, 0x84, 0xb3,
+ 0xea, 0x14, 0x7d, 0xc6, 0xd8, 0x35, 0x9a, 0x62, 0x45, 0xfa, 0xd4, 0xe8, 0xf7, 0xa1, 0x34, 0x95,
+ 0x6c, 0x5d, 0xf6, 0x1d, 0x9a, 0x73, 0x91, 0xfd, 0x5a, 0x28, 0xef, 0x86, 0xc9, 0x33, 0x86, 0xbe,
+ 0x4b, 0x93, 0xb0, 0x40, 0x9e, 0x1a, 0x19, 0x97, 0x2c, 0x98, 0x8e, 0x5d, 0xee, 0x3d, 0x9a, 0x95,
+ 0xf9, 0x92, 0x05, 0x32, 0xb3, 0x38, 0x6e, 0x28, 0x3f, 0xbb, 0xdc, 0x3a, 0x4d, 0xd3, 0x7c, 0xdc,
+ 0x60, 0xaa, 0xe6, 0xe4, 0xb7, 0x29, 0x79, 0x6f, 0xf6, 0x8c, 0x7f, 0x9c, 0xa0, 0x09, 0x96, 0xb3,
+ 0xf7, 0x66, 0x4d, 0xd9, 0x63, 0xcf, 0x98, 0xf2, 0x4f, 0x28, 0x9b, 0x08, 0xec, 0xa9, 0x39, 0x3f,
+ 0x06, 0xaf, 0xe2, 0xe8, 0x8d, 0xed, 0xa3, 0x51, 0x69, 0x43, 0x95, 0x2b, 0xa0, 0x5d, 0x99, 0xaa,
+ 0x7e, 0xdc, 0x43, 0xde, 0x26, 0x45, 0x19, 0x41, 0x12, 0xb3, 0xc2, 0xec, 0x32, 0x2b, 0xbb, 0x6a,
+ 0x22, 0xc2, 0x0a, 0x43, 0x79, 0x56, 0x04, 0x12, 0xb5, 0xe2, 0x06, 0x7d, 0x66, 0xe5, 0x03, 0x55,
+ 0x9a, 0x69, 0xc5, 0x4d, 0x01, 0xdc, 0x4a, 0x80, 0xb4, 0xb4, 0xee, 0xd7, 0x5b, 0xd8, 0x4f, 0xbe,
+ 0x18, 0x2e, 0xc0, 0x36, 0xf1, 0xfc, 0x1c, 0xac, 0xb4, 0x18, 0x4d, 0x70, 0x6e, 0x9a, 0xf6, 0xb3,
+ 0x11, 0xb4, 0x80, 0x37, 0xd3, 0xb4, 0x9f, 0x9b, 0x41, 0x2b, 0xff, 0xa6, 0x04, 0x49, 0x5a, 0x4f,
+ 0x92, 0x2c, 0x24, 0xdf, 0x6b, 0x6e, 0x3d, 0x56, 0xce, 0xd1, 0x5f, 0x0f, 0x9b, 0xcd, 0xa7, 0x8a,
+ 0x44, 0x72, 0x90, 0x7a, 0xf8, 0x95, 0xfd, 0xc6, 0x9e, 0x22, 0x93, 0x22, 0xe4, 0x37, 0xb6, 0x76,
+ 0x36, 0x1b, 0xc6, 0xae, 0xb1, 0xb5, 0xb3, 0xaf, 0x24, 0x68, 0xdf, 0xc6, 0xd3, 0xe6, 0x83, 0x7d,
+ 0x25, 0x49, 0x32, 0x90, 0xa0, 0x6d, 0x29, 0x02, 0x90, 0xde, 0xdb, 0x37, 0xb6, 0x76, 0x36, 0x95,
+ 0x34, 0xb5, 0xb2, 0xbf, 0xb5, 0xdd, 0x50, 0x32, 0x14, 0xb9, 0xff, 0xee, 0xee, 0xd3, 0x86, 0x92,
+ 0xa5, 0x3f, 0x1f, 0x18, 0xc6, 0x83, 0xaf, 0x28, 0x39, 0x4a, 0xda, 0x7e, 0xb0, 0xab, 0x00, 0x76,
+ 0x3f, 0x78, 0xf8, 0xb4, 0xa1, 0xe4, 0x49, 0x01, 0xb2, 0x1b, 0xef, 0xee, 0x3c, 0xda, 0xdf, 0x6a,
+ 0xee, 0x28, 0x85, 0xf2, 0x6f, 0xc9, 0x00, 0x9b, 0xf6, 0xde, 0xf3, 0xfe, 0x08, 0xab, 0xe2, 0xcb,
+ 0x00, 0x93, 0xe7, 0xfd, 0x51, 0x0b, 0xa5, 0xc7, 0x2b, 0xbb, 0x1c, 0x6d, 0xc1, 0xa0, 0x43, 0xae,
+ 0x41, 0x01, 0xbb, 0xbb, 0x2c, 0x14, 0x60, 0x41, 0x97, 0x31, 0xf2, 0xb4, 0x8d, 0x47, 0x87, 0x20,
+ 0xa4, 0xa6, 0x63, 0x1d, 0x97, 0x16, 0x20, 0x35, 0x9d, 0x5c, 0x05, 0x7c, 0x6c, 0x4d, 0x30, 0xac,
+ 0x63, 0xed, 0x96, 0x33, 0x70, 0x5c, 0x16, 0xe8, 0xc9, 0xdb, 0x80, 0x63, 0x32, 0x59, 0x14, 0xa7,
+ 0x25, 0xea, 0xba, 0xbb, 0x42, 0x7f, 0x30, 0x59, 0xf8, 0x84, 0xa5, 0x26, 0xe4, 0xbc, 0x76, 0x3a,
+ 0x16, 0xb6, 0xf2, 0x19, 0x29, 0x38, 0x23, 0xc0, 0x26, 0x6f, 0x4a, 0x0c, 0xc0, 0xbd, 0x59, 0x40,
+ 0x6f, 0x18, 0x89, 0xb9, 0x53, 0xbe, 0x0c, 0x73, 0x3b, 0xb6, 0xc5, 0xb6, 0x10, 0xae, 0x52, 0x01,
+ 0xa4, 0x76, 0x49, 0xc2, 0x12, 0x46, 0x6a, 0x97, 0xaf, 0x00, 0x08, 0x7d, 0x0a, 0x48, 0x07, 0xac,
+ 0x0f, 0x37, 0xa2, 0x74, 0x50, 0xbe, 0x09, 0xe9, 0xed, 0xf6, 0xf1, 0x7e, 0xbb, 0x47, 0xae, 0x01,
+ 0x0c, 0xda, 0x13, 0xa7, 0xd5, 0x45, 0xa9, 0x7c, 0xfe, 0xf9, 0xe7, 0x9f, 0x4b, 0x78, 0xe2, 0xca,
+ 0xd1, 0x56, 0x26, 0x95, 0x17, 0x00, 0xcd, 0x41, 0x67, 0xdb, 0x9c, 0x4c, 0xda, 0x3d, 0x93, 0x54,
+ 0x21, 0x6d, 0x99, 0x13, 0x9a, 0x72, 0x24, 0x2c, 0xe6, 0x97, 0xfd, 0x55, 0xf0, 0x51, 0x2b, 0x3b,
+ 0x08, 0x31, 0x38, 0x94, 0x28, 0x90, 0xb0, 0x8e, 0x86, 0x78, 0x59, 0x91, 0x32, 0xe8, 0xcf, 0xa5,
+ 0x4b, 0x90, 0x66, 0x18, 0x42, 0x20, 0x69, 0xb5, 0x87, 0x66, 0x89, 0x8d, 0x8b, 0xbf, 0xcb, 0xbf,
+ 0x2a, 0x01, 0xec, 0x98, 0x2f, 0xcf, 0x30, 0xa6, 0x8f, 0x8a, 0x19, 0x33, 0xc1, 0xc6, 0xbc, 0x1f,
+ 0x37, 0x26, 0xd5, 0x59, 0xd7, 0xb6, 0x3b, 0x2d, 0xf6, 0x8a, 0xd9, 0xbd, 0x4a, 0x8e, 0xb6, 0xe0,
+ 0x5b, 0x2b, 0x7f, 0x00, 0x85, 0x2d, 0xcb, 0x32, 0xc7, 0xae, 0x4f, 0x04, 0x92, 0xcf, 0xec, 0x89,
+ 0xc3, 0x2f, 0x78, 0xf0, 0x37, 0x29, 0x41, 0x72, 0x64, 0x8f, 0x1d, 0x36, 0xcf, 0x7a, 0x52, 0x5f,
+ 0x5d, 0x5d, 0x35, 0xb0, 0x85, 0x5c, 0x82, 0xdc, 0xa1, 0x6d, 0x59, 0xe6, 0x21, 0x9d, 0x44, 0x02,
+ 0x6b, 0x0b, 0xbf, 0xa1, 0xfc, 0xcb, 0x12, 0x14, 0x9a, 0xce, 0x33, 0xdf, 0xb8, 0x02, 0x89, 0xe7,
+ 0xe6, 0x09, 0xba, 0x97, 0x30, 0xe8, 0x4f, 0xb2, 0x08, 0xa9, 0x9f, 0x6f, 0x0f, 0x8e, 0xd8, 0x85,
+ 0x4f, 0xc1, 0x60, 0x0f, 0xe4, 0x02, 0xa4, 0x5f, 0x9a, 0xfd, 0xde, 0x33, 0x07, 0x6d, 0xca, 0x06,
+ 0x7f, 0x22, 0xb7, 0x20, 0xd5, 0xa7, 0xce, 0x96, 0x92, 0xb8, 0x5e, 0x17, 0xfc, 0xf5, 0x12, 0xe7,
+ 0x60, 0x30, 0xd0, 0x8d, 0x6c, 0xb6, 0xa3, 0x7c, 0xf4, 0xd1, 0x47, 0x1f, 0xc9, 0xe5, 0x2e, 0x2c,
+ 0xba, 0xb1, 0x23, 0x30, 0xd9, 0x1d, 0x28, 0x0d, 0x4c, 0xbb, 0xd5, 0xed, 0x5b, 0xed, 0xc1, 0xe0,
+ 0xa4, 0xf5, 0xd2, 0xb6, 0x5a, 0x6d, 0xab, 0x65, 0x4f, 0x0e, 0xdb, 0x63, 0x5c, 0x80, 0xe8, 0x21,
+ 0x16, 0x07, 0xa6, 0xbd, 0xc1, 0x68, 0xef, 0xdb, 0xd6, 0x03, 0xab, 0x49, 0x39, 0xe5, 0x3f, 0x48,
+ 0x42, 0x6e, 0xfb, 0xc4, 0xb5, 0xbe, 0x08, 0xa9, 0x43, 0xfb, 0xc8, 0x62, 0x6b, 0x99, 0x32, 0xd8,
+ 0x83, 0xf7, 0x8e, 0x64, 0xe1, 0x1d, 0x2d, 0x42, 0xea, 0xc5, 0x91, 0xed, 0x98, 0x38, 0xdd, 0x9c,
+ 0xc1, 0x1e, 0xe8, 0x6a, 0x8d, 0x4c, 0xa7, 0x94, 0xc4, 0x0a, 0x93, 0xfe, 0xf4, 0xe7, 0x9f, 0x3a,
+ 0xc3, 0xfc, 0xc9, 0x0a, 0xa4, 0x6d, 0xba, 0xfa, 0x93, 0x52, 0x1a, 0x2f, 0xb7, 0x04, 0xb8, 0xf8,
+ 0x56, 0x0c, 0x8e, 0x22, 0x5b, 0xb0, 0xf0, 0xd2, 0x6c, 0x0d, 0x8f, 0x26, 0x4e, 0xab, 0x67, 0xb7,
+ 0x3a, 0xa6, 0x39, 0x32, 0xc7, 0xa5, 0x39, 0x1c, 0x49, 0x88, 0x09, 0xb3, 0x16, 0xd2, 0x98, 0x7f,
+ 0x69, 0x6e, 0x1f, 0x4d, 0x9c, 0x4d, 0xfb, 0x31, 0xb2, 0x48, 0x15, 0x72, 0x63, 0x93, 0x46, 0x02,
+ 0xea, 0x6c, 0x21, 0x3c, 0x7a, 0x80, 0x9a, 0x1d, 0x9b, 0x23, 0x6c, 0x20, 0xeb, 0x90, 0x3d, 0xe8,
+ 0x3f, 0x37, 0x27, 0xcf, 0xcc, 0x4e, 0x29, 0xa3, 0x4a, 0x95, 0x79, 0xed, 0xa2, 0xcf, 0xf1, 0x96,
+ 0x75, 0xe5, 0x91, 0x3d, 0xb0, 0xc7, 0x86, 0x07, 0x25, 0xf7, 0x21, 0x37, 0xb1, 0x87, 0x26, 0xd3,
+ 0x77, 0x16, 0x33, 0xdb, 0xe5, 0x59, 0xbc, 0x3d, 0x7b, 0x68, 0xba, 0x11, 0xcc, 0xc5, 0x93, 0x65,
+ 0xe6, 0xe8, 0x01, 0x3d, 0xbf, 0x96, 0x00, 0xeb, 0x73, 0xea, 0x10, 0x9e, 0x67, 0xc9, 0x12, 0x75,
+ 0xa8, 0xd7, 0xa5, 0xc7, 0x92, 0x52, 0x1e, 0x8b, 0x3b, 0xef, 0x79, 0xe9, 0x16, 0xe4, 0x3c, 0x83,
+ 0x7e, 0xe8, 0x63, 0xe1, 0x26, 0x87, 0xf1, 0x80, 0x85, 0x3e, 0x16, 0x6b, 0xde, 0x80, 0x14, 0xba,
+ 0x4d, 0xd3, 0x84, 0xd1, 0xa0, 0x59, 0x29, 0x07, 0xa9, 0x4d, 0xa3, 0xd1, 0xd8, 0x51, 0x24, 0x4c,
+ 0x50, 0x4f, 0xdf, 0x6d, 0x28, 0xb2, 0xa0, 0xd8, 0xdf, 0x96, 0x20, 0xd1, 0x38, 0x46, 0xb5, 0xd0,
+ 0x69, 0xb8, 0x3b, 0x9a, 0xfe, 0xd6, 0x6a, 0x90, 0x1c, 0xda, 0x63, 0x93, 0x9c, 0x9f, 0x31, 0xcb,
+ 0x52, 0x0f, 0xdf, 0x97, 0x70, 0x95, 0xdb, 0x38, 0x76, 0x0c, 0xc4, 0x6b, 0x6f, 0x41, 0xd2, 0x31,
+ 0x8f, 0x9d, 0xd9, 0xbc, 0x67, 0x6c, 0x00, 0x0a, 0xd0, 0x6e, 0x42, 0xda, 0x3a, 0x1a, 0x1e, 0x98,
+ 0xe3, 0xd9, 0xd0, 0x3e, 0x4e, 0x8f, 0x43, 0xca, 0xef, 0x81, 0xf2, 0xc8, 0x1e, 0x8e, 0x06, 0xe6,
+ 0x71, 0xe3, 0xd8, 0x31, 0xad, 0x49, 0xdf, 0xb6, 0xa8, 0x9e, 0xbb, 0xfd, 0x31, 0x46, 0x11, 0xbc,
+ 0xb0, 0xc5, 0x07, 0xba, 0xab, 0x27, 0xe6, 0xa1, 0x6d, 0x75, 0x78, 0xc0, 0xe4, 0x4f, 0x14, 0xed,
+ 0x3c, 0xeb, 0x8f, 0x69, 0x00, 0xa1, 0x71, 0x9e, 0x3d, 0x94, 0x37, 0xa1, 0xc8, 0x0f, 0xfa, 0x13,
+ 0x3e, 0x70, 0xf9, 0x06, 0x14, 0xdc, 0x26, 0xbc, 0xbd, 0xce, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x54,
+ 0xce, 0xd1, 0x65, 0x6d, 0xee, 0x34, 0x14, 0x89, 0xfe, 0xd8, 0x7f, 0xbf, 0x19, 0x58, 0xca, 0x4b,
+ 0x50, 0xf0, 0x7c, 0xdf, 0x33, 0x1d, 0xec, 0xa1, 0x09, 0x21, 0x53, 0x97, 0xb3, 0x52, 0x39, 0x03,
+ 0xa9, 0xc6, 0x70, 0xe4, 0x9c, 0x94, 0x7f, 0x11, 0xf2, 0x1c, 0xf4, 0xb4, 0x3f, 0x71, 0xc8, 0x1d,
+ 0xc8, 0x0c, 0xf9, 0x7c, 0x25, 0x3c, 0x73, 0x89, 0x9a, 0xf2, 0x71, 0xee, 0x6f, 0xc3, 0x45, 0x2f,
+ 0x55, 0x21, 0x23, 0xc4, 0x52, 0xbe, 0xd5, 0x65, 0x71, 0xab, 0xb3, 0xa0, 0x90, 0x10, 0x82, 0x42,
+ 0x79, 0x1b, 0x32, 0x2c, 0x03, 0x4e, 0x30, 0xab, 0xb3, 0x7a, 0x8d, 0x89, 0x89, 0xbd, 0xf9, 0x3c,
+ 0x6b, 0x63, 0x57, 0xc8, 0x57, 0x21, 0x8f, 0x82, 0xe5, 0x08, 0x16, 0x3a, 0x01, 0x9b, 0x98, 0xdc,
+ 0x7e, 0x3f, 0x05, 0x59, 0x77, 0xa5, 0xc8, 0x32, 0xa4, 0x59, 0x91, 0x84, 0xa6, 0xdc, 0x22, 0x3e,
+ 0x85, 0x65, 0x11, 0x59, 0x86, 0x0c, 0x2f, 0x84, 0x78, 0x74, 0xa7, 0x15, 0x7b, 0x9a, 0x15, 0x3e,
+ 0x5e, 0x67, 0x4d, 0xc7, 0xc0, 0xc4, 0xca, 0xf3, 0x34, 0x2b, 0x6d, 0x88, 0x0a, 0x39, 0xaf, 0x98,
+ 0xc1, 0x78, 0xcc, 0x6b, 0xf1, 0xac, 0x5b, 0xbd, 0x08, 0x88, 0x9a, 0x8e, 0x11, 0x8b, 0x17, 0xde,
+ 0xd9, 0xae, 0x7f, 0x3c, 0xc9, 0xba, 0x25, 0x09, 0xde, 0xa1, 0xbb, 0x55, 0x76, 0x86, 0x17, 0x21,
+ 0x3e, 0xa0, 0xa6, 0x63, 0x48, 0x70, 0x4b, 0xea, 0x0c, 0x2f, 0x34, 0xc8, 0x55, 0xea, 0x22, 0x16,
+ 0x0e, 0xb8, 0xf5, 0xfd, 0xfa, 0x39, 0xcd, 0xca, 0x09, 0x72, 0x8d, 0x5a, 0x60, 0xd5, 0x01, 0xee,
+ 0x4b, 0xbf, 0x58, 0xce, 0xf0, 0xa2, 0x81, 0xdc, 0xa4, 0x10, 0xb6, 0xfc, 0x25, 0x88, 0xa8, 0x8c,
+ 0x33, 0xbc, 0x32, 0x26, 0x2a, 0x1d, 0x10, 0xc3, 0x03, 0x86, 0x04, 0xa1, 0x0a, 0x4e, 0xb3, 0x2a,
+ 0x98, 0x5c, 0x41, 0x73, 0x6c, 0x52, 0x05, 0xbf, 0xe2, 0xcd, 0xf0, 0x2a, 0xc3, 0xef, 0xc7, 0x23,
+ 0x9b, 0x57, 0xdd, 0x66, 0x78, 0x1d, 0x41, 0x6a, 0xf4, 0x7d, 0x51, 0x7d, 0x97, 0xe6, 0x31, 0x08,
+ 0x96, 0x7c, 0xe1, 0xb9, 0xef, 0x94, 0xc5, 0xc0, 0x3a, 0x8b, 0x20, 0x46, 0xaa, 0x8b, 0xbb, 0x61,
+ 0x89, 0xf2, 0x76, 0xfb, 0x56, 0xb7, 0x54, 0xc4, 0x95, 0x48, 0xf4, 0xad, 0xae, 0x91, 0xea, 0xd2,
+ 0x16, 0xa6, 0x81, 0x1d, 0xda, 0xa7, 0x60, 0x5f, 0xf2, 0x36, 0xeb, 0xa4, 0x4d, 0xa4, 0x04, 0xa9,
+ 0x8d, 0xd6, 0x4e, 0xdb, 0x2a, 0x2d, 0x30, 0x9e, 0xd5, 0xb6, 0x8c, 0x64, 0x77, 0xa7, 0x6d, 0x91,
+ 0xb7, 0x20, 0x31, 0x39, 0x3a, 0x28, 0x91, 0xf0, 0xe7, 0x8d, 0xbd, 0xa3, 0x03, 0xd7, 0x15, 0x83,
+ 0x22, 0xc8, 0x32, 0x64, 0x27, 0xce, 0xb8, 0xf5, 0x0b, 0xe6, 0xd8, 0x2e, 0x9d, 0xc7, 0x25, 0x3c,
+ 0x67, 0x64, 0x26, 0xce, 0xf8, 0x03, 0x73, 0x6c, 0x9f, 0x31, 0xf8, 0x95, 0xaf, 0x40, 0x5e, 0xb0,
+ 0x4b, 0x8a, 0x20, 0x59, 0xec, 0xa4, 0x50, 0x97, 0xee, 0x18, 0x92, 0x55, 0xde, 0x87, 0x82, 0x5b,
+ 0x48, 0xe0, 0x7c, 0x35, 0xba, 0x93, 0x06, 0xf6, 0x18, 0xf7, 0xe7, 0xbc, 0x76, 0x49, 0x4c, 0x51,
+ 0x3e, 0x8c, 0xa7, 0x0b, 0x06, 0x2d, 0x2b, 0x21, 0x57, 0xa4, 0xf2, 0x0f, 0x25, 0x28, 0x6c, 0xdb,
+ 0x63, 0xff, 0x96, 0x77, 0x11, 0x52, 0x07, 0xb6, 0x3d, 0x98, 0xa0, 0xd9, 0xac, 0xc1, 0x1e, 0xc8,
+ 0x1b, 0x50, 0xc0, 0x1f, 0x6e, 0x01, 0x28, 0x7b, 0xf7, 0x0b, 0x79, 0x6c, 0xe7, 0x55, 0x1f, 0x81,
+ 0x64, 0xdf, 0x72, 0x26, 0x3c, 0x92, 0xe1, 0x6f, 0xf2, 0x05, 0xc8, 0xd3, 0xbf, 0x2e, 0x33, 0xe9,
+ 0x1d, 0x58, 0x81, 0x36, 0x73, 0xe2, 0x5b, 0x30, 0x87, 0x6f, 0xdf, 0x83, 0x65, 0xbc, 0xbb, 0x84,
+ 0x02, 0xeb, 0xe0, 0xc0, 0x12, 0x64, 0x58, 0x28, 0x98, 0xe0, 0x27, 0xab, 0x9c, 0xe1, 0x3e, 0xd2,
+ 0xf0, 0x8a, 0x95, 0x00, 0x4b, 0xf7, 0x19, 0x83, 0x3f, 0x95, 0x1f, 0x40, 0x16, 0xb3, 0x54, 0x73,
+ 0xd0, 0x21, 0x65, 0x90, 0x7a, 0x25, 0x13, 0x73, 0xe4, 0xa2, 0x70, 0xcc, 0xe7, 0xdd, 0x2b, 0x9b,
+ 0x86, 0xd4, 0x5b, 0x5a, 0x00, 0x69, 0x93, 0x9e, 0xbb, 0x8f, 0x79, 0x98, 0x96, 0x8e, 0xcb, 0x4d,
+ 0x6e, 0x62, 0xc7, 0x7c, 0x19, 0x67, 0x62, 0xc7, 0x7c, 0xc9, 0x4c, 0x5c, 0x9d, 0x32, 0x41, 0x9f,
+ 0x4e, 0xf8, 0xf7, 0x3b, 0xe9, 0x84, 0x9e, 0xf3, 0x71, 0x7b, 0xf6, 0xad, 0xde, 0xae, 0xdd, 0xb7,
+ 0xf0, 0x9c, 0xdf, 0xc5, 0x73, 0x92, 0x64, 0x48, 0xdd, 0xf2, 0x67, 0x49, 0x98, 0xe7, 0x41, 0xf4,
+ 0xfd, 0xbe, 0xf3, 0x6c, 0xbb, 0x3d, 0x22, 0x4f, 0xa1, 0x40, 0xe3, 0x67, 0x6b, 0xd8, 0x1e, 0x8d,
+ 0xe8, 0x46, 0x95, 0xf0, 0x50, 0x71, 0x7d, 0x2a, 0x28, 0x73, 0xfc, 0xca, 0x4e, 0x7b, 0x68, 0x6e,
+ 0x33, 0x6c, 0xc3, 0x72, 0xc6, 0x27, 0x46, 0xde, 0xf2, 0x5b, 0xc8, 0x16, 0xe4, 0x87, 0x93, 0x9e,
+ 0x67, 0x4c, 0x46, 0x63, 0x95, 0x48, 0x63, 0xdb, 0x93, 0x5e, 0xc0, 0x16, 0x0c, 0xbd, 0x06, 0xea,
+ 0x18, 0x8d, 0xbc, 0x9e, 0xad, 0xc4, 0x29, 0x8e, 0xd1, 0x20, 0x11, 0x74, 0xec, 0xc0, 0x6f, 0x21,
+ 0x8f, 0x01, 0xe8, 0x46, 0x72, 0x6c, 0x5a, 0x24, 0xa1, 0x56, 0xf2, 0xda, 0x9b, 0x91, 0xb6, 0xf6,
+ 0x9c, 0xf1, 0xbe, 0xbd, 0xe7, 0x8c, 0x99, 0x21, 0xba, 0x05, 0xf1, 0x71, 0xe9, 0x1d, 0x50, 0xc2,
+ 0xf3, 0x17, 0xcf, 0xde, 0xa9, 0x19, 0x67, 0xef, 0x1c, 0x3f, 0x7b, 0xd7, 0xe5, 0xbb, 0xd2, 0xd2,
+ 0x7b, 0x50, 0x0c, 0x4d, 0x59, 0xa4, 0x13, 0x46, 0xbf, 0x2d, 0xd2, 0xf3, 0xda, 0xeb, 0xc2, 0xd7,
+ 0x63, 0xf1, 0xd5, 0x8a, 0x76, 0xdf, 0x01, 0x25, 0x3c, 0x7d, 0xd1, 0x70, 0x36, 0xa6, 0x26, 0x40,
+ 0xfe, 0x7d, 0x98, 0x0b, 0x4c, 0x59, 0x24, 0xe7, 0x4e, 0x99, 0x54, 0xf9, 0x97, 0x52, 0x90, 0x6a,
+ 0x5a, 0xa6, 0xdd, 0x25, 0xaf, 0x07, 0x33, 0xe2, 0x93, 0x73, 0x6e, 0x36, 0xbc, 0x18, 0xca, 0x86,
+ 0x4f, 0xce, 0x79, 0xb9, 0xf0, 0x62, 0x28, 0x17, 0xba, 0x5d, 0x35, 0x9d, 0x5c, 0x9e, 0xca, 0x84,
+ 0x4f, 0xce, 0x09, 0x69, 0xf0, 0xf2, 0x54, 0x1a, 0xf4, 0xbb, 0x6b, 0x3a, 0x0d, 0x9d, 0xc1, 0x1c,
+ 0xf8, 0xe4, 0x9c, 0x9f, 0xff, 0x96, 0xc3, 0xf9, 0xcf, 0xeb, 0xac, 0xe9, 0xcc, 0x25, 0x21, 0xf7,
+ 0xa1, 0x4b, 0x2c, 0xeb, 0x2d, 0x87, 0xb3, 0x1e, 0xf2, 0x78, 0xbe, 0x5b, 0x0e, 0xe7, 0x3b, 0xec,
+ 0xe4, 0xf9, 0xed, 0x62, 0x28, 0xbf, 0xa1, 0x51, 0x96, 0xd8, 0x96, 0xc3, 0x89, 0x8d, 0xf1, 0x04,
+ 0x4f, 0xc5, 0xac, 0xe6, 0x75, 0xd6, 0x74, 0xa2, 0x85, 0x52, 0x5a, 0xf4, 0xb9, 0x1e, 0xdf, 0x05,
+ 0x86, 0x77, 0x9d, 0x2e, 0x9b, 0x7b, 0xe4, 0x2c, 0xc6, 0x7c, 0x60, 0xc7, 0xd5, 0x74, 0x8f, 0x5c,
+ 0x1a, 0x64, 0xba, 0xbc, 0xd4, 0x55, 0x30, 0x46, 0x09, 0xb2, 0xc4, 0x97, 0xbf, 0xb2, 0xd1, 0xc2,
+ 0x58, 0x85, 0xf3, 0x62, 0xa7, 0xf7, 0x0a, 0xcc, 0x6d, 0xb4, 0x9e, 0xb6, 0xc7, 0x3d, 0x73, 0xe2,
+ 0xb4, 0xf6, 0xdb, 0x3d, 0xef, 0xba, 0x80, 0xbe, 0xff, 0x7c, 0x97, 0xf7, 0xec, 0xb7, 0x7b, 0xe4,
+ 0x82, 0x2b, 0xae, 0x0e, 0xf6, 0x4a, 0x5c, 0x5e, 0x4b, 0xaf, 0xd3, 0x45, 0x63, 0xc6, 0x30, 0xea,
+ 0x2d, 0xf0, 0xa8, 0xf7, 0x30, 0x03, 0xa9, 0x23, 0xab, 0x6f, 0x5b, 0x0f, 0x73, 0x90, 0x71, 0xec,
+ 0xf1, 0xb0, 0xed, 0xd8, 0xe5, 0x1f, 0x49, 0x00, 0x8f, 0xec, 0xe1, 0xf0, 0xc8, 0xea, 0xbf, 0x38,
+ 0x32, 0xc9, 0x15, 0xc8, 0x0f, 0xdb, 0xcf, 0xcd, 0xd6, 0xd0, 0x6c, 0x1d, 0x8e, 0xdd, 0x7d, 0x90,
+ 0xa3, 0x4d, 0xdb, 0xe6, 0xa3, 0xf1, 0x09, 0x29, 0xb9, 0x87, 0x71, 0xd4, 0x0e, 0x4a, 0x92, 0x1f,
+ 0xce, 0x17, 0xf9, 0xf1, 0x32, 0xcd, 0xdf, 0xa1, 0x7b, 0xc0, 0x64, 0x15, 0x43, 0x86, 0xbf, 0x3d,
+ 0x7c, 0xa2, 0x92, 0x77, 0xcc, 0xe1, 0xa8, 0x75, 0x88, 0x52, 0xa1, 0x72, 0x48, 0xd1, 0xe7, 0x47,
+ 0xe4, 0x36, 0x24, 0x0e, 0xed, 0x01, 0x8a, 0xe4, 0x94, 0xf7, 0x42, 0x71, 0xe4, 0x0d, 0x48, 0x0c,
+ 0x27, 0x4c, 0x36, 0x79, 0x6d, 0x41, 0x38, 0x11, 0xb0, 0x24, 0x44, 0x61, 0xc3, 0x49, 0xcf, 0x9b,
+ 0xf7, 0x8d, 0x22, 0x24, 0x36, 0x9a, 0x4d, 0x9a, 0xe5, 0x37, 0x9a, 0xcd, 0x35, 0x45, 0xaa, 0x7f,
+ 0x09, 0xb2, 0xbd, 0xb1, 0x69, 0xd2, 0xf0, 0x30, 0xbb, 0xba, 0xf8, 0x10, 0xb3, 0x9a, 0x07, 0xaa,
+ 0x6f, 0x43, 0xe6, 0x90, 0xd5, 0x17, 0x24, 0xa2, 0x80, 0x2d, 0xfd, 0x21, 0xbb, 0x3e, 0x59, 0xf2,
+ 0xbb, 0xc3, 0x15, 0x89, 0xe1, 0xda, 0xa8, 0xef, 0x42, 0x6e, 0xdc, 0x3a, 0xcd, 0xe0, 0xc7, 0x2c,
+ 0xbb, 0xc4, 0x19, 0xcc, 0x8e, 0x79, 0x53, 0xbd, 0x01, 0x0b, 0x96, 0xed, 0x7e, 0xb2, 0x68, 0x75,
+ 0xd8, 0x1e, 0xbb, 0x38, 0x7d, 0x68, 0x73, 0x8d, 0x9b, 0xec, 0x33, 0xa1, 0x65, 0xf3, 0x0e, 0xb6,
+ 0x2b, 0xeb, 0x8f, 0x40, 0x11, 0xcc, 0x60, 0x91, 0x19, 0x67, 0xa5, 0xcb, 0xbe, 0x4b, 0x7a, 0x56,
+ 0x70, 0xdf, 0x87, 0x8c, 0xb0, 0x9d, 0x19, 0x63, 0xa4, 0xc7, 0x3e, 0xf2, 0x7a, 0x46, 0x30, 0xd4,
+ 0x4d, 0x1b, 0xa1, 0xb1, 0x26, 0xda, 0xc8, 0x33, 0xf6, 0xfd, 0x57, 0x34, 0x52, 0xd3, 0x43, 0xab,
+ 0x72, 0x74, 0xaa, 0x2b, 0x7d, 0xf6, 0xf9, 0xd6, 0xb3, 0xc2, 0x02, 0xe0, 0x0c, 0x33, 0xf1, 0xce,
+ 0x7c, 0xc8, 0xbe, 0xec, 0x06, 0xcc, 0x4c, 0x79, 0x33, 0x39, 0xd5, 0x9b, 0xe7, 0xec, 0x33, 0xaa,
+ 0x67, 0x66, 0x6f, 0x96, 0x37, 0x93, 0x53, 0xbd, 0x19, 0xb0, 0x0f, 0xac, 0x01, 0x33, 0x35, 0xbd,
+ 0xbe, 0x09, 0x44, 0x7c, 0xd5, 0x3c, 0x4f, 0xc4, 0xd8, 0x19, 0xb2, 0xcf, 0xe6, 0xfe, 0xcb, 0x66,
+ 0x94, 0x59, 0x86, 0xe2, 0x1d, 0xb2, 0xd8, 0x17, 0xf5, 0xa0, 0xa1, 0x9a, 0x5e, 0xdf, 0x82, 0xf3,
+ 0xe2, 0xc4, 0xce, 0xe0, 0x92, 0xad, 0x4a, 0x95, 0xa2, 0xb1, 0xe0, 0x4f, 0x8d, 0x73, 0x66, 0x9a,
+ 0x8a, 0x77, 0x6a, 0xa4, 0x4a, 0x15, 0x65, 0xca, 0x54, 0x4d, 0xaf, 0x3f, 0x80, 0xa2, 0x60, 0xea,
+ 0x00, 0x33, 0x74, 0xb4, 0x99, 0x17, 0xec, 0x5f, 0x1b, 0x3c, 0x33, 0x34, 0xa3, 0x87, 0xdf, 0x18,
+ 0xcf, 0x71, 0xd1, 0x46, 0xc6, 0xec, 0xbb, 0xbc, 0xef, 0x0b, 0x32, 0x42, 0x5b, 0x02, 0x2b, 0xed,
+ 0x38, 0x2b, 0x13, 0xf6, 0xc5, 0xde, 0x77, 0x85, 0x12, 0xea, 0xfd, 0xc0, 0x74, 0x4c, 0x9a, 0xe4,
+ 0x62, 0x6c, 0x38, 0x18, 0x91, 0xdf, 0x8c, 0x04, 0xac, 0x88, 0x57, 0x21, 0xc2, 0xb4, 0xe9, 0x63,
+ 0x7d, 0x0b, 0xe6, 0xcf, 0x1e, 0x90, 0x3e, 0x96, 0x58, 0x5d, 0x5c, 0x5d, 0xa1, 0xa5, 0xb3, 0x31,
+ 0xd7, 0x09, 0xc4, 0xa5, 0x06, 0xcc, 0x9d, 0x39, 0x28, 0x7d, 0x22, 0xb1, 0xea, 0x92, 0x5a, 0x32,
+ 0x0a, 0x9d, 0x60, 0x64, 0x9a, 0x3b, 0x73, 0x58, 0xfa, 0x54, 0x62, 0x57, 0x11, 0xba, 0xe6, 0x19,
+ 0x71, 0x23, 0xd3, 0xdc, 0x99, 0xc3, 0xd2, 0x57, 0x59, 0xed, 0x28, 0xeb, 0x55, 0xd1, 0x08, 0xc6,
+ 0x82, 0xf9, 0xb3, 0x87, 0xa5, 0xaf, 0x49, 0x78, 0x2d, 0x21, 0xeb, 0xba, 0xb7, 0x2e, 0x5e, 0x64,
+ 0x9a, 0x3f, 0x7b, 0x58, 0xfa, 0xba, 0x84, 0x97, 0x17, 0xb2, 0xbe, 0x1e, 0x30, 0x13, 0xf4, 0xe6,
+ 0xf4, 0xb0, 0xf4, 0x0d, 0x09, 0xef, 0x13, 0x64, 0xbd, 0xe6, 0x99, 0xd9, 0x9b, 0xf2, 0xe6, 0xf4,
+ 0xb0, 0xf4, 0x4d, 0x3c, 0xc5, 0xd7, 0x65, 0xfd, 0x4e, 0xc0, 0x0c, 0x46, 0xa6, 0xe2, 0x2b, 0x84,
+ 0xa5, 0x6f, 0x49, 0x78, 0xed, 0x23, 0xeb, 0x77, 0x0d, 0x77, 0x74, 0x3f, 0x32, 0x15, 0x5f, 0x21,
+ 0x2c, 0x7d, 0x26, 0xe1, 0xed, 0x90, 0xac, 0xdf, 0x0b, 0x1a, 0xc2, 0xc8, 0xa4, 0xbc, 0x4a, 0x58,
+ 0xfa, 0x36, 0xb5, 0x54, 0xac, 0xcb, 0xeb, 0xab, 0x86, 0xeb, 0x80, 0x10, 0x99, 0x94, 0x57, 0x09,
+ 0x4b, 0xdf, 0xa1, 0xa6, 0x94, 0xba, 0xbc, 0xbe, 0x16, 0x32, 0x55, 0xd3, 0xeb, 0x8f, 0xa0, 0x70,
+ 0xd6, 0xb0, 0xf4, 0x5d, 0xf1, 0xd6, 0x2d, 0xdf, 0x11, 0x62, 0xd3, 0xae, 0xf0, 0xce, 0x4e, 0x0d,
+ 0x4c, 0xdf, 0xc3, 0x1a, 0xa7, 0x3e, 0xf7, 0x84, 0xdd, 0x4c, 0x31, 0x82, 0xff, 0xfa, 0x58, 0x98,
+ 0xda, 0xf6, 0xf7, 0xc7, 0xa9, 0x31, 0xea, 0xfb, 0x12, 0x5e, 0x5f, 0x15, 0xb8, 0x41, 0xc4, 0x7b,
+ 0x3b, 0x85, 0x05, 0xac, 0x0f, 0xfd, 0x59, 0x9e, 0x16, 0xad, 0x7e, 0x20, 0xbd, 0x4a, 0xb8, 0xaa,
+ 0x27, 0x9a, 0x3b, 0x0d, 0x6f, 0x31, 0xb0, 0xe5, 0x6d, 0x48, 0x1e, 0x6b, 0xab, 0x6b, 0xe2, 0x91,
+ 0x4c, 0xbc, 0xb5, 0x65, 0x41, 0x2a, 0xaf, 0x15, 0x85, 0x8b, 0xed, 0xe1, 0xc8, 0x39, 0x31, 0x90,
+ 0xc5, 0xd9, 0x5a, 0x24, 0xfb, 0x93, 0x18, 0xb6, 0xc6, 0xd9, 0xd5, 0x48, 0xf6, 0xa7, 0x31, 0xec,
+ 0x2a, 0x67, 0xeb, 0x91, 0xec, 0xaf, 0xc6, 0xb0, 0x75, 0xce, 0x5e, 0x8f, 0x64, 0x7f, 0x2d, 0x86,
+ 0xbd, 0xce, 0xd9, 0xb5, 0x48, 0xf6, 0xd7, 0x63, 0xd8, 0x35, 0xce, 0xbe, 0x13, 0xc9, 0xfe, 0x46,
+ 0x0c, 0xfb, 0x0e, 0x67, 0xdf, 0x8d, 0x64, 0x7f, 0x33, 0x86, 0x7d, 0x97, 0xb3, 0xef, 0x45, 0xb2,
+ 0xbf, 0x15, 0xc3, 0xbe, 0xc7, 0xd8, 0x6b, 0xab, 0x91, 0xec, 0xcf, 0xa2, 0xd9, 0x6b, 0xab, 0x9c,
+ 0x1d, 0xad, 0xb5, 0x6f, 0xc7, 0xb0, 0xb9, 0xd6, 0xd6, 0xa2, 0xb5, 0xf6, 0x9d, 0x18, 0x36, 0xd7,
+ 0xda, 0x5a, 0xb4, 0xd6, 0xbe, 0x1b, 0xc3, 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0xf7, 0x62, 0xd8,
+ 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x7e, 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 0x5a, 0x6b, 0x3f, 0x88,
+ 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x8f, 0x62, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb,
+ 0xe3, 0x18, 0x36, 0xd7, 0xda, 0x5a, 0xb4, 0xd6, 0xfe, 0x24, 0x86, 0xcd, 0xb5, 0xa6, 0x45, 0x6b,
+ 0xed, 0x4f, 0xa3, 0xd9, 0x1a, 0xd7, 0x9a, 0x16, 0xad, 0xb5, 0x3f, 0x8b, 0x61, 0x73, 0xad, 0x69,
+ 0xd1, 0x5a, 0xfb, 0xf3, 0x18, 0x36, 0xd7, 0x9a, 0x16, 0xad, 0xb5, 0x1f, 0xc6, 0xb0, 0xb9, 0xd6,
+ 0xb4, 0x68, 0xad, 0xfd, 0x45, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0x5f, 0xc6, 0xb0, 0xb9,
+ 0xd6, 0xb4, 0x68, 0xad, 0xfd, 0x55, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0x5f, 0xc7, 0xb0,
+ 0xb9, 0xd6, 0xb4, 0x68, 0xad, 0xfd, 0x4d, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0xdf, 0xc6,
+ 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0xbb, 0x68, 0x76, 0x95, 0x6b, 0xad, 0x1a, 0xad, 0xb5,
+ 0xbf, 0x8f, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0x0f, 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4,
+ 0xd6, 0xfe, 0x31, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0x8f, 0x62, 0xd8, 0x5c, 0x6b, 0xd5,
+ 0x68, 0xad, 0xfd, 0x53, 0x0c, 0x9b, 0x6b, 0xad, 0x1a, 0xad, 0xb5, 0x7f, 0x8e, 0x61, 0x73, 0xad,
+ 0x55, 0xa3, 0xb5, 0xf6, 0x2f, 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x35, 0x86, 0xcd,
+ 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc5, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x7b, 0x34,
+ 0x5b, 0xe7, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x1f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0xff,
+ 0x19, 0xc3, 0xe6, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x5f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b,
+ 0xff, 0x1d, 0xc3, 0xe6, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x3f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a,
+ 0x6b, 0x3f, 0x8e, 0x61, 0x73, 0xad, 0xe9, 0xd1, 0x5a, 0xfb, 0x49, 0x0c, 0x9b, 0x6b, 0x4d, 0x8f,
+ 0xd6, 0xda, 0xff, 0xc6, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x5f, 0x0c, 0x9b, 0x6b, 0x6d,
+ 0x3d, 0x5a, 0x6b, 0xff, 0x1f, 0xcd, 0x5e, 0x5f, 0xfd, 0x69, 0x00, 0x00, 0x00, 0xff, 0xff, 0x81,
+ 0x23, 0xc6, 0xe6, 0xc6, 0x38, 0x00, 0x00,
+}
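The byte slice that closes above appears to be the gzip-compressed FileDescriptorProto blob that protoc-gen-go appends to the tail of generated `.pb.go` files. A minimal sketch of how such a blob can be inflated, assuming a Go 1.x-era toolchain; the `embedded` placeholder stands in for the full slice from the hunk above and the function name is illustrative:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

// decompressDescriptor inflates the gzip-compressed descriptor bytes
// that protoc-gen-go embeds in generated .pb.go files.
func decompressDescriptor(compressed []byte) ([]byte, error) {
	zr, err := gzip.NewReader(bytes.NewReader(compressed))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	return ioutil.ReadAll(zr)
}

func main() {
	// Placeholder: in the real generated file this would be the full
	// embedded byte slice shown in the diff above.
	var embedded []byte
	raw, err := decompressDescriptor(embedded)
	if err != nil {
		fmt.Println("decompress failed:", err)
		return
	}
	fmt.Printf("descriptor is %d bytes\n", len(raw))
}
```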
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto b/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto
new file mode 100644
index 000000000..f60711369
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto
@@ -0,0 +1,540 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A feature-rich test file for the protocol compiler and libraries.
+
+syntax = "proto2";
+
+package testdata;
+
+enum FOO { FOO1 = 1; };
+
+message GoEnum {
+ required FOO foo = 1;
+}
+
+message GoTestField {
+ required string Label = 1;
+ required string Type = 2;
+}
+
+message GoTest {
+ // An enum, for completeness.
+ enum KIND {
+ VOID = 0;
+
+ // Basic types
+ BOOL = 1;
+ BYTES = 2;
+ FINGERPRINT = 3;
+ FLOAT = 4;
+ INT = 5;
+ STRING = 6;
+ TIME = 7;
+
+ // Groupings
+ TUPLE = 8;
+ ARRAY = 9;
+ MAP = 10;
+
+ // Table types
+ TABLE = 11;
+
+ // Functions
+ FUNCTION = 12; // last tag
+ };
+
+ // Some typical parameters
+ required KIND Kind = 1;
+ optional string Table = 2;
+ optional int32 Param = 3;
+
+ // Required, repeated and optional foreign fields.
+ required GoTestField RequiredField = 4;
+ repeated GoTestField RepeatedField = 5;
+ optional GoTestField OptionalField = 6;
+
+ // Required fields of all basic types
+ required bool F_Bool_required = 10;
+ required int32 F_Int32_required = 11;
+ required int64 F_Int64_required = 12;
+ required fixed32 F_Fixed32_required = 13;
+ required fixed64 F_Fixed64_required = 14;
+ required uint32 F_Uint32_required = 15;
+ required uint64 F_Uint64_required = 16;
+ required float F_Float_required = 17;
+ required double F_Double_required = 18;
+ required string F_String_required = 19;
+ required bytes F_Bytes_required = 101;
+ required sint32 F_Sint32_required = 102;
+ required sint64 F_Sint64_required = 103;
+
+ // Repeated fields of all basic types
+ repeated bool F_Bool_repeated = 20;
+ repeated int32 F_Int32_repeated = 21;
+ repeated int64 F_Int64_repeated = 22;
+ repeated fixed32 F_Fixed32_repeated = 23;
+ repeated fixed64 F_Fixed64_repeated = 24;
+ repeated uint32 F_Uint32_repeated = 25;
+ repeated uint64 F_Uint64_repeated = 26;
+ repeated float F_Float_repeated = 27;
+ repeated double F_Double_repeated = 28;
+ repeated string F_String_repeated = 29;
+ repeated bytes F_Bytes_repeated = 201;
+ repeated sint32 F_Sint32_repeated = 202;
+ repeated sint64 F_Sint64_repeated = 203;
+
+ // Optional fields of all basic types
+ optional bool F_Bool_optional = 30;
+ optional int32 F_Int32_optional = 31;
+ optional int64 F_Int64_optional = 32;
+ optional fixed32 F_Fixed32_optional = 33;
+ optional fixed64 F_Fixed64_optional = 34;
+ optional uint32 F_Uint32_optional = 35;
+ optional uint64 F_Uint64_optional = 36;
+ optional float F_Float_optional = 37;
+ optional double F_Double_optional = 38;
+ optional string F_String_optional = 39;
+ optional bytes F_Bytes_optional = 301;
+ optional sint32 F_Sint32_optional = 302;
+ optional sint64 F_Sint64_optional = 303;
+
+ // Default-valued fields of all basic types
+ optional bool F_Bool_defaulted = 40 [default=true];
+ optional int32 F_Int32_defaulted = 41 [default=32];
+ optional int64 F_Int64_defaulted = 42 [default=64];
+ optional fixed32 F_Fixed32_defaulted = 43 [default=320];
+ optional fixed64 F_Fixed64_defaulted = 44 [default=640];
+ optional uint32 F_Uint32_defaulted = 45 [default=3200];
+ optional uint64 F_Uint64_defaulted = 46 [default=6400];
+ optional float F_Float_defaulted = 47 [default=314159.];
+ optional double F_Double_defaulted = 48 [default=271828.];
+ optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"];
+ optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
+ optional sint32 F_Sint32_defaulted = 402 [default = -32];
+ optional sint64 F_Sint64_defaulted = 403 [default = -64];
+
+ // Packed repeated fields (no string or bytes).
+ repeated bool F_Bool_repeated_packed = 50 [packed=true];
+ repeated int32 F_Int32_repeated_packed = 51 [packed=true];
+ repeated int64 F_Int64_repeated_packed = 52 [packed=true];
+ repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true];
+ repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true];
+ repeated uint32 F_Uint32_repeated_packed = 55 [packed=true];
+ repeated uint64 F_Uint64_repeated_packed = 56 [packed=true];
+ repeated float F_Float_repeated_packed = 57 [packed=true];
+ repeated double F_Double_repeated_packed = 58 [packed=true];
+ repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
+ repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
+
+ // Required, repeated, and optional groups.
+ required group RequiredGroup = 70 {
+ required string RequiredField = 71;
+ };
+
+ repeated group RepeatedGroup = 80 {
+ required string RequiredField = 81;
+ };
+
+ optional group OptionalGroup = 90 {
+ required string RequiredField = 91;
+ };
+}
+
+// For testing skipping of unrecognized fields.
+// Numbers are all big, larger than tag numbers in GoTestField,
+// the message used in the corresponding test.
+message GoSkipTest {
+ required int32 skip_int32 = 11;
+ required fixed32 skip_fixed32 = 12;
+ required fixed64 skip_fixed64 = 13;
+ required string skip_string = 14;
+ required group SkipGroup = 15 {
+ required int32 group_int32 = 16;
+ required string group_string = 17;
+ }
+}
+
+// For testing packed/non-packed decoder switching.
+// A serialized instance of one should be deserializable as the other.
+message NonPackedTest {
+ repeated int32 a = 1;
+}
+
+message PackedTest {
+ repeated int32 b = 1 [packed=true];
+}
+
+message MaxTag {
+ // Maximum possible tag number.
+ optional string last_field = 536870911;
+}
+
+message OldMessage {
+ message Nested {
+ optional string name = 1;
+ }
+ optional Nested nested = 1;
+
+ optional int32 num = 2;
+}
+
+// NewMessage is wire compatible with OldMessage;
+// imagine it as a future version.
+message NewMessage {
+ message Nested {
+ optional string name = 1;
+ optional string food_group = 2;
+ }
+ optional Nested nested = 1;
+
+ // This is an int32 in OldMessage.
+ optional int64 num = 2;
+}
+
+// Smaller tests for ASCII formatting.
+
+message InnerMessage {
+ required string host = 1;
+ optional int32 port = 2 [default=4000];
+ optional bool connected = 3;
+}
+
+message OtherMessage {
+ optional int64 key = 1;
+ optional bytes value = 2;
+ optional float weight = 3;
+ optional InnerMessage inner = 4;
+
+ extensions 100 to max;
+}
+
+message RequiredInnerMessage {
+ required InnerMessage leo_finally_won_an_oscar = 1;
+}
+
+message MyMessage {
+ required int32 count = 1;
+ optional string name = 2;
+ optional string quote = 3;
+ repeated string pet = 4;
+ optional InnerMessage inner = 5;
+ repeated OtherMessage others = 6;
+ optional RequiredInnerMessage we_must_go_deeper = 13;
+ repeated InnerMessage rep_inner = 12;
+
+ enum Color {
+ RED = 0;
+ GREEN = 1;
+ BLUE = 2;
+ };
+ optional Color bikeshed = 7;
+
+ optional group SomeGroup = 8 {
+ optional int32 group_field = 9;
+ }
+
+ // This field becomes [][]byte in the generated code.
+ repeated bytes rep_bytes = 10;
+
+ optional double bigfloat = 11;
+
+ extensions 100 to max;
+}
+
+message Ext {
+ extend MyMessage {
+ optional Ext more = 103;
+ optional string text = 104;
+ optional int32 number = 105;
+ }
+
+ optional string data = 1;
+}
+
+extend MyMessage {
+ repeated string greeting = 106;
+}
+
+message ComplexExtension {
+ optional int32 first = 1;
+ optional int32 second = 2;
+ repeated int32 third = 3;
+}
+
+extend OtherMessage {
+ optional ComplexExtension complex = 200;
+ repeated ComplexExtension r_complex = 201;
+}
+
+message DefaultsMessage {
+ enum DefaultsEnum {
+ ZERO = 0;
+ ONE = 1;
+ TWO = 2;
+ };
+ extensions 100 to max;
+}
+
+extend DefaultsMessage {
+ optional double no_default_double = 101;
+ optional float no_default_float = 102;
+ optional int32 no_default_int32 = 103;
+ optional int64 no_default_int64 = 104;
+ optional uint32 no_default_uint32 = 105;
+ optional uint64 no_default_uint64 = 106;
+ optional sint32 no_default_sint32 = 107;
+ optional sint64 no_default_sint64 = 108;
+ optional fixed32 no_default_fixed32 = 109;
+ optional fixed64 no_default_fixed64 = 110;
+ optional sfixed32 no_default_sfixed32 = 111;
+ optional sfixed64 no_default_sfixed64 = 112;
+ optional bool no_default_bool = 113;
+ optional string no_default_string = 114;
+ optional bytes no_default_bytes = 115;
+ optional DefaultsMessage.DefaultsEnum no_default_enum = 116;
+
+ optional double default_double = 201 [default = 3.1415];
+ optional float default_float = 202 [default = 3.14];
+ optional int32 default_int32 = 203 [default = 42];
+ optional int64 default_int64 = 204 [default = 43];
+ optional uint32 default_uint32 = 205 [default = 44];
+ optional uint64 default_uint64 = 206 [default = 45];
+ optional sint32 default_sint32 = 207 [default = 46];
+ optional sint64 default_sint64 = 208 [default = 47];
+ optional fixed32 default_fixed32 = 209 [default = 48];
+ optional fixed64 default_fixed64 = 210 [default = 49];
+ optional sfixed32 default_sfixed32 = 211 [default = 50];
+ optional sfixed64 default_sfixed64 = 212 [default = 51];
+ optional bool default_bool = 213 [default = true];
+ optional string default_string = 214 [default = "Hello, string"];
+ optional bytes default_bytes = 215 [default = "Hello, bytes"];
+ optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE];
+}
+
+message MyMessageSet {
+ option message_set_wire_format = true;
+ extensions 100 to max;
+}
+
+message Empty {
+}
+
+extend MyMessageSet {
+ optional Empty x201 = 201;
+ optional Empty x202 = 202;
+ optional Empty x203 = 203;
+ optional Empty x204 = 204;
+ optional Empty x205 = 205;
+ optional Empty x206 = 206;
+ optional Empty x207 = 207;
+ optional Empty x208 = 208;
+ optional Empty x209 = 209;
+ optional Empty x210 = 210;
+ optional Empty x211 = 211;
+ optional Empty x212 = 212;
+ optional Empty x213 = 213;
+ optional Empty x214 = 214;
+ optional Empty x215 = 215;
+ optional Empty x216 = 216;
+ optional Empty x217 = 217;
+ optional Empty x218 = 218;
+ optional Empty x219 = 219;
+ optional Empty x220 = 220;
+ optional Empty x221 = 221;
+ optional Empty x222 = 222;
+ optional Empty x223 = 223;
+ optional Empty x224 = 224;
+ optional Empty x225 = 225;
+ optional Empty x226 = 226;
+ optional Empty x227 = 227;
+ optional Empty x228 = 228;
+ optional Empty x229 = 229;
+ optional Empty x230 = 230;
+ optional Empty x231 = 231;
+ optional Empty x232 = 232;
+ optional Empty x233 = 233;
+ optional Empty x234 = 234;
+ optional Empty x235 = 235;
+ optional Empty x236 = 236;
+ optional Empty x237 = 237;
+ optional Empty x238 = 238;
+ optional Empty x239 = 239;
+ optional Empty x240 = 240;
+ optional Empty x241 = 241;
+ optional Empty x242 = 242;
+ optional Empty x243 = 243;
+ optional Empty x244 = 244;
+ optional Empty x245 = 245;
+ optional Empty x246 = 246;
+ optional Empty x247 = 247;
+ optional Empty x248 = 248;
+ optional Empty x249 = 249;
+ optional Empty x250 = 250;
+}
+
+message MessageList {
+ repeated group Message = 1 {
+ required string name = 2;
+ required int32 count = 3;
+ }
+}
+
+message Strings {
+ optional string string_field = 1;
+ optional bytes bytes_field = 2;
+}
+
+message Defaults {
+ enum Color {
+ RED = 0;
+ GREEN = 1;
+ BLUE = 2;
+ }
+
+ // Default-valued fields of all basic types.
+ // Same as GoTest, but copied here to make testing easier.
+ optional bool F_Bool = 1 [default=true];
+ optional int32 F_Int32 = 2 [default=32];
+ optional int64 F_Int64 = 3 [default=64];
+ optional fixed32 F_Fixed32 = 4 [default=320];
+ optional fixed64 F_Fixed64 = 5 [default=640];
+ optional uint32 F_Uint32 = 6 [default=3200];
+ optional uint64 F_Uint64 = 7 [default=6400];
+ optional float F_Float = 8 [default=314159.];
+ optional double F_Double = 9 [default=271828.];
+ optional string F_String = 10 [default="hello, \"world!\"\n"];
+ optional bytes F_Bytes = 11 [default="Bignose"];
+ optional sint32 F_Sint32 = 12 [default=-32];
+ optional sint64 F_Sint64 = 13 [default=-64];
+ optional Color F_Enum = 14 [default=GREEN];
+
+ // More fields with crazy defaults.
+ optional float F_Pinf = 15 [default=inf];
+ optional float F_Ninf = 16 [default=-inf];
+ optional float F_Nan = 17 [default=nan];
+
+ // Sub-message.
+ optional SubDefaults sub = 18;
+
+ // Redundant but explicit defaults.
+ optional string str_zero = 19 [default=""];
+}
+
+message SubDefaults {
+ optional int64 n = 1 [default=7];
+}
+
+message RepeatedEnum {
+ enum Color {
+ RED = 1;
+ }
+ repeated Color color = 1;
+}
+
+message MoreRepeated {
+ repeated bool bools = 1;
+ repeated bool bools_packed = 2 [packed=true];
+ repeated int32 ints = 3;
+ repeated int32 ints_packed = 4 [packed=true];
+ repeated int64 int64s_packed = 7 [packed=true];
+ repeated string strings = 5;
+ repeated fixed32 fixeds = 6;
+}
+
+// GroupOld and GroupNew have the same wire format.
+// GroupNew has a new field inside a group.
+
+message GroupOld {
+ optional group G = 101 {
+ optional int32 x = 2;
+ }
+}
+
+message GroupNew {
+ optional group G = 101 {
+ optional int32 x = 2;
+ optional int32 y = 3;
+ }
+}
+
+message FloatingPoint {
+ required double f = 1;
+}
+
+message MessageWithMap {
+ map<int32, string> name_mapping = 1;
+ map<sint64, FloatingPoint> msg_mapping = 2;
+ map<bool, bytes> byte_mapping = 3;
+ map<string, string> str_to_str = 4;
+}
+
+message Oneof {
+ oneof union {
+ bool F_Bool = 1;
+ int32 F_Int32 = 2;
+ int64 F_Int64 = 3;
+ fixed32 F_Fixed32 = 4;
+ fixed64 F_Fixed64 = 5;
+ uint32 F_Uint32 = 6;
+ uint64 F_Uint64 = 7;
+ float F_Float = 8;
+ double F_Double = 9;
+ string F_String = 10;
+ bytes F_Bytes = 11;
+ sint32 F_Sint32 = 12;
+ sint64 F_Sint64 = 13;
+ MyMessage.Color F_Enum = 14;
+ GoTestField F_Message = 15;
+ group F_Group = 16 {
+ optional int32 x = 17;
+ }
+ int32 F_Largest_Tag = 536870911;
+ }
+
+ oneof tormato {
+ int32 value = 100;
+ }
+}
+
+message Communique {
+ optional bool make_me_cry = 1;
+
+ // This is a oneof, called "union".
+ oneof union {
+ int32 number = 5;
+ string name = 6;
+ bytes data = 7;
+ double temp_c = 8;
+ MyMessage.Color col = 9;
+ Strings msg = 10;
+ }
+}
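The `OldMessage`/`NewMessage` pair in the file above documents that widening `num` from `int32` to `int64` keeps the wire format compatible, since both encode as varints. A hedged sketch of that round trip, assuming this test file is compiled with protoc-gen-go into an importable `testdata` package (the import path below is illustrative, not confirmed by the diff):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	// Hypothetical import path for the generated testdata package.
	testdata "github.com/matttproud/golang_protobuf_extensions/testdata"
)

func main() {
	// Encode with the old schema, where num is an int32.
	old := &testdata.OldMessage{Num: proto.Int32(42)}
	buf, err := proto.Marshal(old)
	if err != nil {
		panic(err)
	}

	// Decode the same bytes with the new schema, where num is an int64.
	// Varint encoding is widening-compatible, so this round-trips.
	fresh := &testdata.NewMessage{}
	if err := proto.Unmarshal(buf, fresh); err != nil {
		panic(err)
	}
	fmt.Println(fresh.GetNum()) // 42
}
```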
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
index e6dbad25d..1ad23c751 100644
--- a/vendor/github.com/miekg/dns/README.md
+++ b/vendor/github.com/miekg/dns/README.md
@@ -62,6 +62,8 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://dnssectest.net/
* https://dns.apebits.com
* https://github.com/oif/apex
+* https://github.com/jedisct1/dnscrypt-proxy
+* https://github.com/jedisct1/rpdns
Send a pull request if you want to be listed here.
diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go
index b6ce5b5f6..685753f43 100644
--- a/vendor/github.com/miekg/dns/server.go
+++ b/vendor/github.com/miekg/dns/server.go
@@ -472,11 +472,14 @@ func (srv *Server) serveTCP(l net.Listener) error {
}
return err
}
- m, err := reader.ReadTCP(rw, rtimeout)
- if err != nil {
- continue
- }
- go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
+ go func() {
+ m, err := reader.ReadTCP(rw, rtimeout)
+ if err != nil {
+ rw.Close()
+ return
+ }
+ srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
+ }()
}
}
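The `serveTCP` hunk above moves `reader.ReadTCP` off the accept loop, so a single slow or stalled TCP client can no longer block new connections from being accepted, and a failed read now closes the connection instead of leaking it. A minimal sketch of the same accept-loop pattern in plain `net` terms; the names here are illustrative, not the library's actual types:

```go
package main

import (
	"log"
	"net"
	"time"
)

func serveTCP(l net.Listener, handle func(net.Conn, []byte)) error {
	for {
		conn, err := l.Accept()
		if err != nil {
			return err
		}
		go func(c net.Conn) {
			// Mirror the patch: the read happens off the accept loop,
			// and a failed read closes the connection.
			c.SetReadDeadline(time.Now().Add(2 * time.Second))
			buf := make([]byte, 512)
			n, err := c.Read(buf)
			if err != nil {
				c.Close()
				return
			}
			handle(c, buf[:n])
		}(conn)
	}
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("listening on", l.Addr())
	log.Fatal(serveTCP(l, func(c net.Conn, b []byte) { c.Close() }))
}
```

Per-connection goroutines trade a small scheduling cost for isolation: the listener stays responsive even when individual clients never send a byte.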
diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
index 0e23cc3e1..e41d2b3ca 100644
--- a/vendor/github.com/miekg/dns/version.go
+++ b/vendor/github.com/miekg/dns/version.go
@@ -3,7 +3,7 @@ package dns
import "fmt"
// Version is current version of this library.
-var Version = V{1, 0, 3}
+var Version = V{1, 0, 4}
// V holds the version of this library.
type V struct {
diff --git a/vendor/github.com/minio/go-homedir/README.md b/vendor/github.com/minio/go-homedir/README.md
deleted file mode 100644
index 2d9456fa5..000000000
--- a/vendor/github.com/minio/go-homedir/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# go-homedir
-
-This is a Go library for detecting the user's home directory without
-the use of cgo, so the library can be used in cross-compilation environments.
-
-Usage is incredibly simple, just call `homedir.Dir()` to get the home directory
-for a user, and `homedir.Expand()` to expand the `~` in a path to the home
-directory.
-
-**Why not just use `os/user`?** The built-in `os/user` package is not
-available on certain architectures such as i386 or PNaCl. Additionally
-it has a cgo dependency on Darwin systems. This means that any Go code
-that uses that package cannot cross compile. But 99% of the time the
-use for `os/user` is just to retrieve the home directory, which we can
-do for the current user without cgo. This library does that, enabling
-cross-compilation.
diff --git a/vendor/github.com/minio/go-homedir/dir_posix.go b/vendor/github.com/minio/go-homedir/dir_posix.go
deleted file mode 100644
index f1de21488..000000000
--- a/vendor/github.com/minio/go-homedir/dir_posix.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// +build !windows
-
-package homedir
-
-import (
- "bytes"
- "errors"
- "os"
- "os/exec"
- "os/user"
- "strconv"
- "strings"
-)
-
-// dir returns the homedir of current user for all POSIX compatible
-// operating systems.
-func dir() (string, error) {
- // First prefer the HOME environmental variable
- if home := os.Getenv("HOME"); home != "" {
- return home, nil
- }
-
- // user.Current is not implemented for i386 and PNaCL like environments.
- if currUser, err := user.Current(); err == nil {
- return currUser.HomeDir, nil
- }
-
- // If that fails, try getent
- var stdout bytes.Buffer
- cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
- cmd.Stdout = &stdout
- if err := cmd.Run(); err != nil {
- // If "getent" is missing, ignore it
- if err != exec.ErrNotFound {
- return "", err
- }
- } else {
- if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
- // username:password:uid:gid:gecos:home:shell
- passwdParts := strings.SplitN(passwd, ":", 7)
- if len(passwdParts) > 5 {
- return passwdParts[5], nil
- }
- }
- }
-
- // If all else fails, try the shell
- stdout.Reset()
- cmd = exec.Command("sh", "-c", "cd && pwd")
- cmd.Stdout = &stdout
- if err := cmd.Run(); err != nil {
- return "", err
- }
-
- result := strings.TrimSpace(stdout.String())
- if result == "" {
- return "", errors.New("blank output when reading home directory")
- }
-
- return result, nil
-}
diff --git a/vendor/github.com/minio/go-homedir/dir_windows.go b/vendor/github.com/minio/go-homedir/dir_windows.go
deleted file mode 100644
index e1ac9cf77..000000000
--- a/vendor/github.com/minio/go-homedir/dir_windows.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package homedir
-
-import (
- "errors"
- "os"
-)
-
-// dir returns the homedir of current user for MS Windows OS.
-func dir() (string, error) {
- // First prefer the HOME environmental variable
- if home := os.Getenv("HOME"); home != "" {
- return home, nil
- }
- drive := os.Getenv("HOMEDRIVE")
- path := os.Getenv("HOMEPATH")
- home := drive + path
- if drive == "" || path == "" {
- home = os.Getenv("USERPROFILE")
- }
- if home == "" {
- return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
- }
-
- return home, nil
-}
diff --git a/vendor/github.com/minio/go-homedir/homedir.go b/vendor/github.com/minio/go-homedir/homedir.go
deleted file mode 100644
index ecc9c5e4b..000000000
--- a/vendor/github.com/minio/go-homedir/homedir.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Package homedir implements a portable function to determine current user's homedir.
-package homedir
-
-import (
- "errors"
- "path/filepath"
- "sync"
-)
-
-// DisableCache will disable caching of the home directory. Caching is enabled
-// by default.
-var DisableCache bool
-
-var homedirCache string
-var cacheLock sync.RWMutex
-
-// Dir returns the home directory for the executing user.
-//
-// This uses an OS-specific method for discovering the home directory.
-// An error is returned if a home directory cannot be detected.
-func Dir() (string, error) {
- if !DisableCache {
- cacheLock.RLock()
- cached := homedirCache
- cacheLock.RUnlock()
- if cached != "" {
- return cached, nil
- }
- }
-
- cacheLock.Lock()
- defer cacheLock.Unlock()
-
- // Determine OS specific current homedir.
- result, err := dir()
- if err != nil {
- return "", err
- }
-
- // Cache for future lookups.
- homedirCache = result
- return result, nil
-}
-
-// Expand expands the path to include the home directory if the path
-// is prefixed with `~`. If it isn't prefixed with `~`, the path is
-// returned as-is.
-func Expand(path string) (string, error) {
- if len(path) == 0 {
- return path, nil
- }
-
- if path[0] != '~' {
- return path, nil
- }
-
- if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
- return "", errors.New("cannot expand user-specific home dir")
- }
-
- dir, err := Dir()
- if err != nil {
- return "", err
- }
-
- return filepath.Join(dir, path[1:]), nil
-}
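The deleted `dir()` implementations above tried, in order: the `HOME` environment variable, `os/user`, `getent passwd`, and finally `sh -c 'cd && pwd'`. Callers that only need the common cases can inline a much smaller lookup; a hedged stand-in that keeps just the first two fallbacks (the getent and shell fallbacks from the deleted code are intentionally omitted):

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"os/user"
)

// homeDir is a minimal stand-in for the removed package's Dir().
func homeDir() (string, error) {
	// First prefer the HOME environment variable.
	if home := os.Getenv("HOME"); home != "" {
		return home, nil
	}
	// Fall back to os/user, which may use cgo on some platforms.
	if u, err := user.Current(); err == nil && u.HomeDir != "" {
		return u.HomeDir, nil
	}
	return "", errors.New("home directory not found")
}

func main() {
	home, err := homeDir()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(home)
}
```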
diff --git a/vendor/github.com/minio/minio-go/.gitignore b/vendor/github.com/minio/minio-go/.gitignore
index acf19db3a..fa967abd7 100644
--- a/vendor/github.com/minio/minio-go/.gitignore
+++ b/vendor/github.com/minio/minio-go/.gitignore
@@ -1,2 +1,3 @@
*~
-*.test \ No newline at end of file
+*.test
+validator
diff --git a/vendor/github.com/minio/minio-go/.travis.yml b/vendor/github.com/minio/minio-go/.travis.yml
index 3d260fa61..4ae1eadf0 100644
--- a/vendor/github.com/minio/minio-go/.travis.yml
+++ b/vendor/github.com/minio/minio-go/.travis.yml
@@ -9,18 +9,22 @@ env:
- ARCH=i686
go:
-- 1.5.3
-- 1.6
- 1.7.4
-- 1.8
+- 1.8.x
+- 1.9.x
+- tip
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+
+addons:
+ apt:
+ packages:
+ - devscripts
script:
- diff -au <(gofmt -d .) <(printf "")
-- go get -u github.com/cheggaaa/pb/...
-- go get -u github.com/sirupsen/logrus/...
-- go get -u github.com/dustin/go-humanize/...
-- go vet ./...
-- SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./...
-- SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go
-- mkdir /tmp/examples \
- && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
+- diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "")
+- make
diff --git a/vendor/github.com/minio/minio-go/MAINTAINERS.md b/vendor/github.com/minio/minio-go/MAINTAINERS.md
index e2a957137..17973078e 100644
--- a/vendor/github.com/minio/minio-go/MAINTAINERS.md
+++ b/vendor/github.com/minio/minio-go/MAINTAINERS.md
@@ -5,24 +5,25 @@
Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
### Making new releases
-Edit `libraryVersion` constant in `api.go`.
-
+Tag and sign your release commit. Additionally, this step requires access to Minio's trusted private key.
+```sh
+$ export GNUPGHOME=/media/${USER}/minio/trusted
+$ git tag -s 4.0.0
+$ git push
+$ git push --tags
```
+
+### Update version
+Once the release has been made, update the `libraryVersion` constant in `api.go` to the next release version.
+
+```sh
$ grep libraryVersion api.go
- libraryVersion = "0.3.0"
+ libraryVersion = "4.0.1"
```
Commit your changes
```
-$ git commit -a -m "Bump to new release 0.3.0" --author "Minio Trusted <trusted@minio.io>"
-```
-
-Tag and sign your release commit, additionally this step requires you to have access to Minio's trusted private key.
-```
-$ export GNUPGHOME=/path/to/trusted/key
-$ git tag -s 0.3.0
-$ git push
-$ git push --tags
+$ git commit -a -m "Update version for next release" --author "Minio Trusted <trusted@minio.io>"
```
### Announce
@@ -30,5 +31,5 @@ Announce new release by adding release notes at https://github.com/minio/minio-g
To generate `changelog`
```sh
-git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <latest_release_tag>..<last_release_tag>
+$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
```
diff --git a/vendor/github.com/minio/minio-go/Makefile b/vendor/github.com/minio/minio-go/Makefile
new file mode 100644
index 000000000..05081c723
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/Makefile
@@ -0,0 +1,15 @@
+all: checks
+
+checks:
+ @go get -t ./...
+ @go vet ./...
+ @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./...
+ @go get github.com/dustin/go-humanize/...
+ @go get github.com/sirupsen/logrus/...
+ @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go
+ @mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
+ @go get -u github.com/a8m/mark/...
+ @go get -u github.com/minio/cli/...
+ @go get -u golang.org/x/tools/cmd/goimports
+ @go get -u github.com/gernest/wow/...
+ @go build docs/validator.go && ./validator -m docs/API.md -t docs/checker.go.tpl
diff --git a/vendor/github.com/minio/minio-go/NOTICE b/vendor/github.com/minio/minio-go/NOTICE
new file mode 100644
index 000000000..c521791c5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/NOTICE
@@ -0,0 +1,2 @@
+minio-go
+Copyright 2015-2017 Minio, Inc. \ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
index 5eb6656d5..2dedc1a28 100644
--- a/vendor/github.com/minio/minio-go/README.md
+++ b/vendor/github.com/minio/minio-go/README.md
@@ -1,19 +1,7 @@
-# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
-**Supported cloud storage providers:**
-
-- AWS Signature Version 4
- - Amazon S3
- - Minio
-
-- AWS Signature Version 2
- - Google Cloud Storage (Compatibility Mode)
- - Openstack Swift + Swift3 middleware
- - Ceph Object Gateway
- - Riak CS
-
This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
@@ -55,6 +43,7 @@ func main() {
}
log.Printf("%#v\n", minioClient) // minioClient is now setup
+}
```
## Quick Start Example - File Uploader
@@ -105,7 +94,7 @@ func main() {
contentType := "application/zip"
// Upload the zip file with FPutObject
- n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType)
+ n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType})
if err != nil {
log.Fatalln(err)
}
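The hunk above reflects the minio-go v4 signature change: `FPutObject` now takes a `minio.PutObjectOptions` struct instead of a bare content-type string. A self-contained sketch of the migrated call, reusing the public play.minio.io test credentials that appear elsewhere in this diff (bucket and file names are illustrative):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Public test credentials for play.minio.io, as used in this diff.
	client, err := minio.New("play.minio.io:9000",
		"Q3AM3UQ867SPQQA43P2F",
		"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}

	// v4 signature: per-call options replace the old contentType string.
	n, err := client.FPutObject("mymusic", "golden-oldies.zip",
		"/tmp/golden-oldies.zip",
		minio.PutObjectOptions{ContentType: "application/zip"})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %d bytes", n)
}
```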
@@ -152,10 +141,14 @@ The full API Reference is available here.
### API Reference : File Object Operations
* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
+* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
### API Reference : Object Operations
* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext)
+* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext)
* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
@@ -204,10 +197,14 @@ The full API Reference is available here.
### Full Examples : File Object Operations
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
+* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
### Full Examples : Object Operations
* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
+* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
@@ -217,6 +214,7 @@ The full API Reference is available here.
### Full Examples : Encrypted Object Operations
* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
### Full Examples : Presigned Operations
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
@@ -235,3 +233,5 @@ The full API Reference is available here.
[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+## License
+This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/README_zh_CN.md b/vendor/github.com/minio/minio-go/README_zh_CN.md
new file mode 100644
index 000000000..5584f4255
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/README_zh_CN.md
@@ -0,0 +1,246 @@
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
+
+The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage service.
+
+**Supported cloud storage providers:**
+
+- AWS Signature Version 4
+ - Amazon S3
+ - Minio
+
+- AWS Signature Version 2
+ - Google Cloud Storage (Compatibility Mode)
+ - Openstack Swift + Swift3 middleware
+ - Ceph Object Gateway
+ - Riak CS
+
+This guide shows how to install the Minio client SDK, connect to Minio, and walk through a simple file-upload example. For the complete list of APIs and examples, see the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
+
+This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
+
+## Download from GitHub
+```sh
+go get -u github.com/minio/minio-go
+```
+
+## Initialize the Minio Client
+The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage.
+
+| Parameter | Description |
+| :--- | :--- |
+| endpoint | URL of the object storage service |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
+| secretAccessKey | Secret key is the password for your account. |
+| secure | Set to true to use HTTPS. |
+
+
+```go
+package main
+
+import (
+ "github.com/minio/minio-go"
+ "log"
+)
+
+func main() {
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize the minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("%#v\n", minioClient) // minioClient is now set up
+}
+```
+
+## Quick Start Example - File Uploader
+This example connects to an object storage service, creates a bucket, and uploads a file to the bucket.
+
+This example uses the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000), which you can use for development and testing. The access credentials shown in this example are open to the public.
+
+### FileUploader.go
+```go
+package main
+
+import (
+ "github.com/minio/minio-go"
+ "log"
+)
+
+func main() {
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize the minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Make a bucket called mymusic.
+ bucketName := "mymusic"
+ location := "us-east-1"
+
+ err = minioClient.MakeBucket(bucketName, location)
+ if err != nil {
+ // Check to see if the bucket already exists.
+ exists, err := minioClient.BucketExists(bucketName)
+ if err == nil && exists {
+ log.Printf("We already own %s\n", bucketName)
+ } else {
+ log.Fatalln(err)
+ }
+ }
+ log.Printf("Successfully created %s\n", bucketName)
+
+ // Upload a zip file.
+ objectName := "golden-oldies.zip"
+ filePath := "/tmp/golden-oldies.zip"
+ contentType := "application/zip"
+
+ // Upload the zip file with FPutObject.
+ n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType})
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
+}
+```
+
+### Run FileUploader
+```sh
+go run file-uploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+
+mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
+```
+
+## API Reference
+The full API Reference is available here.
+* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+
+### API Reference : Bucket Operations
+* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
+* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
+* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
+* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
+* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
+* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
+* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
+
+### API Reference : Bucket Policy Operations
+* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
+* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
+* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
+
+### API Reference : Bucket Notification Operations
+* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
+* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
+
+### API Reference : File Object Operations
+* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
+* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
+
+### API Reference : Object Operations
+* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
+* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext)
+* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext)
+* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
+* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
+* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
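+
+`GetObject` returns a seekable, readable handle whose contents are fetched as they are read. A hedged sketch that streams an object to a local file (paths are illustrative; `os` and `io` imports assumed):
+
+```go
+object, err := minioClient.GetObject("mymusic", "golden-oldies.zip", minio.GetObjectOptions{})
+if err != nil {
+    log.Fatalln(err)
+}
+defer object.Close()
+
+localFile, err := os.Create("/tmp/local-copy.zip")
+if err != nil {
+    log.Fatalln(err)
+}
+defer localFile.Close()
+
+// Stream the object body into the local file.
+if _, err = io.Copy(localFile, object); err != nil {
+    log.Fatalln(err)
+}
+```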
+
+### API Reference : Encrypted Object Operations
+* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
+* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)
+
+### API Reference : Presigned Operations
+* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
+* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
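+
+Presigned URLs grant time-limited access to an object without sharing credentials. A minimal sketch with a seven-day expiry (the maximum allowed); passing `nil` request parameters means no response-header overrides are requested:
+
+```go
+presignedURL, err := minioClient.PresignedGetObject("mymusic", "golden-oldies.zip", 7*24*time.Hour, nil)
+if err != nil {
+    log.Fatalln(err)
+}
+log.Println("Share this URL:", presignedURL)
+```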
+
+### API Reference : Client Custom Settings
+* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
+* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
+* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
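+
+These settings are applied to an already constructed client. A short sketch; the application name and version are illustrative:
+
+```go
+// Append an application name/version to this client's User-Agent.
+minioClient.SetAppInfo("myCloudApp", "1.0.0")
+
+// Trace HTTP requests/responses to stdout while debugging, then turn tracing off.
+minioClient.TraceOn(os.Stdout)
+minioClient.TraceOff()
+```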
+
+## Full Examples
+
+### Full Examples : Bucket Operations
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+### Full Examples : Bucket Policy Operations
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+### Full Examples : Bucket Notification Operations
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
+
+### Full Examples : File Object Operations
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
+* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
+
+### Full Examples : Object Operations
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
+* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fputencrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.minio.io)
+* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+* [Go Music Player App Full Example](https://docs.minio.io/docs/go-music-player-app)
+
+## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)
+
+[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
+[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+
diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go
index 4fa88b818..81314e3b4 100644
--- a/vendor/github.com/minio/minio-go/api-compose-object.go
+++ b/vendor/github.com/minio/minio-go/api-compose-object.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"encoding/base64"
"fmt"
"net/http"
@@ -58,7 +60,7 @@ func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string {
return map[string]string{
"x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo,
"x-amz-" + cs + "server-side-encryption-customer-key": base64.StdEncoding.EncodeToString(s.key),
- "x-amz-" + cs + "server-side-encryption-customer-key-MD5": base64.StdEncoding.EncodeToString(sumMD5(s.key)),
+ "x-amz-" + cs + "server-side-encryption-customer-key-MD5": sumMD5Base64(s.key),
}
}
@@ -115,7 +117,7 @@ func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
k = k[len("x-amz-meta-"):]
}
if _, ok := m[k]; ok {
- return d, fmt.Errorf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)
+ return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k))
}
m[k] = v
}
@@ -243,13 +245,13 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s
// Get object info - need size and etag here. Also, decryption
// headers are added to the stat request if given.
var objInfo ObjectInfo
- rh := NewGetReqHeaders()
+ opts := StatObjectOptions{}
for k, v := range s.decryptKey.getSSEHeaders(false) {
- rh.Set(k, v)
+ opts.Set(k, v)
}
- objInfo, err = c.statObject(s.bucket, s.object, rh)
+ objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts)
if err != nil {
- err = fmt.Errorf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)
+ err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err))
} else {
size = objInfo.Size
etag = objInfo.ETag
@@ -265,10 +267,105 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s
return
}
+// Low level implementation of CopyObject API, supports only up to 5GiB worth of copy.
+func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
+ metadata map[string]string) (ObjectInfo, error) {
+
+ // Build headers.
+ headers := make(http.Header)
+
+ // Set all the metadata headers.
+ for k, v := range metadata {
+ headers.Set(k, v)
+ }
+
+ // Set the source header
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+
+ // Send upload-part-copy request
+ resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
+ bucketName: destBucket,
+ objectName: destObject,
+ customHeader: headers,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
+ }
+
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+
+ objInfo := ObjectInfo{
+ Key: destObject,
+ ETag: strings.Trim(cpObjRes.ETag, "\""),
+ LastModified: cpObjRes.LastModified,
+ }
+ return objInfo, nil
+}
+
+func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
+ partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {
+
+ headers := make(http.Header)
+
+ // Set source
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+
+ if startOffset < 0 {
+ return p, ErrInvalidArgument("startOffset must be non-negative")
+ }
+
+ if length >= 0 {
+ headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
+ }
+
+ for k, v := range metadata {
+ headers.Set(k, v)
+ }
+
+ queryValues := make(url.Values)
+ queryValues.Set("partNumber", strconv.Itoa(partID))
+ queryValues.Set("uploadId", uploadID)
+
+ resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
+ bucketName: destBucket,
+ objectName: destObject,
+ customHeader: headers,
+ queryValues: queryValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return p, httpRespToErrorResponse(resp, destBucket, destObject)
+ }
+
+ // Decode copy-part response on success.
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return p, err
+ }
+ p.PartNumber, p.ETag = partID, cpObjRes.ETag
+ return p, nil
+}
+
// uploadPartCopy - helper function to create a part in a multipart
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
-func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
+func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
headers http.Header) (p CompletePart, err error) {
// Build query parameters
@@ -277,7 +374,7 @@ func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
urlValues.Set("uploadId", uploadID)
// Send upload-part-copy request
- resp, err := c.executeMethod("PUT", requestMetadata{
+ resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
bucketName: bucket,
objectName: object,
customHeader: headers,
@@ -311,7 +408,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
if len(srcs) < 1 || len(srcs) > maxPartsCount {
return ErrInvalidArgument("There must be as least one and up to 10000 source objects.")
}
-
+ ctx := context.Background()
srcSizes := make([]int64, len(srcs))
var totalSize, size, totalParts int64
var srcUserMeta map[string]string
@@ -320,7 +417,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
for i, src := range srcs {
size, etag, srcUserMeta, err = src.getProps(c)
if err != nil {
- return fmt.Errorf("Could not get source props for %s/%s: %v", src.bucket, src.object, err)
+ return err
}
// Error out if client side encryption is used in this source object when
@@ -396,7 +493,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
}
// Send copy request
- resp, err := c.executeMethod("PUT", requestMetadata{
+ resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
bucketName: dst.bucket,
objectName: dst.object,
customHeader: h,
@@ -426,13 +523,13 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
if len(userMeta) == 0 && len(srcs) == 1 {
metaMap = srcUserMeta
}
- metaHeaders := make(map[string][]string)
+ metaHeaders := make(map[string]string)
for k, v := range metaMap {
- metaHeaders[k] = append(metaHeaders[k], v)
+ metaHeaders[k] = v
}
- uploadID, err := c.newUploadID(dst.bucket, dst.object, metaHeaders)
+ uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{UserMetadata: metaHeaders})
if err != nil {
- return fmt.Errorf("Error creating new upload: %v", err)
+ return err
}
// 2. Perform copy part uploads
@@ -457,10 +554,10 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
fmt.Sprintf("bytes=%d-%d", start, end))
// make upload-part-copy request
- complPart, err := c.uploadPartCopy(dst.bucket,
+ complPart, err := c.uploadPartCopy(ctx, dst.bucket,
dst.object, uploadID, partIndex, h)
if err != nil {
- return fmt.Errorf("Error in upload-part-copy - %v", err)
+ return err
}
objParts = append(objParts, complPart)
partIndex++
@@ -468,12 +565,12 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
}
// 3. Make final complete-multipart request.
- _, err = c.completeMultipartUpload(dst.bucket, dst.object, uploadID,
+ _, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID,
completeMultipartUpload{Parts: objParts})
if err != nil {
- err = fmt.Errorf("Error in complete-multipart request - %v", err)
+ return err
}
- return err
+ return nil
}
// partsRequired is ceiling(size / copyPartSize)
diff --git a/vendor/github.com/minio/minio-go/api-compose-object_test.go b/vendor/github.com/minio/minio-go/api-compose-object_test.go
index 5339d2027..0f22a960b 100644
--- a/vendor/github.com/minio/minio-go/api-compose-object_test.go
+++ b/vendor/github.com/minio/minio-go/api-compose-object_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/api-datatypes.go b/vendor/github.com/minio/minio-go/api-datatypes.go
index ab2aa4af2..63fc08905 100644
--- a/vendor/github.com/minio/minio-go/api-datatypes.go
+++ b/vendor/github.com/minio/minio-go/api-datatypes.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -43,7 +44,7 @@ type ObjectInfo struct {
// Collection of additional metadata on the object.
// eg: x-amz-meta-*, content-encoding etc.
- Metadata http.Header `json:"metadata"`
+ Metadata http.Header `json:"metadata" xml:"-"`
// Owner name.
Owner struct {
diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go
index e0019a334..655991cff 100644
--- a/vendor/github.com/minio/minio-go/api-error-response.go
+++ b/vendor/github.com/minio/minio-go/api-error-response.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,7 +21,6 @@ import (
"encoding/xml"
"fmt"
"net/http"
- "strconv"
)
/* **** SAMPLE ERROR RESPONSE ****
@@ -49,6 +49,9 @@ type ErrorResponse struct {
// only in HEAD bucket and ListObjects response.
Region string
+ // Underlying HTTP status code for the returned error
+ StatusCode int `xml:"-" json:"-"`
+
// Headers of the returned S3 XML error
Headers http.Header `xml:"-" json:"-"`
}
@@ -100,7 +103,10 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
msg := "Response is empty. " + reportIssue
return ErrInvalidArgument(msg)
}
- var errResp ErrorResponse
+
+ errResp := ErrorResponse{
+ StatusCode: resp.StatusCode,
+ }
err := xmlDecoder(resp.Body, &errResp)
// Xml decoding failed with no body, fall back to HTTP headers.
@@ -109,12 +115,14 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
case http.StatusNotFound:
if objectName == "" {
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: "NoSuchBucket",
Message: "The specified bucket does not exist.",
BucketName: bucketName,
}
} else {
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: "NoSuchKey",
Message: "The specified key does not exist.",
BucketName: bucketName,
@@ -123,6 +131,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
}
case http.StatusForbidden:
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: "AccessDenied",
Message: "Access Denied.",
BucketName: bucketName,
@@ -130,12 +139,14 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
}
case http.StatusConflict:
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: "Conflict",
Message: "Bucket not empty.",
BucketName: bucketName,
}
case http.StatusPreconditionFailed:
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: "PreconditionFailed",
Message: s3ErrorResponseMap["PreconditionFailed"],
BucketName: bucketName,
@@ -143,6 +154,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
}
default:
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: resp.Status,
Message: resp.Status,
BucketName: bucketName,
@@ -150,7 +162,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
}
}
- // Save hodID, requestID and region information
+ // Save hostID, requestID and region information
// from headers if not available through error XML.
if errResp.RequestID == "" {
errResp.RequestID = resp.Header.Get("x-amz-request-id")
@@ -162,7 +174,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
errResp.Region = resp.Header.Get("x-amz-bucket-region")
}
if errResp.Code == "InvalidRegion" && errResp.Region != "" {
- errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region)
+ errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
}
// Save headers returned in the API XML error
@@ -173,10 +185,10 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
func ErrTransferAccelerationBucket(bucketName string) error {
- msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").")
return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
Code: "InvalidArgument",
- Message: msg,
+ Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.",
BucketName: bucketName,
}
}
@@ -185,6 +197,7 @@ func ErrTransferAccelerationBucket(bucketName string) error {
func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
Code: "EntityTooLarge",
Message: msg,
BucketName: bucketName,
@@ -194,9 +207,10 @@ func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName st
// ErrEntityTooSmall - Input size is smaller than supported minimum.
func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
- msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", totalSize)
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
return ErrorResponse{
- Code: "EntityTooLarge",
+ StatusCode: http.StatusBadRequest,
+ Code: "EntityTooSmall",
Message: msg,
BucketName: bucketName,
Key: objectName,
@@ -205,9 +219,9 @@ func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
// ErrUnexpectedEOF - Unexpected end of file reached.
func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
- msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
- strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
+ msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize)
return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
Code: "UnexpectedEOF",
Message: msg,
BucketName: bucketName,
@@ -218,18 +232,20 @@ func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
// ErrInvalidBucketName - Invalid bucket name response.
func ErrInvalidBucketName(message string) error {
return ErrorResponse{
- Code: "InvalidBucketName",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidBucketName",
+ Message: message,
+ RequestID: "minio",
}
}
// ErrInvalidObjectName - Invalid object name response.
func ErrInvalidObjectName(message string) error {
return ErrorResponse{
- Code: "NoSuchKey",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusNotFound,
+ Code: "NoSuchKey",
+ Message: message,
+ RequestID: "minio",
}
}
@@ -240,9 +256,10 @@ var ErrInvalidObjectPrefix = ErrInvalidObjectName
// ErrInvalidArgument - Invalid argument response.
func ErrInvalidArgument(message string) error {
return ErrorResponse{
- Code: "InvalidArgument",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidArgument",
+ Message: message,
+ RequestID: "minio",
}
}
@@ -250,9 +267,10 @@ func ErrInvalidArgument(message string) error {
// The specified bucket does not have a bucket policy.
func ErrNoSuchBucketPolicy(message string) error {
return ErrorResponse{
- Code: "NoSuchBucketPolicy",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusNotFound,
+ Code: "NoSuchBucketPolicy",
+ Message: message,
+ RequestID: "minio",
}
}
@@ -260,8 +278,9 @@ func ErrNoSuchBucketPolicy(message string) error {
// The specified API call is not supported
func ErrAPINotSupported(message string) error {
return ErrorResponse{
- Code: "APINotSupported",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusNotImplemented,
+ Code: "APINotSupported",
+ Message: message,
+ RequestID: "minio",
}
}
diff --git a/vendor/github.com/minio/minio-go/api-error-response_test.go b/vendor/github.com/minio/minio-go/api-error-response_test.go
index 595cb50bd..bf10941b4 100644
--- a/vendor/github.com/minio/minio-go/api-error-response_test.go
+++ b/vendor/github.com/minio/minio-go/api-error-response_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -7,7 +8,7 @@
*
* http://www.apache.org/licenses/LICENSE-2.0
*
- * Unless required bZy applicable law or agreed to in writing, software
+ * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
@@ -32,20 +33,23 @@ func TestHttpRespToErrorResponse(t *testing.T) {
// 'genAPIErrorResponse' generates ErrorResponse for given APIError.
// provides an encodable, populated response value.
genAPIErrorResponse := func(err APIError, bucketName string) ErrorResponse {
- var errResp = ErrorResponse{}
- errResp.Code = err.Code
- errResp.Message = err.Description
- errResp.BucketName = bucketName
- return errResp
+ return ErrorResponse{
+ Code: err.Code,
+ Message: err.Description,
+ BucketName: bucketName,
+ }
}
// Encodes the response headers into XML format.
- encodeErr := func(response interface{}) []byte {
- var bytesBuffer bytes.Buffer
- bytesBuffer.WriteString(xml.Header)
- encode := xml.NewEncoder(&bytesBuffer)
- encode.Encode(response)
- return bytesBuffer.Bytes()
+ encodeErr := func(response ErrorResponse) []byte {
+ buf := &bytes.Buffer{}
+ buf.WriteString(xml.Header)
+ encoder := xml.NewEncoder(buf)
+ err := encoder.Encode(response)
+ if err != nil {
+ t.Fatalf("error encoding response: %v", err)
+ }
+ return buf.Bytes()
}
// `createAPIErrorResponse` Mocks XML error response from the server.
@@ -65,6 +69,7 @@ func TestHttpRespToErrorResponse(t *testing.T) {
// 'genErrResponse' constructs an error response based on the HTTP status code
genErrResponse := func(resp *http.Response, code, message, bucketName, objectName string) ErrorResponse {
errResp := ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: code,
Message: message,
BucketName: bucketName,
@@ -80,9 +85,10 @@ func TestHttpRespToErrorResponse(t *testing.T) {
// Generate invalid argument error.
genInvalidError := func(message string) error {
errResp := ErrorResponse{
- Code: "InvalidArgument",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidArgument",
+ Message: message,
+ RequestID: "minio",
}
return errResp
}
@@ -101,22 +107,22 @@ func TestHttpRespToErrorResponse(t *testing.T) {
// Set the StatusCode to the argument supplied.
// Sets common headers.
genEmptyBodyResponse := func(statusCode int) *http.Response {
- resp := &http.Response{}
- // set empty response body.
- resp.Body = ioutil.NopCloser(bytes.NewBuffer([]byte("")))
- // set headers.
+ resp := &http.Response{
+ StatusCode: statusCode,
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
setCommonHeaders(resp)
- // set status code.
- resp.StatusCode = statusCode
return resp
}
// Decode XML error message from the http response body.
- decodeXMLError := func(resp *http.Response, t *testing.T) error {
- var errResp ErrorResponse
+ decodeXMLError := func(resp *http.Response) error {
+ errResp := ErrorResponse{
+ StatusCode: resp.StatusCode,
+ }
err := xmlDecoder(resp.Body, &errResp)
if err != nil {
- t.Fatal("XML decoding of response body failed")
+ t.Fatalf("XML decoding of response body failed: %v", err)
}
return errResp
}
@@ -134,12 +140,12 @@ func TestHttpRespToErrorResponse(t *testing.T) {
// Used for asserting the actual response.
expectedErrResponse := []error{
genInvalidError("Response is empty. " + "Please report this issue at https://github.com/minio/minio-go/issues."),
- decodeXMLError(createAPIErrorResponse(APIErrors[0], "minio-bucket"), t),
- genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchBucket", "The specified bucket does not exist.", "minio-bucket", ""),
- genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchKey", "The specified key does not exist.", "minio-bucket", "Asia/"),
- genErrResponse(setCommonHeaders(&http.Response{}), "AccessDenied", "Access Denied.", "minio-bucket", ""),
- genErrResponse(setCommonHeaders(&http.Response{}), "Conflict", "Bucket not empty.", "minio-bucket", ""),
- genErrResponse(setCommonHeaders(&http.Response{}), "Bad Request", "Bad Request", "minio-bucket", ""),
+ decodeXMLError(createAPIErrorResponse(APIErrors[0], "minio-bucket")),
+ genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusNotFound}), "NoSuchBucket", "The specified bucket does not exist.", "minio-bucket", ""),
+ genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusNotFound}), "NoSuchKey", "The specified key does not exist.", "minio-bucket", "Asia/"),
+ genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusForbidden}), "AccessDenied", "Access Denied.", "minio-bucket", ""),
+ genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusConflict}), "Conflict", "Bucket not empty.", "minio-bucket", ""),
+ genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusBadRequest}), "Bad Request", "Bad Request", "minio-bucket", ""),
}
// List of http response to be used as input.
@@ -182,6 +188,7 @@ func TestHttpRespToErrorResponse(t *testing.T) {
func TestErrEntityTooLarge(t *testing.T) {
msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", 1000000, 99999)
expectedResult := ErrorResponse{
+ StatusCode: http.StatusBadRequest,
Code: "EntityTooLarge",
Message: msg,
BucketName: "minio-bucket",
@@ -189,22 +196,23 @@ func TestErrEntityTooLarge(t *testing.T) {
}
actualResult := ErrEntityTooLarge(1000000, 99999, "minio-bucket", "Asia/")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
// Test validates 'ErrEntityTooSmall' error response.
func TestErrEntityTooSmall(t *testing.T) {
- msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", -1)
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", -1)
expectedResult := ErrorResponse{
- Code: "EntityTooLarge",
+ StatusCode: http.StatusBadRequest,
+ Code: "EntityTooSmall",
Message: msg,
BucketName: "minio-bucket",
Key: "Asia/",
}
actualResult := ErrEntityTooSmall(-1, "minio-bucket", "Asia/")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
@@ -213,6 +221,7 @@ func TestErrUnexpectedEOF(t *testing.T) {
msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
strconv.FormatInt(100, 10), strconv.FormatInt(101, 10))
expectedResult := ErrorResponse{
+ StatusCode: http.StatusBadRequest,
Code: "UnexpectedEOF",
Message: msg,
BucketName: "minio-bucket",
@@ -220,46 +229,49 @@ func TestErrUnexpectedEOF(t *testing.T) {
}
actualResult := ErrUnexpectedEOF(100, 101, "minio-bucket", "Asia/")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
// Test validates 'ErrInvalidBucketName' error response.
func TestErrInvalidBucketName(t *testing.T) {
expectedResult := ErrorResponse{
- Code: "InvalidBucketName",
- Message: "Invalid Bucket name",
- RequestID: "minio",
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidBucketName",
+ Message: "Invalid Bucket name",
+ RequestID: "minio",
}
actualResult := ErrInvalidBucketName("Invalid Bucket name")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
// Test validates 'ErrInvalidObjectName' error response.
func TestErrInvalidObjectName(t *testing.T) {
expectedResult := ErrorResponse{
- Code: "NoSuchKey",
- Message: "Invalid Object Key",
- RequestID: "minio",
+ StatusCode: http.StatusNotFound,
+ Code: "NoSuchKey",
+ Message: "Invalid Object Key",
+ RequestID: "minio",
}
actualResult := ErrInvalidObjectName("Invalid Object Key")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
// Test validates 'ErrInvalidArgument' response.
func TestErrInvalidArgument(t *testing.T) {
expectedResult := ErrorResponse{
- Code: "InvalidArgument",
- Message: "Invalid Argument",
- RequestID: "minio",
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidArgument",
+ Message: "Invalid Argument",
+ RequestID: "minio",
}
actualResult := ErrInvalidArgument("Invalid Argument")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
diff --git a/vendor/github.com/minio/minio-go/transport_1_5.go b/vendor/github.com/minio/minio-go/api-get-object-context.go
index 468daafd3..f8dfac7d6 100644
--- a/vendor/github.com/minio/minio-go/transport_1_5.go
+++ b/vendor/github.com/minio/minio-go/api-get-object-context.go
@@ -1,8 +1,6 @@
-// +build go1.5,!go1.6,!go1.7,!go1.8
-
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,21 +17,10 @@
package minio
-import (
- "net/http"
- "time"
-)
+import "context"
-// This default transport is similar to http.DefaultTransport
-// but with additional DisableCompression:
-var defaultMinioTransport http.RoundTripper = &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- TLSHandshakeTimeout: 10 * time.Second,
- // Set this value so that the underlying transport round-tripper
- // doesn't try to auto decode the body of objects with
- // content-encoding set to `gzip`.
- //
- // Refer:
- // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
- DisableCompression: true,
+// GetObjectWithContext - returns a seekable, readable object.
+// The options can be used to specify the GET request further.
+func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
+ return c.getObjectWithContext(ctx, bucketName, objectName, opts)
}
diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go
index c4193e934..2b58220a6 100644
--- a/vendor/github.com/minio/minio-go/api-get-object-file.go
+++ b/vendor/github.com/minio/minio-go/api-get-object-file.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,11 +22,34 @@ import (
"os"
"path/filepath"
+ "github.com/minio/minio-go/pkg/encrypt"
+
+ "context"
+
"github.com/minio/minio-go/pkg/s3utils"
)
+// FGetObjectWithContext - download contents of an object to a local file.
+// The options can be used to specify the GET request further.
+func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
+ return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts)
+}
+
// FGetObject - download contents of an object to a local file.
-func (c Client) FGetObject(bucketName, objectName, filePath string) error {
+func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error {
+ return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
+}
+
+// FGetEncryptedObject - Decrypt and store an object at filePath.
+func (c Client) FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error {
+ if materials == nil {
+ return ErrInvalidArgument("Unable to recognize empty encryption properties")
+ }
+ return c.FGetObject(bucketName, objectName, filePath, GetObjectOptions{Materials: materials})
+}
+
+// fGetObjectWithContext - fgetObject wrapper function with context
+func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@@ -60,7 +84,7 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error {
}
// Gather md5sum.
- objectStat, err := c.StatObject(bucketName, objectName)
+ objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts})
if err != nil {
return err
}
@@ -82,13 +106,12 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error {
// Initialize get object request headers to set the
// appropriate range offsets to read from.
- reqHeaders := NewGetReqHeaders()
if st.Size() > 0 {
- reqHeaders.SetRange(st.Size(), 0)
+ opts.SetRange(st.Size(), 0)
}
// Seek to current position for incoming reader.
- objectReader, objectStat, err := c.getObject(bucketName, objectName, reqHeaders)
+ objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts)
if err != nil {
return err
}
diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go
index 9bd784ffa..50bbc2201 100644
--- a/vendor/github.com/minio/minio-go/api-get-object.go
+++ b/vendor/github.com/minio/minio-go/api-get-object.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"errors"
"fmt"
"io"
@@ -36,27 +38,16 @@ func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMateria
return nil, ErrInvalidArgument("Unable to recognize empty encryption properties")
}
- // Fetch encrypted object
- encReader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- return nil, err
- }
- // Stat object to get its encryption metadata
- st, err := encReader.Stat()
- if err != nil {
- return nil, err
- }
-
- // Setup object for decrytion, object is transparently
- // decrypted as the consumer starts reading.
- encryptMaterials.SetupDecryptMode(encReader, st.Metadata.Get(amzHeaderIV), st.Metadata.Get(amzHeaderKey))
-
- // Success.
- return encryptMaterials, nil
+ return c.GetObject(bucketName, objectName, GetObjectOptions{Materials: encryptMaterials})
}
// GetObject - returns a seekable, readable object.
-func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
+func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
+ return c.getObjectWithContext(context.Background(), bucketName, objectName, opts)
+}
+
+// GetObject wrapper function that accepts a request context
+func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
@@ -102,34 +93,26 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
if req.isFirstReq {
// First request is a Read/ReadAt.
if req.isReadOp {
- reqHeaders := NewGetReqHeaders()
// Differentiate between wanting the whole object and just a range.
if req.isReadAt {
// If this is a ReadAt request only get the specified range.
// Range is set with respect to the offset and length of the buffer requested.
// Do not set objectInfo from the first readAt request because it will not get
// the whole object.
- reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
- httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
- } else {
- if req.Offset > 0 {
- reqHeaders.SetRange(req.Offset, 0)
- }
-
- // First request is a Read request.
- httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
+ opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+ } else if req.Offset > 0 {
+ opts.SetRange(req.Offset, 0)
}
+ httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
if err != nil {
- resCh <- getResponse{
- Error: err,
- }
+ resCh <- getResponse{Error: err}
return
}
etag = objectInfo.ETag
// Read at least firstReq.Buffer bytes, if not we have
// reached our EOF.
size, err := io.ReadFull(httpReader, req.Buffer)
- if err == io.ErrUnexpectedEOF {
+ if size > 0 && err == io.ErrUnexpectedEOF {
// If an EOF happens after reading some but not
// all the bytes ReadFull returns ErrUnexpectedEOF
err = io.EOF
@@ -144,7 +127,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
} else {
// First request is a Stat or Seek call.
// Only need to run a StatObject until an actual Read or ReadAt request comes through.
- objectInfo, err = c.StatObject(bucketName, objectName)
+ objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts})
if err != nil {
resCh <- getResponse{
Error: err,
@@ -159,11 +142,10 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
}
}
} else if req.settingObjectInfo { // Request is just to get objectInfo.
- reqHeaders := NewGetReqHeaders()
if etag != "" {
- reqHeaders.SetMatchETag(etag)
+ opts.SetMatchETag(etag)
}
- objectInfo, err := c.statObject(bucketName, objectName, reqHeaders)
+ objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts})
if err != nil {
resCh <- getResponse{
Error: err,
@@ -183,9 +165,8 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// new ones when they haven't been already.
// All readAt requests are new requests.
if req.DidOffsetChange || !req.beenRead {
- reqHeaders := NewGetReqHeaders()
if etag != "" {
- reqHeaders.SetMatchETag(etag)
+ opts.SetMatchETag(etag)
}
if httpReader != nil {
// Close previously opened http reader.
@@ -194,16 +175,11 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// If this request is a readAt only get the specified range.
if req.isReadAt {
// Range is set with respect to the offset and length of the buffer requested.
- reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
- httpReader, _, err = c.getObject(bucketName, objectName, reqHeaders)
- } else {
- // Range is set with respect to the offset.
- if req.Offset > 0 {
- reqHeaders.SetRange(req.Offset, 0)
- }
-
- httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
+ opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+ } else if req.Offset > 0 { // Range is set with respect to the offset.
+ opts.SetRange(req.Offset, 0)
}
+ httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
if err != nil {
resCh <- getResponse{
Error: err,
@@ -626,7 +602,7 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<-
//
// For more information about the HTTP Range header.
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
+func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
// Validate input arguments.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, ObjectInfo{}, err
@@ -635,18 +611,12 @@ func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeade
return nil, ObjectInfo{}, err
}
- // Set all the necessary reqHeaders.
- customHeader := make(http.Header)
- for key, value := range reqHeaders.Header {
- customHeader[key] = value
- }
-
// Execute GET on objectName.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- customHeader: customHeader,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(ctx, "GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: opts.Header(),
+ contentSHA256Hex: emptySHA256Hex,
})
if err != nil {
return nil, ObjectInfo{}, err
@@ -692,6 +662,15 @@ func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeade
Metadata: extractObjMetadata(resp.Header),
}
+ reader := resp.Body
+ if opts.Materials != nil {
+ err = opts.Materials.SetupDecryptMode(reader, objectStat.Metadata.Get(amzHeaderIV), objectStat.Metadata.Get(amzHeaderKey))
+ if err != nil {
+ return nil, ObjectInfo{}, err
+ }
+ reader = opts.Materials
+ }
+
// do not close body here, caller will close
- return resp.Body, objectStat, nil
+ return reader, objectStat, nil
}
diff --git a/vendor/github.com/minio/minio-go/request-headers.go b/vendor/github.com/minio/minio-go/api-get-options.go
index 76c87202d..dd70415cd 100644
--- a/vendor/github.com/minio/minio-go/request-headers.go
+++ b/vendor/github.com/minio/minio-go/api-get-options.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016-17 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,80 +21,94 @@ import (
"fmt"
"net/http"
"time"
+
+ "github.com/minio/minio-go/pkg/encrypt"
)
-// RequestHeaders - implement methods for setting special
-// request headers for GET, HEAD object operations.
-// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
-type RequestHeaders struct {
- http.Header
+// GetObjectOptions are used to specify additional headers or options
+// during GET requests.
+type GetObjectOptions struct {
+ headers map[string]string
+
+ Materials encrypt.Materials
+}
+
+// StatObjectOptions are used to specify additional headers or options
+// during GET info/stat requests.
+type StatObjectOptions struct {
+ GetObjectOptions
}
-// NewGetReqHeaders - initializes a new request headers for GET request.
-func NewGetReqHeaders() RequestHeaders {
- return RequestHeaders{
- Header: make(http.Header),
+// Header returns the http.Header representation of the GET options.
+func (o GetObjectOptions) Header() http.Header {
+ headers := make(http.Header, len(o.headers))
+ for k, v := range o.headers {
+ headers.Set(k, v)
}
+ return headers
}
-// NewHeadReqHeaders - initializes a new request headers for HEAD request.
-func NewHeadReqHeaders() RequestHeaders {
- return RequestHeaders{
- Header: make(http.Header),
+// Set adds a key value pair to the options. The
+// key-value pair will be part of the HTTP GET request
+// headers.
+func (o *GetObjectOptions) Set(key, value string) {
+ if o.headers == nil {
+ o.headers = make(map[string]string)
}
+ o.headers[http.CanonicalHeaderKey(key)] = value
}
// SetMatchETag - set match etag.
-func (c RequestHeaders) SetMatchETag(etag string) error {
+func (o *GetObjectOptions) SetMatchETag(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
- c.Set("If-Match", "\""+etag+"\"")
+ o.Set("If-Match", "\""+etag+"\"")
return nil
}
// SetMatchETagExcept - set match etag except.
-func (c RequestHeaders) SetMatchETagExcept(etag string) error {
+func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
- c.Set("If-None-Match", "\""+etag+"\"")
+ o.Set("If-None-Match", "\""+etag+"\"")
return nil
}
// SetUnmodified - set unmodified time since.
-func (c RequestHeaders) SetUnmodified(modTime time.Time) error {
+func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.")
}
- c.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
+ o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
return nil
}
// SetModified - set modified time since.
-func (c RequestHeaders) SetModified(modTime time.Time) error {
+func (o *GetObjectOptions) SetModified(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.")
}
- c.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
+ o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
return nil
}
// SetRange - set the start and end offset of the object to be read.
// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
-func (c RequestHeaders) SetRange(start, end int64) error {
+func (o *GetObjectOptions) SetRange(start, end int64) error {
switch {
case start == 0 && end < 0:
// Read last '-end' bytes. `bytes=-N`.
- c.Set("Range", fmt.Sprintf("bytes=%d", end))
+ o.Set("Range", fmt.Sprintf("bytes=%d", end))
case 0 < start && end == 0:
// Read everything starting from offset
// 'start'. `bytes=N-`.
- c.Set("Range", fmt.Sprintf("bytes=%d-", start))
+ o.Set("Range", fmt.Sprintf("bytes=%d-", start))
case 0 <= start && start <= end:
// Read everything starting at 'start' till the
// 'end'. `bytes=N-M`
- c.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
+ o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
default:
// All other cases such as
// bytes=-3-
diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go
index 10ccdc66b..a4259c9d7 100644
--- a/vendor/github.com/minio/minio-go/api-get-policy.go
+++ b/vendor/github.com/minio/minio-go/api-get-policy.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"encoding/json"
"io/ioutil"
"net/http"
@@ -79,10 +81,10 @@ func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, e
urlValues.Set("policy", "")
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go
index 6de1fe9b3..3cfb47d37 100644
--- a/vendor/github.com/minio/minio-go/api-list.go
+++ b/vendor/github.com/minio/minio-go/api-list.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"errors"
"fmt"
"net/http"
@@ -38,7 +40,7 @@ import (
//
func (c Client) ListBuckets() ([]BucketInfo, error) {
// Execute GET on service.
- resp, err := c.executeMethod("GET", requestMetadata{contentSHA256Bytes: emptySHA256})
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex})
defer closeResponse(resp)
if err != nil {
return nil, err
@@ -215,10 +217,10 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -393,10 +395,10 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -572,10 +574,10 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
// Execute GET on bucketName to list multipart uploads.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -690,11 +692,11 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
// Execute GET on objectName to get list of parts.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go
index 25a283af5..578fdea8e 100644
--- a/vendor/github.com/minio/minio-go/api-notification.go
+++ b/vendor/github.com/minio/minio-go/api-notification.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,6 +19,7 @@ package minio
import (
"bufio"
+ "context"
"encoding/json"
"io"
"net/http"
@@ -46,10 +48,10 @@ func (c Client) getBucketNotification(bucketName string) (BucketNotification, er
urlValues.Set("notification", "")
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
@@ -148,9 +150,9 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
}
// Check ARN partition to verify if listening bucket is supported
- if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) {
notificationInfoCh <- NotificationInfo{
- Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
+ Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
}
return
}
@@ -170,13 +172,16 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
urlValues["events"] = events
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
if err != nil {
- continue
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
}
// Validate http response, upon error return quickly.
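With the change above, request failures surface on the notification channel instead of being silently retried. A usage sketch, assuming the v3-era signature that takes a done channel and returns a NotificationInfo channel; the endpoint, credentials and bucket name are placeholders:

package main

import (
    "log"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        log.Fatal(err)
    }
    doneCh := make(chan struct{})
    defer close(doneCh)
    // Errors - including the non-minio-endpoint case rejected above - now
    // arrive as NotificationInfo.Err on the same channel.
    for info := range client.ListenBucketNotification("my-bucket", "", "", []string{
        "s3:ObjectCreated:*",
    }, doneCh) {
        if info.Err != nil {
            log.Fatal(info.Err)
        }
        log.Printf("event: %+v", info.Records)
    }
}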
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
index c645828df..8b0258948 100644
--- a/vendor/github.com/minio/minio-go/api-presigned.go
+++ b/vendor/github.com/minio/minio-go/api-presigned.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,6 +19,7 @@ package minio
import (
"errors"
+ "net/http"
"net/url"
"time"
@@ -25,16 +27,6 @@ import (
"github.com/minio/minio-go/pkg/s3utils"
)
-// supportedGetReqParams - supported request parameters for GET presigned request.
-var supportedGetReqParams = map[string]struct{}{
- "response-expires": {},
- "response-content-type": {},
- "response-cache-control": {},
- "response-content-language": {},
- "response-content-encoding": {},
- "response-content-disposition": {},
-}
-
// presignURL - Returns a presigned URL for an input 'method'.
// Expires maximum is 7 days - i.e. 604800 seconds - and minimum is 1 second.
func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
@@ -42,42 +34,27 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
if method == "" {
return nil, ErrInvalidArgument("method cannot be empty.")
}
- if err := s3utils.CheckValidBucketName(bucketName); err != nil {
- return nil, err
- }
- if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
}
- if err := isValidExpiry(expires); err != nil {
+ if err = isValidExpiry(expires); err != nil {
return nil, err
}
// Convert expires into seconds.
expireSeconds := int64(expires / time.Second)
reqMetadata := requestMetadata{
- presignURL: true,
- bucketName: bucketName,
- objectName: objectName,
- expires: expireSeconds,
- }
-
- // For "GET" we are handling additional request parameters to
- // override its response headers.
- if method == "GET" {
- // Verify if input map has unsupported params, if yes exit.
- for k := range reqParams {
- if _, ok := supportedGetReqParams[k]; !ok {
- return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
- }
- }
- // Save the request parameters to be used in presigning for GET request.
- reqMetadata.queryValues = reqParams
+ presignURL: true,
+ bucketName: bucketName,
+ objectName: objectName,
+ expires: expireSeconds,
+ queryValues: reqParams,
}
// Instantiate a new request.
// Since expires is set newRequest will presign the request.
- req, err := c.newRequest(method, reqMetadata)
- if err != nil {
+ var req *http.Request
+ if req, err = c.newRequest(method, reqMetadata); err != nil {
return nil, err
}
return req.URL, nil
@@ -88,6 +65,9 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
// up to 7 days or a minimum of 1 sec. Additionally you can override
// a set of response headers using the query parameters.
func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
return c.presignURL("GET", bucketName, objectName, expires, reqParams)
}
@@ -96,6 +76,9 @@ func (c Client) PresignedGetObject(bucketName string, objectName string, expires
// up to 7 days or a minimum of 1 sec. Additionally you can override
// a set of response headers using the query parameters.
func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
}
@@ -103,6 +86,9 @@ func (c Client) PresignedHeadObject(bucketName string, objectName string, expire
// without credentials. URL can have a maximum expiry of up to 7 days
// or a minimum of 1 sec.
func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
return c.presignURL("PUT", bucketName, objectName, expires, nil)
}
@@ -162,7 +148,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
policyBase64 := p.base64()
p.formData["policy"] = policyBase64
// For Google endpoint set this value to be 'GoogleAccessId'.
- if s3utils.IsGoogleEndpoint(c.endpointURL) {
+ if s3utils.IsGoogleEndpoint(*c.endpointURL) {
p.formData["GoogleAccessId"] = accessKeyID
} else {
// For all other endpoints set this value to be 'AWSAccessKeyId'.
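Since the supportedGetReqParams whitelist is gone, response-header overrides now pass through unchecked. A sketch of presigning a GET with such an override; the endpoint, credentials, bucket and object names are placeholder assumptions:

package main

import (
    "fmt"
    "log"
    "net/url"
    "time"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        log.Fatal(err)
    }
    // Any response-* parameter the server understands may now be supplied;
    // the client no longer validates the key against a fixed list.
    reqParams := make(url.Values)
    reqParams.Set("response-content-disposition", `attachment; filename="report.csv"`)
    u, err := client.PresignedGetObject("my-bucket", "report.csv", 15*time.Minute, reqParams)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(u)
}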
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go
index fd37dc192..bb583a78f 100644
--- a/vendor/github.com/minio/minio-go/api-put-bucket.go
+++ b/vendor/github.com/minio/minio-go/api-put-bucket.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,6 +19,7 @@ package minio
import (
"bytes"
+ "context"
"encoding/json"
"encoding/xml"
"fmt"
@@ -75,14 +76,14 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
if err != nil {
return err
}
- reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes)
- reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes)
+ reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
+ reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
reqMetadata.contentLength = int64(len(createBucketConfigBytes))
}
// Execute PUT to create a new bucket.
- resp, err := c.executeMethod("PUT", reqMetadata)
+ resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
@@ -161,16 +162,16 @@ func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAcces
policyBuffer := bytes.NewReader(policyBytes)
reqMetadata := requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentBody: policyBuffer,
- contentLength: int64(len(policyBytes)),
- contentMD5Bytes: sumMD5(policyBytes),
- contentSHA256Bytes: sum256(policyBytes),
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: policyBuffer,
+ contentLength: int64(len(policyBytes)),
+ contentMD5Base64: sumMD5Base64(policyBytes),
+ contentSHA256Hex: sum256Hex(policyBytes),
}
// Execute PUT to upload a new bucket policy.
- resp, err := c.executeMethod("PUT", reqMetadata)
+ resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
@@ -195,10 +196,10 @@ func (c Client) removeBucketPolicy(bucketName string) error {
urlValues.Set("policy", "")
// Execute DELETE on objectName.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -226,16 +227,16 @@ func (c Client) SetBucketNotification(bucketName string, bucketNotification Buck
notifBuffer := bytes.NewReader(notifBytes)
reqMetadata := requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentBody: notifBuffer,
- contentLength: int64(len(notifBytes)),
- contentMD5Bytes: sumMD5(notifBytes),
- contentSHA256Bytes: sum256(notifBytes),
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: notifBuffer,
+ contentLength: int64(len(notifBytes)),
+ contentMD5Base64: sumMD5Base64(notifBytes),
+ contentSHA256Hex: sum256Hex(notifBytes),
}
// Execute PUT to upload a new bucket notification.
- resp, err := c.executeMethod("PUT", reqMetadata)
+ resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
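requestMetadata now carries contentMD5Base64 and contentSHA256Hex as strings, matching the sumMD5Base64/sum256Hex helpers used above. A standalone sketch of producing both encodings for a payload (the JSON body here is purely illustrative):

package main

import (
    "crypto/md5"
    "crypto/sha256"
    "encoding/base64"
    "encoding/hex"
    "fmt"
)

func main() {
    payload := []byte(`{"Version":"2012-10-17","Statement":[]}`) // hypothetical policy body
    // Base64-encoded MD5, the form a Content-MD5 header expects.
    md5Sum := md5.Sum(payload)
    fmt.Println(base64.StdEncoding.EncodeToString(md5Sum[:]))
    // Hex-encoded SHA-256, the form x-amz-content-sha256 expects.
    shaSum := sha256.Sum256(payload)
    fmt.Println(hex.EncodeToString(shaSum[:]))
}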
diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go
index 0158bc1d8..c16c3c69a 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-common.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-common.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"io"
"math"
"os"
@@ -24,12 +26,6 @@ import (
"github.com/minio/minio-go/pkg/s3utils"
)
-// Verify if reader is *os.File
-func isFile(reader io.Reader) (ok bool) {
- _, ok = reader.(*os.File)
- return
-}
-
// Verify if reader is *minio.Object
func isObject(reader io.Reader) (ok bool) {
_, ok = reader.(*Object)
@@ -39,6 +35,26 @@ func isObject(reader io.Reader) (ok bool) {
// Verify if reader is a generic ReaderAt
func isReadAt(reader io.Reader) (ok bool) {
_, ok = reader.(io.ReaderAt)
+ if ok {
+ var v *os.File
+ v, ok = reader.(*os.File)
+ if ok {
+ // Stdin, Stdout and Stderr all have *os.File type,
+ // which also happens to be io.ReaderAt compatible,
+ // so we need special conditions for them to
+ // be ignored by this function.
+ for _, f := range []string{
+ "/dev/stdin",
+ "/dev/stdout",
+ "/dev/stderr",
+ } {
+ if f == v.Name() {
+ ok = false
+ break
+ }
+ }
+ }
+ }
return
}
@@ -77,7 +93,7 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
// getUploadID - fetch upload id if already present for an object name
// or initiate a new request to fetch a new upload id.
-func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
+func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
@@ -87,7 +103,7 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][
}
// Initiate multipart upload for an object.
- initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData)
+ initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
if err != nil {
return "", err
}
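The isReadAt change above works around stdin/stdout/stderr satisfying io.ReaderAt by type while behaving as plain streams. A small sketch of the same idea (isSpecialFile is an illustrative helper, not the vendored function):

package main

import (
    "fmt"
    "io"
    "os"
)

// isSpecialFile reports whether r is one of the standard streams, which
// carry *os.File's ReadAt method but cannot honor random access.
func isSpecialFile(r io.Reader) bool {
    f, ok := r.(*os.File)
    if !ok {
        return false
    }
    switch f.Name() {
    case "/dev/stdin", "/dev/stdout", "/dev/stderr":
        return true
    }
    return false
}

func main() {
    _, byType := io.Reader(os.Stdin).(io.ReaderAt)
    // The type assertion says ReaderAt, the name check says stream.
    fmt.Println(byType, isSpecialFile(os.Stdin)) // true true
}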
diff --git a/vendor/github.com/minio/minio-go/api-put-object-context.go b/vendor/github.com/minio/minio-go/api-put-object-context.go
new file mode 100644
index 000000000..a6f23dcaa
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-context.go
@@ -0,0 +1,39 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+)
+
+// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation.
+func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
+ opts PutObjectOptions) (n int64, err error) {
+ err = opts.validate()
+ if err != nil {
+ return 0, err
+ }
+ if opts.EncryptMaterials != nil {
+ if err = opts.EncryptMaterials.SetupEncryptMode(reader); err != nil {
+ return 0, err
+ }
+ return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, opts.EncryptMaterials, opts)
+ }
+ return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
+}
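A usage sketch for the new entry point, with placeholder endpoint, credentials and bucket; the context bounds the whole upload, and cancelling it aborts the request:

package main

import (
    "context"
    "log"
    "strings"
    "time"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        log.Fatal(err)
    }
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    body := strings.NewReader("hello world")
    // Size is passed explicitly; the old reflection-based size probing is gone.
    n, err := client.PutObjectWithContext(ctx, "my-bucket", "hello.txt",
        body, body.Size(), minio.PutObjectOptions{ContentType: "text/plain"})
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("uploaded %d bytes", n)
}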
diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go
index 32fa873d8..8032009dc 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-copy.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go
index 534a21ecf..87dd1ab1a 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,13 +18,14 @@
package minio
import (
+ "context"
"io"
"github.com/minio/minio-go/pkg/encrypt"
)
// PutEncryptedObject - Encrypt and store object.
-func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int64, err error) {
if encryptMaterials == nil {
return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
@@ -33,14 +35,10 @@ func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Read
return 0, err
}
- if metadata == nil {
- metadata = make(map[string][]string)
- }
-
- // Set the necessary encryption headers, for future decryption.
- metadata[amzHeaderIV] = []string{encryptMaterials.GetIV()}
- metadata[amzHeaderKey] = []string{encryptMaterials.GetKey()}
- metadata[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}
+ return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, -1, PutObjectOptions{EncryptMaterials: encryptMaterials})
+}
- return c.putObjectMultipartStreamNoLength(bucketName, objectName, encryptMaterials, metadata, progress)
+// FPutEncryptedObject - Encrypt and store an object with contents from file at filePath.
+func (c Client) FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int64, err error) {
+ return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, PutObjectOptions{EncryptMaterials: encryptMaterials})
}
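Encryption headers are now derived from PutObjectOptions rather than a caller-supplied metadata map. A sketch of the simplified call, assuming the pkg/encrypt symmetric-key helpers of this vintage; key material, endpoint and names are placeholders:

package main

import (
    "log"
    "strings"

    minio "github.com/minio/minio-go"
    "github.com/minio/minio-go/pkg/encrypt"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        log.Fatal(err)
    }
    // 32-byte key for AES-256 client-side encryption (placeholder value).
    key := encrypt.NewSymmetricKey([]byte("32byteslongsecretkeymustbegiven!"))
    materials, err := encrypt.NewCBCSecureMaterials(key)
    if err != nil {
        log.Fatal(err)
    }
    // The IV/key/material-description headers are filled in internally.
    n, err := client.PutEncryptedObject("my-bucket", "secret.txt",
        strings.NewReader("top secret"), materials)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("stored %d encrypted bytes", n)
}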
diff --git a/vendor/github.com/minio/minio-go/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/api-put-object-file-context.go
new file mode 100644
index 000000000..140a9c069
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-file-context.go
@@ -0,0 +1,64 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "mime"
+ "os"
+ "path/filepath"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
+func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Open the referenced file.
+ fileReader, err := os.Open(filePath)
+ // If any error fail quickly here.
+ if err != nil {
+ return 0, err
+ }
+ defer fileReader.Close()
+
+ // Save the file stat.
+ fileStat, err := fileReader.Stat()
+ if err != nil {
+ return 0, err
+ }
+
+ // Save the file size.
+ fileSize := fileStat.Size()
+
+ // Set contentType based on filepath extension if not given or default
+ // value of "application/octet-stream" if the extension has no associated type.
+ if opts.ContentType == "" {
+ if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" {
+ opts.ContentType = "application/octet-stream"
+ }
+ }
+ return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts)
+}
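The content-type fallback above is self-contained enough to sketch on its own; detectContentType is an illustrative helper mirroring that logic:

package main

import (
    "fmt"
    "mime"
    "path/filepath"
)

// detectContentType mirrors the fallback used above: look the extension up
// in the MIME table, else default to application/octet-stream.
func detectContentType(path string) string {
    if ct := mime.TypeByExtension(filepath.Ext(path)); ct != "" {
        return ct
    }
    return "application/octet-stream"
}

func main() {
    fmt.Println(detectContentType("photo.png")) // image/png
    fmt.Println(detectContentType("blob.xyz"))  // application/octet-stream
}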
diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go
index 81cdf5c2c..7c8e05117 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-file.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-file.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,50 +18,10 @@
package minio
import (
- "mime"
- "os"
- "path/filepath"
-
- "github.com/minio/minio-go/pkg/s3utils"
+ "context"
)
-// FPutObject - Create an object in a bucket, with contents from file at filePath.
-func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
- // Input validation.
- if err := s3utils.CheckValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := s3utils.CheckValidObjectName(objectName); err != nil {
- return 0, err
- }
-
- // Open the referenced file.
- fileReader, err := os.Open(filePath)
- // If any error fail quickly here.
- if err != nil {
- return 0, err
- }
- defer fileReader.Close()
-
- // Save the file stat.
- fileStat, err := fileReader.Stat()
- if err != nil {
- return 0, err
- }
-
- // Save the file size.
- fileSize := fileStat.Size()
-
- objMetadata := make(map[string][]string)
-
- // Set contentType based on filepath extension if not given or default
- // value of "binary/octet-stream" if the extension has no associated type.
- if contentType == "" {
- if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" {
- contentType = "application/octet-stream"
- }
- }
-
- objMetadata["Content-Type"] = []string{contentType}
- return c.putObjectCommon(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
+// FPutObject - Create an object in a bucket, with contents from file at filePath
+func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
+ return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
}
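Callers move from a positional contentType string to the options struct. A usage sketch for the new signature, with placeholder names:

package main

import (
    "log"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        log.Fatal(err)
    }
    // Previously: FPutObject(bucket, object, path, "application/gzip").
    // Now the content type rides in PutObjectOptions with the other headers.
    n, err := client.FPutObject("my-bucket", "backup.tar.gz", "/tmp/backup.tar.gz",
        minio.PutObjectOptions{ContentType: "application/gzip"})
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("uploaded %d bytes", n)
}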
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
index aefeb5f26..f5b8893e6 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,6 +19,9 @@ package minio
import (
"bytes"
+ "context"
+ "encoding/base64"
+ "encoding/hex"
"encoding/xml"
"fmt"
"io"
@@ -32,9 +36,9 @@ import (
"github.com/minio/minio-go/pkg/s3utils"
)
-func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64,
- metadata map[string][]string, progress io.Reader) (n int64, err error) {
- n, err = c.putObjectMultipartNoStream(bucketName, objectName, reader, metadata, progress)
+func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
+ opts PutObjectOptions) (n int64, err error) {
+ n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
@@ -45,13 +49,13 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
}
return n, err
}
-func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
@@ -74,14 +78,14 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
}
// Initiate a new multipart upload.
- uploadID, err := c.newUploadID(bucketName, objectName, metadata)
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
- c.abortMultipartUpload(bucketName, objectName, uploadID)
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
@@ -117,12 +121,24 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
// Update progress reader appropriately to the latest offset
// as we read from the source.
- rd := newHook(bytes.NewReader(buf[:length]), progress)
+ rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+
+ // Checksums.
+ var (
+ md5Base64 string
+ sha256Hex string
+ )
+ if hashSums["md5"] != nil {
+ md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
+ }
+ if hashSums["sha256"] != nil {
+ sha256Hex = hex.EncodeToString(hashSums["sha256"])
+ }
// Proceed to upload the part.
var objPart ObjectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber,
- hashSums["md5"], hashSums["sha256"], int64(length), metadata)
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
+ md5Base64, sha256Hex, int64(length), opts.UserMetadata)
if err != nil {
return totalUploadedSize, err
}
@@ -158,7 +174,7 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
- if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil {
+ if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil {
return totalUploadedSize, err
}
@@ -167,7 +183,7 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
-func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata map[string][]string) (initiateMultipartUploadResult, error) {
+func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
@@ -181,17 +197,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata
urlValues.Set("uploads", "")
// Set ContentType header.
- customHeader := make(http.Header)
- for k, v := range metadata {
- if len(v) > 0 {
- customHeader.Set(k, v[0])
- }
- }
-
- // Set a default content-type header if the latter is not provided
- if v, ok := metadata["Content-Type"]; !ok || len(v) == 0 {
- customHeader.Set("Content-Type", "application/octet-stream")
- }
+ customHeader := opts.Header()
reqMetadata := requestMetadata{
bucketName: bucketName,
@@ -201,7 +207,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata
}
// Execute POST on an objectName to initiate multipart upload.
- resp, err := c.executeMethod("POST", reqMetadata)
+ resp, err := c.executeMethod(ctx, "POST", reqMetadata)
defer closeResponse(resp)
if err != nil {
return initiateMultipartUploadResult{}, err
@@ -223,8 +229,8 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata
const serverEncryptionKeyPrefix = "x-amz-server-side-encryption"
// uploadPart - Uploads a part in a multipart upload.
-func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader,
- partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string][]string) (ObjectPart, error) {
+func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
+ partNumber int, md5Base64, sha256Hex string, size int64, metadata map[string]string) (ObjectPart, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectPart{}, err
@@ -257,24 +263,24 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
for k, v := range metadata {
if len(v) > 0 {
if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) {
- customHeader.Set(k, v[0])
+ customHeader.Set(k, v)
}
}
}
reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- customHeader: customHeader,
- contentBody: reader,
- contentLength: size,
- contentMD5Bytes: md5Sum,
- contentSHA256Bytes: sha256Sum,
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ customHeader: customHeader,
+ contentBody: reader,
+ contentLength: size,
+ contentMD5Base64: md5Base64,
+ contentSHA256Hex: sha256Hex,
}
// Execute PUT on each part.
- resp, err := c.executeMethod("PUT", reqMetadata)
+ resp, err := c.executeMethod(ctx, "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectPart{}, err
@@ -295,7 +301,7 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
}
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
-func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
+func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
@@ -308,7 +314,6 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID)
-
// Marshal complete multipart body.
completeMultipartUploadBytes, err := xml.Marshal(complete)
if err != nil {
@@ -318,16 +323,16 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
// Instantiate all the complete multipart buffer.
completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- contentBody: completeMultipartUploadBuffer,
- contentLength: int64(len(completeMultipartUploadBytes)),
- contentSHA256Bytes: sum256(completeMultipartUploadBytes),
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: completeMultipartUploadBuffer,
+ contentLength: int64(len(completeMultipartUploadBytes)),
+ contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
}
// Execute POST to complete multipart upload for an objectName.
- resp, err := c.executeMethod("POST", reqMetadata)
+ resp, err := c.executeMethod(ctx, "POST", reqMetadata)
defer closeResponse(resp)
if err != nil {
return completeMultipartUploadResult{}, err
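completeMultipartUpload depends on parts arriving sorted, which matters once parallel workers finish out of order. A self-contained sketch of that sort (local illustrative types, not the vendored ones):

package main

import (
    "fmt"
    "sort"
)

// completedPart mimics the shape of a finished part: its number plus the
// ETag the server returned for it.
type completedPart struct {
    PartNumber int
    ETag       string
}

type completedParts []completedPart

func (a completedParts) Len() int           { return len(a) }
func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

func main() {
    // Parallel uploads complete in arbitrary order; CompleteMultipartUpload
    // requires ascending part numbers.
    parts := completedParts{{3, `"c"`}, {1, `"a"`}, {2, `"b"`}}
    sort.Sort(parts)
    fmt.Println(parts) // [{1 "a"} {2 "b"} {3 "c"}]
}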
diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
index 40cd5c252..be1dc57ef 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-streaming.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"fmt"
"io"
"net/http"
@@ -26,33 +28,23 @@ import (
"github.com/minio/minio-go/pkg/s3utils"
)
-// PutObjectStreaming using AWS streaming signature V4
-func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
- return c.PutObjectWithProgress(bucketName, objectName, reader, nil, nil)
-}
-
// putObjectMultipartStream - upload a large object using
// multipart upload and streaming signature for signing payload.
// Comprehensive put object operation involving multipart uploads.
//
// Following code handles these types of readers.
//
-// - *os.File
// - *minio.Object
// - Any reader which has a method 'ReadAt()'
//
-func (c Client) putObjectMultipartStream(bucketName, objectName string,
- reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
-
- // Verify if reader is *minio.Object, *os.File or io.ReaderAt.
- // NOTE: Verification of object is kept for a specific purpose
- // while it is going to be duck typed similar to io.ReaderAt.
- // It is to indicate that *minio.Object implements io.ReaderAt.
- // and such a functionality is used in the subsequent code path.
- if isFile(reader) || !isObject(reader) && isReadAt(reader) {
- n, err = c.putObjectMultipartStreamFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metadata, progress)
+func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
+ reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
+
+ if !isObject(reader) && isReadAt(reader) {
+ // If the reader implements ReadAt and is not a *minio.Object, use the parallel uploader.
+ n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
} else {
- n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
if err != nil {
errResp := ToErrorResponse(err)
@@ -64,7 +56,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string,
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
}
return n, err
@@ -94,8 +86,8 @@ type uploadPartReq struct {
// temporary files for staging all the data, these temporary files are
// cleaned automatically when the caller i.e http client closes the
// stream after uploading all the contents successfully.
-func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string,
- reader io.ReaderAt, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
+ reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
@@ -111,7 +103,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
}
// Initiate a new multipart upload.
- uploadID, err := c.newUploadID(bucketName, objectName, metadata)
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return 0, err
}
@@ -122,7 +114,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
// to relinquish storage space.
defer func() {
if err != nil {
- c.abortMultipartUpload(bucketName, objectName, uploadID)
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
@@ -150,9 +142,8 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
}
close(uploadPartsCh)
-
// Receive each part number from the channel allowing three parallel uploads.
- for w := 1; w <= totalWorkers; w++ {
+ for w := 1; w <= opts.getNumThreads(); w++ {
go func(partSize int64) {
// Each worker will draw from the part channel and upload in parallel.
for uploadReq := range uploadPartsCh {
@@ -170,13 +161,13 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
}
// Get a section reader on a particular offset.
- sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), progress)
+ sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
// Proceed to upload the part.
var objPart ObjectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID,
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
sectionReader, uploadReq.PartNum,
- nil, nil, partSize, metadata)
+ "", "", partSize, opts.UserMetadata)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Size: 0,
@@ -229,7 +220,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
@@ -238,8 +229,8 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
return totalUploadedSize, nil
}
-func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
- reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string,
+ reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
@@ -253,9 +244,8 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
if err != nil {
return 0, err
}
-
// Initiates a new multipart request
- uploadID, err := c.newUploadID(bucketName, objectName, metadata)
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return 0, err
}
@@ -266,7 +256,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
// storage space.
defer func() {
if err != nil {
- c.abortMultipartUpload(bucketName, objectName, uploadID)
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
@@ -281,17 +271,16 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
// Update progress reader appropriately to the latest offset
// as we read from the source.
- hookReader := newHook(reader, progress)
+ hookReader := newHook(reader, opts.Progress)
// Proceed to upload the part.
if partNumber == totalPartsCount {
partSize = lastPartSize
}
-
var objPart ObjectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID,
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
io.LimitReader(hookReader, partSize),
- partNumber, nil, nil, partSize, metadata)
+ partNumber, "", "", partSize, opts.UserMetadata)
if err != nil {
return totalUploadedSize, err
}
@@ -328,7 +317,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
@@ -339,7 +328,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
// putObjectNoChecksum is a special function used for Google Cloud Storage,
// since Google's multipart API is not S3 compatible.
-func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
@@ -350,22 +339,27 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// Size -1 is only supported on Google Cloud Storage, we error
// out in all other situations.
- if size < 0 && !s3utils.IsGoogleEndpoint(c.endpointURL) {
+ if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) {
return 0, ErrEntityTooSmall(size, bucketName, objectName)
}
if size > 0 {
if isReadAt(reader) && !isObject(reader) {
- reader = io.NewSectionReader(reader.(io.ReaderAt), 0, size)
+ seeker, _ := reader.(io.Seeker)
+ offset, err := seeker.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return 0, ErrInvalidArgument(err.Error())
+ }
+ reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
}
}
// Update progress reader appropriately to the latest offset as we
// read from the source.
- readSeeker := newHook(reader, progress)
+ readSeeker := newHook(reader, opts.Progress)
// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
+ st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts)
if err != nil {
return 0, err
}
@@ -377,7 +371,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
+func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
@@ -385,35 +379,22 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
-
// Set headers.
- customHeader := make(http.Header)
-
- // Set metadata to headers
- for k, v := range metaData {
- if len(v) > 0 {
- customHeader.Set(k, v[0])
- }
- }
-
- // If Content-Type is not provided, set the default application/octet-stream one
- if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
- customHeader.Set("Content-Type", "application/octet-stream")
- }
+ customHeader := opts.Header()
// Populate request metadata.
reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- customHeader: customHeader,
- contentBody: reader,
- contentLength: size,
- contentMD5Bytes: md5Sum,
- contentSHA256Bytes: sha256Sum,
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ contentBody: reader,
+ contentLength: size,
+ contentMD5Base64: md5Base64,
+ contentSHA256Hex: sha256Hex,
}
// Execute PUT an objectName.
- resp, err := c.executeMethod("PUT", reqMetadata)
+ resp, err := c.executeMethod(ctx, "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
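Progress reporting now hangs off opts.Progress, wrapped via newHook around each part reader. A minimal reimplementation of the hook idea (hookReader here is illustrative, not the vendored type):

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"
)

// hookReader forwards Reads to the underlying source while counting the
// bytes consumed, which is all a progress reader needs to observe.
type hookReader struct {
    src  io.Reader
    seen int64
}

func (h *hookReader) Read(p []byte) (int, error) {
    n, err := h.src.Read(p)
    h.seen += int64(n)
    return n, err
}

func main() {
    h := &hookReader{src: strings.NewReader(strings.Repeat("x", 4096))}
    if _, err := io.Copy(ioutil.Discard, h); err != nil {
        fmt.Println(err)
    }
    fmt.Printf("consumed %d bytes\n", h.seen) // consumed 4096 bytes
}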
diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go
index 94db82593..6d90eab74 100644
--- a/vendor/github.com/minio/minio-go/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/api-put-object.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,119 +19,91 @@ package minio
import (
"bytes"
+ "context"
"fmt"
"io"
- "os"
- "reflect"
- "runtime"
+ "net/http"
"runtime/debug"
"sort"
- "strings"
+ "github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
+ "golang.org/x/net/lex/httplex"
)
-// toInt - converts go value to its integer representation based
-// on the value kind if it is an integer.
-func toInt(value reflect.Value) (size int64) {
- size = -1
- if value.IsValid() {
- switch value.Kind() {
- case reflect.Int:
- fallthrough
- case reflect.Int8:
- fallthrough
- case reflect.Int16:
- fallthrough
- case reflect.Int32:
- fallthrough
- case reflect.Int64:
- size = value.Int()
- }
+// PutObjectOptions represents options specified by user for PutObject call
+type PutObjectOptions struct {
+ UserMetadata map[string]string
+ Progress io.Reader
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ CacheControl string
+ EncryptMaterials encrypt.Materials
+ NumThreads uint
+ StorageClass string
+}
+
+// getNumThreads - gets the number of threads to be used in the multipart
+// put object operation
+func (opts PutObjectOptions) getNumThreads() (numThreads int) {
+ if opts.NumThreads > 0 {
+ numThreads = int(opts.NumThreads)
+ } else {
+ numThreads = totalWorkers
}
- return size
+ return
}
-// getReaderSize - Determine the size of Reader if available.
-func getReaderSize(reader io.Reader) (size int64, err error) {
- size = -1
- if reader == nil {
- return -1, nil
+// Header - constructs the headers from the metadata entered by the user in
+// the PutObjectOptions struct.
+func (opts PutObjectOptions) Header() (header http.Header) {
+ header = make(http.Header)
+
+ if opts.ContentType != "" {
+ header["Content-Type"] = []string{opts.ContentType}
+ } else {
+ header["Content-Type"] = []string{"application/octet-stream"}
+ }
+ if opts.ContentEncoding != "" {
+ header["Content-Encoding"] = []string{opts.ContentEncoding}
+ }
+ if opts.ContentDisposition != "" {
+ header["Content-Disposition"] = []string{opts.ContentDisposition}
}
- // Verify if there is a method by name 'Size'.
- sizeFn := reflect.ValueOf(reader).MethodByName("Size")
- // Verify if there is a method by name 'Len'.
- lenFn := reflect.ValueOf(reader).MethodByName("Len")
- if sizeFn.IsValid() {
- if sizeFn.Kind() == reflect.Func {
- // Call the 'Size' function and save its return value.
- result := sizeFn.Call([]reflect.Value{})
- if len(result) == 1 {
- size = toInt(result[0])
- }
+ if opts.CacheControl != "" {
+ header["Cache-Control"] = []string{opts.CacheControl}
+ }
+ if opts.EncryptMaterials != nil {
+ header[amzHeaderIV] = []string{opts.EncryptMaterials.GetIV()}
+ header[amzHeaderKey] = []string{opts.EncryptMaterials.GetKey()}
+ header[amzHeaderMatDesc] = []string{opts.EncryptMaterials.GetDesc()}
+ }
+ if opts.StorageClass != "" {
+ header[amzStorageClass] = []string{opts.StorageClass}
+ }
+ for k, v := range opts.UserMetadata {
+ if !isAmzHeader(k) && !isStandardHeader(k) && !isSSEHeader(k) && !isStorageClassHeader(k) {
+ header["X-Amz-Meta-"+k] = []string{v}
+ } else {
+ header[k] = []string{v}
}
- } else if lenFn.IsValid() {
- if lenFn.Kind() == reflect.Func {
- // Call the 'Len' function and save its return value.
- result := lenFn.Call([]reflect.Value{})
- if len(result) == 1 {
- size = toInt(result[0])
- }
+ }
+ return
+}
+
+// validate() checks if the UserMetadata map has standard headers or client side
+// encryption headers and raises an error if so.
+func (opts PutObjectOptions) validate() (err error) {
+ for k, v := range opts.UserMetadata {
+ if !httplex.ValidHeaderFieldName(k) || isStandardHeader(k) || isCSEHeader(k) || isStorageClassHeader(k) {
+ return ErrInvalidArgument(k + " unsupported user defined metadata name")
}
- } else {
- // Fallback to Stat() method, two possible Stat() structs exist.
- switch v := reader.(type) {
- case *os.File:
- var st os.FileInfo
- st, err = v.Stat()
- if err != nil {
- // Handle this case specially for "windows",
- // certain files for example 'Stdin', 'Stdout' and
- // 'Stderr' it is not allowed to fetch file information.
- if runtime.GOOS == "windows" {
- if strings.Contains(err.Error(), "GetFileInformationByHandle") {
- return -1, nil
- }
- }
- return
- }
- // Ignore if input is a directory, throw an error.
- if st.Mode().IsDir() {
- return -1, ErrInvalidArgument("Input file cannot be a directory.")
- }
- // Ignore 'Stdin', 'Stdout' and 'Stderr', since they
- // represent *os.File type but internally do not
- // implement Seekable calls. Ignore them and treat
- // them like a stream with unknown length.
- switch st.Name() {
- case "stdin", "stdout", "stderr":
- return
- // Ignore read/write stream of os.Pipe() which have unknown length too.
- case "|0", "|1":
- return
- }
- var pos int64
- pos, err = v.Seek(0, 1) // SeekCurrent.
- if err != nil {
- return -1, err
- }
- size = st.Size() - pos
- case *Object:
- var st ObjectInfo
- st, err = v.Stat()
- if err != nil {
- return
- }
- var pos int64
- pos, err = v.Seek(0, 1) // SeekCurrent.
- if err != nil {
- return -1, err
- }
- size = st.Size - pos
+ if !httplex.ValidHeaderFieldValue(v) {
+ return ErrInvalidArgument(v + " unsupported user defined metadata value")
}
}
- // Returns the size here.
- return size, err
+ return nil
}
// completedParts is a collection of parts sortable by their part numbers.
@@ -152,72 +125,41 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
// - For size input as -1 PutObject does a multipart Put operation
// until input stream reaches EOF. Maximum object size that can
// be uploaded through this operation will be 5TiB.
-func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
- return c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{
- "Content-Type": []string{contentType},
- }, nil)
+func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64,
+ opts PutObjectOptions) (n int64, err error) {
+ return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts)
}
-// PutObjectWithSize - is a helper PutObject similar in behavior to PutObject()
-// but takes the size argument explicitly, this function avoids doing reflection
-// internally to figure out the size of input stream. Also if the input size is
-// lesser than 0 this function returns an error.
-func (c Client) PutObjectWithSize(bucketName, objectName string, reader io.Reader, readerSize int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
- return c.putObjectCommon(bucketName, objectName, reader, readerSize, metadata, progress)
-}
-
-// PutObjectWithMetadata using AWS streaming signature V4
-func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
- return c.PutObjectWithProgress(bucketName, objectName, reader, metadata, progress)
-}
-
-// PutObjectWithProgress using AWS streaming signature V4
-func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
- // Size of the object.
- var size int64
-
- // Get reader size.
- size, err = getReaderSize(reader)
- if err != nil {
- return 0, err
- }
-
- return c.putObjectCommon(bucketName, objectName, reader, size, metadata, progress)
-}
-
-func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}
// NOTE: Streaming signature is not supported by GCS.
- if s3utils.IsGoogleEndpoint(c.endpointURL) {
+ if s3utils.IsGoogleEndpoint(*c.endpointURL) {
// Do not compute MD5 for Google Cloud Storage.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
if c.overrideSignerType.IsV2() {
if size >= 0 && size < minPartSize {
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
- return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts)
}
-
if size < 0 {
- return c.putObjectMultipartStreamNoLength(bucketName, objectName, reader, metadata, progress)
+ return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
}
if size < minPartSize {
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
-
// For all sizes greater than 64MiB do multipart.
- return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
}
-func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, reader io.Reader, metadata map[string][]string,
- progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
@@ -238,16 +180,15 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string,
if err != nil {
return 0, err
}
-
// Initiate a new multipart upload.
- uploadID, err := c.newUploadID(bucketName, objectName, metadata)
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
- c.abortMultipartUpload(bucketName, objectName, uploadID)
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
@@ -263,21 +204,20 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string,
for partNumber <= totalPartsCount {
length, rErr := io.ReadFull(reader, buf)
- if rErr == io.EOF {
+ if rErr == io.EOF && partNumber > 1 {
break
}
if rErr != nil && rErr != io.ErrUnexpectedEOF {
return 0, rErr
}
-
// Update progress reader appropriately to the latest offset
// as we read from the source.
- rd := newHook(bytes.NewReader(buf[:length]), progress)
+ rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
// Proceed to upload the part.
var objPart ObjectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber,
- nil, nil, int64(length), metadata)
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
+ "", "", int64(length), opts.UserMetadata)
if err != nil {
return totalUploadedSize, err
}
@@ -313,7 +253,7 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string,
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
- if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil {
+ if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil {
return totalUploadedSize, err
}
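Pulling the new surface together: every per-request knob now lives on PutObjectOptions, and unrecognized UserMetadata keys gain an X-Amz-Meta- prefix when headers are built. A usage sketch with placeholder endpoint, file and bucket:

package main

import (
    "log"
    "os"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        log.Fatal(err)
    }
    f, err := os.Open("/tmp/archive.zip")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    st, err := f.Stat()
    if err != nil {
        log.Fatal(err)
    }
    opts := minio.PutObjectOptions{
        ContentType:  "application/zip",
        CacheControl: "max-age=600",
        NumThreads:   4, // workers for the multipart path
        // Sent as X-Amz-Meta-Origin since it is not a recognized header.
        UserMetadata: map[string]string{"origin": "nightly-build"},
    }
    n, err := client.PutObject("my-bucket", "archive.zip", f, st.Size(), opts)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("uploaded %d bytes", n)
}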
diff --git a/vendor/github.com/minio/minio-go/api-put-object_test.go b/vendor/github.com/minio/minio-go/api-put-object_test.go
new file mode 100644
index 000000000..2b8c1e2bb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object_test.go
@@ -0,0 +1,62 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package minio
+
+import (
+ "testing"
+)
+
+func TestPutObjectOptionsValidate(t *testing.T) {
+ testCases := []struct {
+ name, value string
+ shouldPass bool
+ }{
+ // Invalid cases.
+ {"X-Amz-Matdesc", "blah", false},
+ {"x-amz-meta-X-Amz-Iv", "blah", false},
+ {"x-amz-meta-X-Amz-Key", "blah", false},
+ {"x-amz-meta-X-Amz-Matdesc", "blah", false},
+ {"It has spaces", "v", false},
+ {"It,has@illegal=characters", "v", false},
+ {"X-Amz-Iv", "blah", false},
+ {"X-Amz-Key", "blah", false},
+ {"X-Amz-Key-prefixed-header", "blah", false},
+ {"Content-Type", "custom/content-type", false},
+ {"content-type", "custom/content-type", false},
+ {"Content-Encoding", "gzip", false},
+ {"Cache-Control", "blah", false},
+ {"Content-Disposition", "something", false},
+
+ // Valid metadata names.
+ {"my-custom-header", "blah", true},
+ {"custom-X-Amz-Key-middle", "blah", true},
+ {"my-custom-header-X-Amz-Key", "blah", true},
+ {"blah-X-Amz-Matdesc", "blah", true},
+ {"X-Amz-MatDesc-suffix", "blah", true},
+ {"It-Is-Fine", "v", true},
+ {"Numbers-098987987-Should-Work", "v", true},
+ {"Crazy-!#$%&'*+-.^_`|~-Should-193832-Be-Fine", "v", true},
+ }
+ for i, testCase := range testCases {
+ err := PutObjectOptions{UserMetadata: map[string]string{
+ testCase.name: testCase.value,
+ }}.validate()
+ if testCase.shouldPass && err != nil {
+ t.Errorf("Test %d - expected to pass but failed with: %s", i+1, err)
+ }
+ if !testCase.shouldPass && err == nil {
+ t.Errorf("Test %d - expected to fail but passed", i+1)
+ }
+ }
+}
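
For context, a usage sketch of the options type this test exercises (endpoint and credentials are placeholders, not from the patch): standard headers travel in dedicated `PutObjectOptions` fields, while `UserMetadata` is reserved for custom keys, and the reserved names listed above fail `validate()`.

```go
package main

import (
	"log"
	"strings"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true)
	if err != nil {
		log.Fatal(err)
	}
	data := strings.NewReader("hello")
	// Content-Type goes through its dedicated field; UserMetadata carries
	// only custom names, which validate() checks against the reserved set.
	n, err := client.PutObject("mybucket", "myobject", data, data.Size(),
		minio.PutObjectOptions{
			ContentType:  "application/octet-stream",
			UserMetadata: map[string]string{"my-custom-header": "blah"},
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded", n, "bytes")
}
```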
diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go
index 3574cbc1a..f14b2eb7f 100644
--- a/vendor/github.com/minio/minio-go/api-remove.go
+++ b/vendor/github.com/minio/minio-go/api-remove.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,6 +19,7 @@ package minio
import (
"bytes"
+ "context"
"encoding/xml"
"io"
"net/http"
@@ -36,9 +38,9 @@ func (c Client) RemoveBucket(bucketName string) error {
return err
}
// Execute DELETE on bucket.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+ bucketName: bucketName,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -66,10 +68,10 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
return err
}
// Execute DELETE on objectName.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -187,13 +189,13 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
// Generate remove multi objects XML request
removeBytes := generateRemoveMultiObjectsRequest(batch)
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("POST", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentBody: bytes.NewReader(removeBytes),
- contentLength: int64(len(removeBytes)),
- contentMD5Bytes: sumMD5(removeBytes),
- contentSHA256Bytes: sum256(removeBytes),
+ resp, err := c.executeMethod(context.Background(), "POST", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(removeBytes),
+ contentLength: int64(len(removeBytes)),
+ contentMD5Base64: sumMD5Base64(removeBytes),
+ contentSHA256Hex: sum256Hex(removeBytes),
})
if err != nil {
for _, b := range batch {
@@ -227,7 +229,7 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
}
if uploadID != "" {
// Upload id found, abort the incomplete multipart upload.
- err := c.abortMultipartUpload(bucketName, objectName, uploadID)
+ err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID)
if err != nil {
return err
}
@@ -237,7 +239,7 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// abortMultipartUpload aborts a multipart upload for the given
// uploadID, all previously uploaded parts are deleted.
-func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
+func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@@ -251,11 +253,11 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
urlValues.Set("uploadId", uploadID)
// Execute DELETE on multipart upload.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
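
A usage sketch for the batched delete path above (placeholder endpoint and credentials): `RemoveObjects` drains a channel of object names, groups them into multi-object DELETE requests like the one built in this hunk, and streams per-object failures back.

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true) // placeholders
	if err != nil {
		log.Fatal(err)
	}
	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, name := range []string{"a.txt", "b.txt"} {
			objectsCh <- name
		}
	}()
	// Only failed removals come back on the error channel.
	for rErr := range client.RemoveObjects("mybucket", objectsCh) {
		log.Println("failed to remove", rErr.ObjectName, ":", rErr.Err)
	}
}
```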
diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
index 4b297407b..8d8880c05 100644
--- a/vendor/github.com/minio/minio-go/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -128,7 +129,7 @@ type initiator struct {
// copyObjectResult container for copy object response.
type copyObjectResult struct {
ETag string
- LastModified string // time string format "2006-01-02T15:04:05.000Z"
+ LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
}
// ObjectPart container for particular part of an object.
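
The `string` → `time.Time` change works because `encoding/xml` special-cases `time.Time` and parses RFC3339 timestamps, which is the format S3 uses for `LastModified`. A minimal sketch:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

type copyObjectResult struct {
	ETag         string
	LastModified time.Time // decoded directly from the RFC3339 string
}

func main() {
	payload := `<CopyObjectResult>
  <ETag>"abc123"</ETag>
  <LastModified>2017-06-12T15:04:05.000Z</LastModified>
</CopyObjectResult>`
	var res copyObjectResult
	if err := xml.Unmarshal([]byte(payload), &res); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(res.LastModified.UTC()) // 2017-06-12 15:04:05 +0000 UTC
}
```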
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
index 5f06bfc9e..8904dd678 100644
--- a/vendor/github.com/minio/minio-go/api-stat.go
+++ b/vendor/github.com/minio/minio-go/api-stat.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"net/http"
"strconv"
"strings"
@@ -33,9 +35,9 @@ func (c Client) BucketExists(bucketName string) (bool, error) {
}
// Execute HEAD on bucketName.
- resp, err := c.executeMethod("HEAD", requestMetadata{
- bucketName: bucketName,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{
+ bucketName: bucketName,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -80,7 +82,7 @@ func extractObjMetadata(header http.Header) http.Header {
}
// StatObject verifies if object exists and you have permission to access it.
-func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
+func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
@@ -88,12 +90,11 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
- reqHeaders := NewHeadReqHeaders()
- return c.statObject(bucketName, objectName, reqHeaders)
+ return c.statObject(context.Background(), bucketName, objectName, opts)
}
// Lower level API for statObject supporting pre-conditions and range headers.
-func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
+func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
@@ -102,17 +103,12 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead
return ObjectInfo{}, err
}
- customHeader := make(http.Header)
- for k, v := range reqHeaders.Header {
- customHeader[k] = v
- }
-
// Execute HEAD on objectName.
- resp, err := c.executeMethod("HEAD", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- contentSHA256Bytes: emptySHA256,
- customHeader: customHeader,
+ resp, err := c.executeMethod(ctx, "HEAD", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ contentSHA256Hex: emptySHA256Hex,
+ customHeader: opts.Header(),
})
defer closeResponse(resp)
if err != nil {
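
A usage sketch of the new options-based call (placeholder endpoint and credentials): `StatObjectOptions` carries the pre-condition headers that previously travelled in `RequestHeaders`, so e.g. `SetMatchETag` becomes an `If-Match` header on the HEAD request.

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true) // placeholders
	if err != nil {
		log.Fatal(err)
	}
	opts := minio.StatObjectOptions{}
	opts.SetMatchETag("deadbeef") // HEAD fails with PreconditionFailed on mismatch
	info, err := client.StatObject("mybucket", "myobject", opts)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(info.Size, info.ContentType)
}
```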
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
index 946a58869..681853849 100644
--- a/vendor/github.com/minio/minio-go/api.go
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,10 +19,9 @@ package minio
import (
"bytes"
+ "context"
"crypto/md5"
"crypto/sha256"
- "encoding/base64"
- "encoding/hex"
"errors"
"fmt"
"hash"
@@ -49,7 +48,7 @@ type Client struct {
/// Standard options.
// Parsed endpoint url provided by the user.
- endpointURL url.URL
+ endpointURL *url.URL
// Holds various credential providers.
credsProvider *credentials.Credentials
@@ -87,7 +86,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "3.0.3"
+ libraryVersion = "4.0.6"
)
// User Agent should always follow the below style.
@@ -131,11 +130,11 @@ func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, e
return nil, err
}
// Google cloud storage should be set to signature V2, force it if not.
- if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
+ if s3utils.IsGoogleEndpoint(*clnt.endpointURL) {
clnt.overrideSignerType = credentials.SignatureV2
}
// If Amazon S3 set to signature v4.
- if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
+ if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
clnt.overrideSignerType = credentials.SignatureV4
}
return clnt, nil
@@ -178,41 +177,66 @@ func (r *lockedRandSource) Seed(seed int64) {
r.lk.Unlock()
}
-// redirectHeaders copies all headers when following a redirect URL.
-// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
-func redirectHeaders(req *http.Request, via []*http.Request) error {
+// Redirect requests by re-signing the request.
+func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error {
+ if len(via) >= 5 {
+ return errors.New("stopped after 5 redirects")
+ }
if len(via) == 0 {
return nil
}
- for key, val := range via[0].Header {
- req.Header[key] = val
+ lastRequest := via[len(via)-1]
+ var reAuth bool
+ for attr, val := range lastRequest.Header {
+ // If hosts do not match, do not copy the Authorization header.
+ if attr == "Authorization" && req.Host != lastRequest.Host {
+ reAuth = true
+ continue
+ }
+ if _, ok := req.Header[attr]; !ok {
+ req.Header[attr] = val
+ }
+ }
+
+ *c.endpointURL = *req.URL
+
+ value, err := c.credsProvider.Get()
+ if err != nil {
+ return err
+ }
+ var (
+ signerType = value.SignerType
+ accessKeyID = value.AccessKeyID
+ secretAccessKey = value.SecretAccessKey
+ sessionToken = value.SessionToken
+ region = c.region
+ )
+
+ // If a custom signer is set, override the default behavior.
+ if c.overrideSignerType != credentials.SignatureDefault {
+ signerType = c.overrideSignerType
+ }
+
+ // If signerType returned by credentials helper is anonymous,
+ // then do not sign regardless of signerType override.
+ if value.SignerType == credentials.SignatureAnonymous {
+ signerType = credentials.SignatureAnonymous
}
- return nil
-}
-// getRegionFromURL - parse region from URL if present.
-func getRegionFromURL(u url.URL) (region string) {
- region = ""
- if s3utils.IsGoogleEndpoint(u) {
- return
- } else if s3utils.IsAmazonChinaEndpoint(u) {
- // For china specifically we need to set everything to
- // cn-north-1 for now, there is no easier way until AWS S3
- // provides a cleaner compatible API across "us-east-1" and
- // China region.
- return "cn-north-1"
- } else if s3utils.IsAmazonGovCloudEndpoint(u) {
- // For us-gov specifically we need to set everything to
- // us-gov-west-1 for now, there is no easier way until AWS S3
- // provides a cleaner compatible API across "us-east-1" and
- // Gov cloud region.
- return "us-gov-west-1"
- }
- parts := s3utils.AmazonS3Host.FindStringSubmatch(u.Host)
- if len(parts) > 1 {
- region = parts[1]
- }
- return region
+ if reAuth {
+ // Check if there is no region override, if not get it from the URL if possible.
+ if region == "" {
+ region = s3utils.GetRegionFromURL(*c.endpointURL)
+ }
+ switch {
+ case signerType.IsV2():
+ // Add signature version '2' authorization header.
+ req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
+ case signerType.IsV4():
+ req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
+ }
+ }
+ return nil
}
func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
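
The hook above builds on `net/http`'s `CheckRedirect`, which runs before each redirected request. A minimal standalone sketch of the pattern, capping the chain length and deciding which headers survive a host change (the re-signing itself is minio-go-specific):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

func main() {
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if len(via) >= 5 {
				return errors.New("stopped after 5 redirects")
			}
			last := via[len(via)-1] // via is never empty here
			if req.Host == last.Host {
				// Same host: safe to carry credentials forward.
				if auth := last.Header.Get("Authorization"); auth != "" {
					req.Header.Set("Authorization", auth)
				}
			}
			// Different host: leave Authorization unset and re-sign instead.
			return nil
		},
	}
	resp, err := client.Get("http://example.com")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```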
@@ -232,17 +256,17 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re
clnt.secure = secure
// Save endpoint URL, user agent for future uses.
- clnt.endpointURL = *endpointURL
+ clnt.endpointURL = endpointURL
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
Transport: defaultMinioTransport,
- CheckRedirect: redirectHeaders,
+ CheckRedirect: clnt.redirectHeaders,
}
// Sets custom region, if region is empty bucket location cache is used automatically.
if region == "" {
- region = getRegionFromURL(clnt.endpointURL)
+ region = s3utils.GetRegionFromURL(*clnt.endpointURL)
}
clnt.region = region
@@ -315,7 +339,7 @@ func (c *Client) TraceOff() {
// please visit -
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
- if s3utils.IsAmazonEndpoint(c.endpointURL) {
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) {
c.s3AccelerateEndpoint = accelerateEndpoint
}
}
@@ -356,11 +380,11 @@ type requestMetadata struct {
expires int64
// Generated by our internal code.
- bucketLocation string
- contentBody io.Reader
- contentLength int64
- contentSHA256Bytes []byte
- contentMD5Bytes []byte
+ bucketLocation string
+ contentBody io.Reader
+ contentLength int64
+ contentMD5Base64 string // carries base64 encoded md5sum
+ contentSHA256Hex string // carries hex encoded sha256sum
}
// dumpHTTP - dump HTTP request and response.
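
The renamed fields encode exactly what the wire format wants, computed once up front: S3 takes `Content-Md5` as base64 and `X-Amz-Content-Sha256` as lowercase hex. A minimal sketch of the two encodings:

```go
package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	payload := []byte("hello world")

	md5Sum := md5.Sum(payload)
	contentMD5Base64 := base64.StdEncoding.EncodeToString(md5Sum[:])

	sha256Sum := sha256.Sum256(payload)
	contentSHA256Hex := hex.EncodeToString(sha256Sum[:])

	fmt.Println(contentMD5Base64) // goes into the Content-Md5 header
	fmt.Println(contentSHA256Hex) // goes into X-Amz-Content-Sha256
}
```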
@@ -419,6 +443,7 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
}
}
}
+
// Write response to trace output.
_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
if err != nil {
@@ -437,38 +462,22 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// do - execute http request.
func (c Client) do(req *http.Request) (*http.Response, error) {
- var resp *http.Response
- var err error
- // Do the request in a loop in case of 307 http is met since golang still doesn't
- // handle properly this situation (https://github.com/golang/go/issues/7912)
- for {
- resp, err = c.httpClient.Do(req)
- if err != nil {
- // Handle this specifically for now until future Golang
- // versions fix this issue properly.
- urlErr, ok := err.(*url.Error)
- if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ // Handle this specifically for now until future Golang versions fix this issue properly.
+ if urlErr, ok := err.(*url.Error); ok {
+ if strings.Contains(urlErr.Err.Error(), "EOF") {
return nil, &url.Error{
Op: urlErr.Op,
URL: urlErr.URL,
Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
}
}
- return nil, err
- }
- // Redo the request with the new redirect url if http 307 is returned, quit the loop otherwise
- if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect {
- newURL, err := url.Parse(resp.Header.Get("Location"))
- if err != nil {
- break
- }
- req.URL = newURL
- } else {
- break
}
+ return nil, err
}
- // Response cannot be non-nil, report if its the case.
+ // Response cannot be nil here; report an error if that's the case.
if resp == nil {
msg := "Response is empty. " + reportIssue
return nil, ErrInvalidArgument(msg)
@@ -481,6 +490,7 @@ func (c Client) do(req *http.Request) (*http.Response, error) {
return nil, err
}
}
+
return resp, nil
}
@@ -494,9 +504,11 @@ var successStatus = []int{
// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm.
-func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) {
+func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
var isRetryable bool // Indicates if request can be retried.
var bodySeeker io.Seeker // Extracted seeker from io.Reader.
+ var reqRetry = MaxRetry // Indicates how many times we can retry the request
+
if metadata.contentBody != nil {
// Check if body is seekable then it is retryable.
bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
@@ -504,6 +516,11 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
case os.Stdin, os.Stdout, os.Stderr:
isRetryable = false
}
+ // Retry only when reader is seekable
+ if !isRetryable {
+ reqRetry = 1
+ }
+
// Figure out if the body can be closed - if yes
// we will definitely close it upon the function
// return.
@@ -522,7 +539,7 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Blank identifier is kept here on purpose since 'range' without
// blank identifiers is only supported since go1.4
// https://golang.org/doc/go1.4#forrange.
- for range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
+ for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
// Retry executes the following function body if request has an
// error until maxRetries have been exhausted, retry attempts are
// performed after waiting for a given period of time in a
@@ -546,6 +563,9 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
return nil, err
}
+ // Add context to request
+ req = req.WithContext(ctx)
+
// Initiate the request.
res, err = c.do(req)
if err != nil {
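
The retry budget above hinges on whether the body can be replayed. A minimal sketch of that gate (a local `maxRetry` stands in for the package-level `MaxRetry`): only an `io.Seeker` can be rewound for another attempt, and the standard streams are excluded even though `*os.File` technically seeks.

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

const maxRetry = 10 // stand-in for the package-level MaxRetry

func retryBudget(body io.Reader) int {
	if _, ok := body.(io.Seeker); !ok {
		return 1 // cannot rewind: single attempt
	}
	switch body {
	case os.Stdin, os.Stdout, os.Stderr:
		return 1 // seekable in type only, not in practice
	}
	return maxRetry
}

func main() {
	fmt.Println(retryBudget(strings.NewReader("seekable"))) // 10
	fmt.Println(retryBudget(os.Stdin))                      // 1
}
```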
@@ -639,7 +659,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
// happen when GetBucketLocation() is disabled using IAM policies.
}
if location == "" {
- location = getDefaultLocation(c.endpointURL, c.region)
+ location = getDefaultLocation(*c.endpointURL, c.region)
}
}
@@ -720,8 +740,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
}
// set md5Sum for content protection.
- if metadata.contentMD5Bytes != nil {
- req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
+ if len(metadata.contentMD5Base64) > 0 {
+ req.Header.Set("Content-Md5", metadata.contentMD5Base64)
}
// For anonymous requests just return.
@@ -742,8 +762,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
default:
// Set sha256 sum for signature calculation only with signature version '4'.
shaHeader := unsignedPayload
- if len(metadata.contentSHA256Bytes) > 0 {
- shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes)
+ if metadata.contentSHA256Hex != "" {
+ shaHeader = metadata.contentSHA256Hex
}
req.Header.Set("X-Amz-Content-Sha256", shaHeader)
@@ -767,7 +787,7 @@ func (c Client) setUserAgent(req *http.Request) {
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
host := c.endpointURL.Host
// For Amazon S3 endpoint, try to fetch location based endpoint.
- if s3utils.IsAmazonEndpoint(c.endpointURL) {
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) {
if c.s3AccelerateEndpoint != "" && bucketName != "" {
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
// Disable transfer acceleration for non-compliant bucket names.
@@ -780,7 +800,7 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
host = c.s3AccelerateEndpoint
} else {
// Do not change the host if the endpoint URL is a FIPS S3 endpoint.
- if !s3utils.IsAmazonFIPSGovCloudEndpoint(c.endpointURL) {
+ if !s3utils.IsAmazonFIPSGovCloudEndpoint(*c.endpointURL) {
// Fetch new host based on the bucket location.
host = getS3Endpoint(bucketLocation)
}
@@ -804,7 +824,7 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
// endpoint URL.
if bucketName != "" {
// Save if target URL will have buckets which support virtual host.
- isVirtualHostStyle := s3utils.IsVirtualHostSupported(c.endpointURL, bucketName)
+ isVirtualHostStyle := s3utils.IsVirtualHostSupported(*c.endpointURL, bucketName)
// If endpoint supports virtual host style use that always.
// Currently only S3 and Google Cloud Storage would support
@@ -828,10 +848,5 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
}
- u, err := url.Parse(urlStr)
- if err != nil {
- return nil, err
- }
-
- return u, nil
+ return url.Parse(urlStr)
}
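
For reference, a minimal sketch of the two URL shapes `makeTargetURL` ends up producing (simplified; scheme selection and query encoding elided): virtual-host style puts the bucket in the host, path style puts it in the path.

```go
package main

import (
	"fmt"
	"net/url"
)

func targetURL(host, bucket, object string, virtualHost bool) (*url.URL, error) {
	if virtualHost {
		// e.g. Amazon S3 / Google Cloud Storage
		return url.Parse("https://" + bucket + "." + host + "/" + object)
	}
	// e.g. a local minio deployment
	return url.Parse("https://" + host + "/" + bucket + "/" + object)
}

func main() {
	u1, _ := targetURL("s3.amazonaws.com", "mybucket", "myobject", true)
	u2, _ := targetURL("localhost:9000", "mybucket", "myobject", false)
	fmt.Println(u1) // https://mybucket.s3.amazonaws.com/myobject
	fmt.Println(u2) // https://localhost:9000/mybucket/myobject
}
```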
diff --git a/vendor/github.com/minio/minio-go/api_unit_test.go b/vendor/github.com/minio/minio-go/api_unit_test.go
index f15a6eed3..ee0b54f5c 100644
--- a/vendor/github.com/minio/minio-go/api_unit_test.go
+++ b/vendor/github.com/minio/minio-go/api_unit_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,13 +18,8 @@
package minio
import (
- "bytes"
- "io"
- "io/ioutil"
"net/http"
"net/url"
- "os"
- "strings"
"testing"
"github.com/minio/minio-go/pkg/credentials"
@@ -41,173 +36,6 @@ func (c *customReader) Size() (n int64) {
return 10
}
-// Tests getReaderSize() for various Reader types.
-func TestGetReaderSize(t *testing.T) {
- var reader io.Reader
- size, err := getReaderSize(reader)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != -1 {
- t.Fatal("Reader shouldn't have any length.")
- }
-
- bytesReader := bytes.NewReader([]byte("Hello World"))
- size, err = getReaderSize(bytesReader)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != int64(len("Hello World")) {
- t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
- }
-
- size, err = getReaderSize(new(customReader))
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != int64(10) {
- t.Fatalf("Reader length doesn't match got: %v, want: %v", size, 10)
- }
-
- stringsReader := strings.NewReader("Hello World")
- size, err = getReaderSize(stringsReader)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != int64(len("Hello World")) {
- t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
- }
-
- // Create request channel.
- reqCh := make(chan getRequest, 1)
- // Create response channel.
- resCh := make(chan getResponse, 1)
- // Create done channel.
- doneCh := make(chan struct{})
-
- objectInfo := ObjectInfo{Size: 10}
- // Create the first request.
- firstReq := getRequest{
- isReadOp: false, // Perform only a HEAD object to get objectInfo.
- isFirstReq: true,
- }
- // Create the expected response.
- firstRes := getResponse{
- objectInfo: objectInfo,
- }
- // Send the expected response.
- resCh <- firstRes
-
- // Test setting size on the first request.
- objectReaderFirstReq := newObject(reqCh, resCh, doneCh)
- defer objectReaderFirstReq.Close()
- // Not checking the response here...just that the reader size is correct.
- _, err = objectReaderFirstReq.doGetRequest(firstReq)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Validate that the reader size is the objectInfo size.
- size, err = getReaderSize(objectReaderFirstReq)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != int64(10) {
- t.Fatalf("Reader length doesn't match got: %d, wanted %d", size, objectInfo.Size)
- }
-
- fileReader, err := ioutil.TempFile(os.TempDir(), "prefix")
- if err != nil {
- t.Fatal("Error:", err)
- }
- defer fileReader.Close()
- defer os.RemoveAll(fileReader.Name())
-
- size, err = getReaderSize(fileReader)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size == -1 {
- t.Fatal("Reader length for file cannot be -1.")
- }
-
- // Verify for standard input, output and error file descriptors.
- size, err = getReaderSize(os.Stdin)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != -1 {
- t.Fatal("Stdin should have length of -1.")
- }
- size, err = getReaderSize(os.Stdout)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != -1 {
- t.Fatal("Stdout should have length of -1.")
- }
- size, err = getReaderSize(os.Stderr)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != -1 {
- t.Fatal("Stderr should have length of -1.")
- }
- file, err := os.Open(os.TempDir())
- if err != nil {
- t.Fatal("Error:", err)
- }
- defer file.Close()
- _, err = getReaderSize(file)
- if err == nil {
- t.Fatal("Input file as directory should throw an error.")
- }
-}
-
-// Tests get region from host URL.
-func TestGetRegionFromURL(t *testing.T) {
- testCases := []struct {
- u url.URL
- expectedRegion string
- }{
- {
- u: url.URL{Host: "storage.googleapis.com"},
- expectedRegion: "",
- },
- {
- u: url.URL{Host: "s3.cn-north-1.amazonaws.com.cn"},
- expectedRegion: "cn-north-1",
- },
- {
- u: url.URL{Host: "s3-fips-us-gov-west-1.amazonaws.com"},
- expectedRegion: "us-gov-west-1",
- },
- {
- u: url.URL{Host: "s3-us-gov-west-1.amazonaws.com"},
- expectedRegion: "us-gov-west-1",
- },
- {
- u: url.URL{Host: "192.168.1.1"},
- expectedRegion: "",
- },
- {
- u: url.URL{Host: "s3-eu-west-1.amazonaws.com"},
- expectedRegion: "eu-west-1",
- },
- {
- u: url.URL{Host: "s3.amazonaws.com"},
- expectedRegion: "",
- },
- }
-
- for i, testCase := range testCases {
- region := getRegionFromURL(testCase.u)
- if testCase.expectedRegion != region {
- t.Errorf("Test %d: Expected region %s, got %s", i+1, testCase.expectedRegion, region)
- }
- }
-}
-
// Tests valid hosts for location.
func TestValidBucketLocation(t *testing.T) {
s3Hosts := []struct {
@@ -352,7 +180,7 @@ func TestMakeTargetURL(t *testing.T) {
// Test 6
{"localhost:9000", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject"}, nil},
// Test 7, testing with query
- {"localhost:9000", false, "mybucket", "myobject", "", map[string][]string{"param": []string{"val"}}, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject", RawQuery: "param=val"}, nil},
+ {"localhost:9000", false, "mybucket", "myobject", "", map[string][]string{"param": {"val"}}, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject", RawQuery: "param=val"}, nil},
// Test 8, testing with port 80
{"localhost:80", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost", Scheme: "http", Path: "/mybucket/myobject"}, nil},
// Test 9, testing with port 443
diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml
index 0f623d3d4..b93b4d45d 100644
--- a/vendor/github.com/minio/minio-go/appveyor.yml
+++ b/vendor/github.com/minio/minio-go/appveyor.yml
@@ -17,11 +17,9 @@ install:
- go version
- go env
- go get -u github.com/golang/lint/golint
- - go get -u github.com/go-ini/ini
- - go get -u github.com/minio/go-homedir
- go get -u github.com/remyoudompheng/go-misc/deadcode
- go get -u github.com/gordonklaus/ineffassign
- - go get -u github.com/dustin/go-humanize
+ - go get -t ./...
# to run your custom scripts instead of automatic MSBuild
build_script:
diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go
index 3ad06da3a..5d56cdf42 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,7 +18,6 @@
package minio
import (
- "encoding/hex"
"net/http"
"net/url"
"path"
@@ -209,11 +208,9 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
}
// Set sha256 sum for signature calculation only with signature version '4'.
- var contentSha256 string
+ contentSha256 := emptySHA256Hex
if c.secure {
contentSha256 = unsignedPayload
- } else {
- contentSha256 = hex.EncodeToString(sum256([]byte{}))
}
req.Header.Set("X-Amz-Content-Sha256", contentSha256)
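
The `emptySHA256Hex` constant replaces the runtime `hex.EncodeToString(sum256([]byte{}))` with the well-known SHA-256 of zero bytes. A one-liner to confirm it:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256(nil)
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
	fmt.Println(hex.EncodeToString(sum[:]))
}
```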
diff --git a/vendor/github.com/minio/minio-go/bucket-cache_test.go b/vendor/github.com/minio/minio-go/bucket-cache_test.go
index 6ae4e7be4..fd7e7f344 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache_test.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,7 +19,6 @@ package minio
import (
"bytes"
- "encoding/hex"
"encoding/xml"
"io/ioutil"
"net/http"
@@ -116,11 +115,9 @@ func TestGetBucketLocationRequest(t *testing.T) {
// with signature version '4'.
switch {
case signerType.IsV4():
- var contentSha256 string
+ contentSha256 := emptySHA256Hex
if c.secure {
contentSha256 = unsignedPayload
- } else {
- contentSha256 = hex.EncodeToString(sum256([]byte{}))
}
req.Header.Set("X-Amz-Content-Sha256", contentSha256)
req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go
index 5ac52e5f7..1b9d6a0c7 100644
--- a/vendor/github.com/minio/minio-go/bucket-notification.go
+++ b/vendor/github.com/minio/minio-go/bucket-notification.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go
index 9771d2f92..84b6cfdf3 100644
--- a/vendor/github.com/minio/minio-go/constants.go
+++ b/vendor/github.com/minio/minio-go/constants.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -50,7 +51,7 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
const unsignedPayload = "UNSIGNED-PAYLOAD"
// Total number of parallel workers used for multipart operation.
-var totalWorkers = 3
+const totalWorkers = 4
// Signature related constants.
const (
@@ -64,3 +65,6 @@ const (
amzHeaderKey = "X-Amz-Meta-X-Amz-Key"
amzHeaderMatDesc = "X-Amz-Meta-X-Amz-Matdesc"
)
+
+// Storage class header constant.
+const amzStorageClass = "X-Amz-Storage-Class"
diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go
index 4b1054a69..4245fc065 100644
--- a/vendor/github.com/minio/minio-go/core.go
+++ b/vendor/github.com/minio/minio-go/core.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,7 +18,9 @@
package minio
import (
+ "context"
"io"
+ "strings"
"github.com/minio/minio-go/pkg/policy"
)
@@ -52,14 +55,44 @@ func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string,
return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys)
}
+// CopyObject - copies an object from source object to destination object on server side.
+func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) {
+ return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata)
+}
+
+// CopyObjectPart - creates a part in a multipart upload by copying (a
+// part of) an existing object.
+func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string,
+ partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
+
+ return c.copyObjectPartDo(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID,
+ partID, startOffset, length, metadata)
+}
+
// PutObject - Upload object. Uploads using single PUT call.
-func (c Core) PutObject(bucket, object string, size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectInfo, error) {
- return c.putObjectDo(bucket, object, data, md5Sum, sha256Sum, size, metadata)
+func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectInfo, error) {
+ opts := PutObjectOptions{}
+ m := make(map[string]string)
+ for k, v := range metadata {
+ if strings.ToLower(k) == "content-encoding" {
+ opts.ContentEncoding = v
+ } else if strings.ToLower(k) == "content-disposition" {
+ opts.ContentDisposition = v
+ } else if strings.ToLower(k) == "content-type" {
+ opts.ContentType = v
+ } else if strings.ToLower(k) == "cache-control" {
+ opts.CacheControl = v
+ } else {
+ m[k] = metadata[k]
+ }
+ }
+ opts.UserMetadata = m
+ return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts)
}
-// NewMultipartUpload - Initiates new multipart upload and returns the new uploaID.
-func (c Core) NewMultipartUpload(bucket, object string, metadata map[string][]string) (uploadID string, err error) {
- result, err := c.initiateMultipartUpload(bucket, object, metadata)
+// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
+func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
+ result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts)
return result.UploadID, err
}
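
A usage sketch of the shim above (placeholder endpoint and credentials): well-known headers in the metadata map are routed into the matching `PutObjectOptions` fields, and everything else becomes user metadata.

```go
package main

import (
	"log"
	"strings"

	minio "github.com/minio/minio-go"
)

func main() {
	core, err := minio.NewCore("play.minio.io:9000", "ACCESS", "SECRET", true) // placeholders
	if err != nil {
		log.Fatal(err)
	}
	objInfo, err := core.PutObject("mybucket", "myobject",
		strings.NewReader("hello"), 5, "", "", map[string]string{
			"Content-Type":     "text/plain", // routed to opts.ContentType
			"X-Amz-Meta-Key-1": "Val-1",      // kept as user metadata
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("ETag:", objInfo.ETag)
}
```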
@@ -69,14 +102,14 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de
}
// PutObjectPart - Upload an object part.
-func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) {
- return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, size, data, md5Sum, sha256Sum, nil)
+func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) {
+ return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, data, size, md5Base64, sha256Hex, nil)
}
// PutObjectPartWithMetadata - upload an object part with additional request metadata.
-func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int,
- size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectPart, error) {
- return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata)
+func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, data io.Reader,
+ size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectPart, error) {
+ return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, metadata)
}
// ListObjectParts - List uploaded parts of an incomplete upload.
@@ -86,7 +119,7 @@ func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
- _, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{
+ _, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{
Parts: parts,
})
return err
@@ -94,7 +127,7 @@ func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []C
// AbortMultipartUpload - Abort an incomplete upload.
func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
- return c.abortMultipartUpload(bucket, object, uploadID)
+ return c.abortMultipartUpload(context.Background(), bucket, object, uploadID)
}
// GetBucketPolicy - fetches bucket access policy for a given bucket.
@@ -110,12 +143,12 @@ func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPol
// GetObject is a lower level API implemented to support reading
// partial objects and also downloading objects with special conditions
// matching etag, modtime etc.
-func (c Core) GetObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
- return c.getObject(bucketName, objectName, reqHeaders)
+func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
+ return c.getObject(context.Background(), bucketName, objectName, opts)
}
// StatObject is a lower level API implemented to support special
// conditions matching etag, modtime on a request.
-func (c Core) StatObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
- return c.statObject(bucketName, objectName, reqHeaders)
+func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
+ return c.statObject(context.Background(), bucketName, objectName, opts)
}
diff --git a/vendor/github.com/minio/minio-go/core_test.go b/vendor/github.com/minio/minio-go/core_test.go
index 8cadc251b..8cf810465 100644
--- a/vendor/github.com/minio/minio-go/core_test.go
+++ b/vendor/github.com/minio/minio-go/core_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,7 +26,6 @@ import (
"testing"
"time"
- "crypto/md5"
"math/rand"
)
@@ -103,7 +103,9 @@ func TestGetObjectCore(t *testing.T) {
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{
+ ContentType: "binary/octet-stream",
+ })
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
@@ -112,8 +114,6 @@ func TestGetObjectCore(t *testing.T) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
- reqHeaders := NewGetReqHeaders()
-
offset := int64(2048)
// read directly
@@ -122,8 +122,9 @@ func TestGetObjectCore(t *testing.T) {
buf3 := make([]byte, n)
buf4 := make([]byte, 1)
- reqHeaders.SetRange(offset, offset+int64(len(buf1))-1)
- reader, objectInfo, err := c.GetObject(bucketName, objectName, reqHeaders)
+ opts := GetObjectOptions{}
+ opts.SetRange(offset, offset+int64(len(buf1))-1)
+ reader, objectInfo, err := c.GetObject(bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -141,8 +142,8 @@ func TestGetObjectCore(t *testing.T) {
}
offset += 512
- reqHeaders.SetRange(offset, offset+int64(len(buf2))-1)
- reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+ opts.SetRange(offset, offset+int64(len(buf2))-1)
+ reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -160,8 +161,8 @@ func TestGetObjectCore(t *testing.T) {
t.Fatal("Error: Incorrect read between two GetObject from same offset.")
}
- reqHeaders.SetRange(0, int64(len(buf3)))
- reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+ opts.SetRange(0, int64(len(buf3)))
+ reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -180,9 +181,9 @@ func TestGetObjectCore(t *testing.T) {
t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
}
- reqHeaders = NewGetReqHeaders()
- reqHeaders.SetMatchETag("etag")
- _, _, err = c.GetObject(bucketName, objectName, reqHeaders)
+ opts = GetObjectOptions{}
+ opts.SetMatchETag("etag")
+ _, _, err = c.GetObject(bucketName, objectName, opts)
if err == nil {
t.Fatal("Unexpected GetObject should fail with mismatching etags")
}
@@ -190,9 +191,9 @@ func TestGetObjectCore(t *testing.T) {
t.Fatalf("Expected \"PreconditionFailed\" as code, got %s instead", errResp.Code)
}
- reqHeaders = NewGetReqHeaders()
- reqHeaders.SetMatchETagExcept("etag")
- reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+ opts = GetObjectOptions{}
+ opts.SetMatchETagExcept("etag")
+ reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -210,9 +211,9 @@ func TestGetObjectCore(t *testing.T) {
t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
}
- reqHeaders = NewGetReqHeaders()
- reqHeaders.SetRange(0, 0)
- reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+ opts = GetObjectOptions{}
+ opts.SetRange(0, 0)
+ reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -275,12 +276,12 @@ func TestGetObjectContentEncoding(t *testing.T) {
// Generate data more than 32K
buf := bytes.Repeat([]byte("3"), rand.Intn(1<<20)+32*1024)
- m := make(map[string][]string)
- m["Content-Encoding"] = []string{"gzip"}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.Client.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), m, nil)
+ n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{
+ ContentEncoding: "gzip",
+ })
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
@@ -289,8 +290,7 @@ func TestGetObjectContentEncoding(t *testing.T) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
- reqHeaders := NewGetReqHeaders()
- rwc, objInfo, err := c.GetObject(bucketName, objectName, reqHeaders)
+ rwc, objInfo, err := c.GetObject(bucketName, objectName, GetObjectOptions{})
if err != nil {
t.Fatalf("Error: %v", err)
}
@@ -370,6 +370,265 @@ func TestGetBucketPolicy(t *testing.T) {
}
}
+// Tests Core CopyObject API implementation.
+func TestCoreCopyObject(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewCore(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ buf := bytes.Repeat([]byte("a"), 32*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
+ "Content-Type": "binary/octet-stream",
+ })
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if objInfo.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size)
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+
+ cobjInfo, err := c.CopyObject(bucketName, objectName, destBucketName, destObjectName, map[string]string{
+ "X-Amz-Metadata-Directive": "REPLACE",
+ "Content-Type": "application/javascript",
+ })
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName, destBucketName, destObjectName)
+ }
+ if cobjInfo.ETag != objInfo.ETag {
+ t.Fatalf("Error: expected etag to be same as source object %s, but found different etag :%s", objInfo.ETag, cobjInfo.ETag)
+ }
+
+ // Attempt to read from destBucketName and object name.
+ r, err := c.Client.GetObject(destBucketName, destObjectName, GetObjectOptions{})
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+
+ if st.ContentType != "application/javascript" {
+ t.Fatalf("Error: Content types don't match, expected: application/javascript, found: %+v\n", st.ContentType)
+ }
+
+ if st.ETag != objInfo.ETag {
+ t.Fatalf("Error: expected etag to be same as source object %s, but found different etag :%s", objInfo.ETag, st.ETag)
+ }
+
+ if err := r.Close(); err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ if err := r.Close(); err == nil {
+ t.Fatal("Error: object is already closed, should return error")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(destBucketName, destObjectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // No need to remove destBucketName; it is the same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation
+func TestCoreCopyObjectPart(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewCore(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
+ "Content-Type": "binary/octet-stream",
+ })
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if objInfo.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size)
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+
+ uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, PutObjectOptions{})
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ // The content of the destination object will be two copies of the
+ // source object's data concatenated, followed by its first byte.
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, nil)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, nil)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, nil)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+
+ // Complete the multipart upload
+ err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []CompletePart{fstPart, sndPart, lstPart})
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err = c.StatObject(destBucketName, destObjectName, StatObjectOptions{})
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ t.Fatal("Destination object has incorrect size!")
+ }
+
+ // Now we read the data back
+ getOpts := GetObjectOptions{}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, err := c.GetObject(destBucketName, destObjectName, getOpts)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = io.ReadFull(r, getBuf)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+ if !bytes.Equal(getBuf, buf) {
+ t.Fatal("Got unexpected data in first 5MB")
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, err = c.GetObject(destBucketName, destObjectName, getOpts)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = io.ReadFull(r, getBuf)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ t.Fatal("Got unexpected data in second 5MB")
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ t.Fatal("Got unexpected data in last byte of copied object!")
+ }
+
+ if err := c.RemoveObject(destBucketName, destObjectName); err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ if err := c.RemoveObject(bucketName, objectName); err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ if err := c.RemoveBucket(bucketName); err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ // No need to remove destBucketName; it is the same as bucketName.
+}
+
// Test Core PutObject.
func TestCorePutObject(t *testing.T) {
if testing.Short() {
@@ -405,21 +664,21 @@ func TestCorePutObject(t *testing.T) {
t.Fatal("Error:", err, bucketName)
}
- buf := bytes.Repeat([]byte("a"), minPartSize)
+ buf := bytes.Repeat([]byte("a"), 32*1024)
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
// Object content type
objectContentType := "binary/octet-stream"
- metadata := make(map[string][]string)
- metadata["Content-Type"] = []string{objectContentType}
+ metadata := make(map[string]string)
+ metadata["Content-Type"] = objectContentType
- objInfo, err := c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), md5.New().Sum(nil), nil, metadata)
+ objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "1B2M2Y8AsgTpgAmY7PhCfg==", "", metadata)
if err == nil {
- t.Fatal("Error expected: nil, got: ", err)
+ t.Fatal("Error expected: error, got: nil(success)")
}
- objInfo, err = c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), nil, nil, metadata)
+ objInfo, err = c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", metadata)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
@@ -429,7 +688,7 @@ func TestCorePutObject(t *testing.T) {
}
// Read the data back
- r, err := c.Client.GetObject(bucketName, objectName)
+ r, err := c.Client.GetObject(bucketName, objectName, GetObjectOptions{})
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
@@ -490,18 +749,17 @@ func TestCoreGetObjectMetadata(t *testing.T) {
t.Fatal("Error:", err, bucketName)
}
- metadata := map[string][]string{
- "X-Amz-Meta-Key-1": {"Val-1"},
+ metadata := map[string]string{
+ "X-Amz-Meta-Key-1": "Val-1",
}
- _, err = core.PutObject(bucketName, "my-objectname", 5,
- bytes.NewReader([]byte("hello")), nil, nil, metadata)
+ _, err = core.PutObject(bucketName, "my-objectname",
+ bytes.NewReader([]byte("hello")), 5, "", "", metadata)
if err != nil {
log.Fatalln(err)
}
- reader, objInfo, err := core.GetObject(bucketName, "my-objectname",
- RequestHeaders{})
+ reader, objInfo, err := core.GetObject(bucketName, "my-objectname", GetObjectOptions{})
if err != nil {
log.Fatalln(err)
}
diff --git a/vendor/github.com/minio/minio-go/docs/API.md b/vendor/github.com/minio/minio-go/docs/API.md
index bfdd42db6..33b8c5891 100644
--- a/vendor/github.com/minio/minio-go/docs/API.md
+++ b/vendor/github.com/minio/minio-go/docs/API.md
@@ -54,19 +54,20 @@ func main() {
| :--- | :--- | :--- | :--- | :--- | :--- |
| [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
| [`ListBuckets`](#ListBuckets) | [`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
-| [`BucketExists`](#BucketExists) | [`PutObjectStreaming`](#PutObjectStreaming) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
-| [`RemoveBucket`](#RemoveBucket) | [`CopyObject`](#CopyObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
-| [`ListObjects`](#ListObjects) | [`StatObject`](#StatObject) | [`NewSSEInfo`](#NewSSEInfo) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
-| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObject`](#RemoveObject) | | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | |
-| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveObjects`](#RemoveObjects) | | | [`ListenBucketNotification`](#ListenBucketNotification) | |
-| | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | | |
+| [`BucketExists`](#BucketExists) | [`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
+| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
+| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | [`NewSSEInfo`](#NewSSEInfo) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
+| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | [`FPutEncryptedObject`](#FPutEncryptedObject) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | |
+| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) | |
| | [`FPutObject`](#FPutObject) | | | | |
| | [`FGetObject`](#FGetObject) | | | | |
| | [`ComposeObject`](#ComposeObject) | | | | |
| | [`NewSourceInfo`](#NewSourceInfo) | | | | |
| | [`NewDestinationInfo`](#NewDestinationInfo) | | | | |
-
-
+| | [`PutObjectWithContext`](#PutObjectWithContext) | | | | |
+| | [`GetObjectWithContext`](#GetObjectWithContext) | | | | |
+| | [`FPutObjectWithContext`](#FPutObjectWithContext) | | | | |
+| | [`FGetObjectWithContext`](#FGetObjectWithContext) | | | | |
## 1. Constructor
<a name="Minio"></a>
@@ -83,7 +84,7 @@ __Parameters__
|`ssl` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise |
### NewWithRegion(endpoint, accessKeyID, secretAccessKey string, ssl bool, region string) (*Client, error)
-Initializes minio client, with region configured. Unlike New(), NewWithRegion avoids bucket-location lookup operations and it is slightly faster. Use this function when if your application deals with single region.
+Initializes a minio client with the region configured. Unlike New(), NewWithRegion avoids bucket-location lookup operations and is slightly faster. Use this function when your application deals with a single region.
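+
+A minimal usage sketch (the endpoint and credentials below are placeholders):
+
+```go
+s3Client, err := minio.NewWithRegion("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true, "us-east-1")
+if err != nil {
+    fmt.Println(err)
+    return
+}
+```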
__Parameters__
@@ -122,7 +123,7 @@ __Example__
```go
-err := minioClient.MakeBucket("mybucket", "us-east-1")
+err = minioClient.MakeBucket("mybucket", "us-east-1")
if err != nil {
fmt.Println(err)
return
@@ -132,15 +133,16 @@ fmt.Println("Successfully created mybucket.")
<a name="ListBuckets"></a>
### ListBuckets() ([]BucketInfo, error)
-
Lists all buckets.
| Param | Type | Description |
|---|---|---|
-|`bucketList` | _[]BucketInfo_ | Lists of all buckets |
+|`bucketList` | _[]minio.BucketInfo_ | List of all buckets |
-| Param | Type | Description |
+__minio.BucketInfo__
+
+| Field | Type | Description |
|---|---|---|
|`bucket.Name` | _string_ | Name of the bucket |
|`bucket.CreationDate` | _time.Time_ | Date of bucket creation |
@@ -151,7 +153,7 @@ __Example__
```go
buckets, err := minioClient.ListBuckets()
- if err != nil {
+if err != nil {
fmt.Println(err)
return
}
@@ -162,7 +164,6 @@ for _, bucket := range buckets {
<a name="BucketExists"></a>
### BucketExists(bucketName string) (found bool, err error)
-
Checks if a bucket exists.
__Parameters__
@@ -197,8 +198,7 @@ if found {
<a name="RemoveBucket"></a>
### RemoveBucket(bucketName string) error
-
-Removes a bucket.
+Removes a bucket; the bucket must be empty to be successfully removed.
__Parameters__
@@ -211,7 +211,7 @@ __Example__
```go
-err := minioClient.RemoveBucket("mybucket")
+err = minioClient.RemoveBucket("mybucket")
if err != nil {
fmt.Println(err)
return
@@ -220,7 +220,6 @@ if err != nil {
<a name="ListObjects"></a>
### ListObjects(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
-
Lists objects in a bucket.
__Parameters__
@@ -238,9 +237,11 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all objects in the bucket, the object is of the format listed below: |
+|`objectInfo` | _chan minio.ObjectInfo_ |Read channel for all objects in the bucket; each object is of the format listed below: |
-|Param |Type |Description |
+__minio.ObjectInfo__
+
+|Field |Type |Description |
|:---|:---| :---|
|`objectInfo.Key` | _string_ |Name of the object |
|`objectInfo.Size` | _int64_ |Size of the object |
@@ -269,7 +270,6 @@ for object := range objectCh {
<a name="ListObjectsV2"></a>
### ListObjectsV2(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
-
Lists objects in a bucket using the recommended listing API v2.
__Parameters__
@@ -287,14 +287,7 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all the objects in the bucket, the object is of the format listed below: |
-
-|Param |Type |Description |
-|:---|:---| :---|
-|`objectInfo.Key` | _string_ |Name of the object |
-|`objectInfo.Size` | _int64_ |Size of the object |
-|`objectInfo.ETag` | _string_ |MD5 checksum of the object |
-|`objectInfo.LastModified` | _time.Time_ |Time when object was last modified |
+|`objectInfo` | _chan minio.ObjectInfo_ |Read channel for all the objects in the bucket; each object is of the format listed below: |
```go
@@ -317,7 +310,6 @@ for object := range objectCh {
<a name="ListIncompleteUploads"></a>
### ListIncompleteUploads(bucketName, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo
-
Lists partially uploaded objects in a bucket.
@@ -336,11 +328,11 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |Emits multipart objects of the format listed below: |
+|`multiPartInfo` | _chan minio.ObjectMultipartInfo_ |Emits multipart objects of the format listed below: |
-__Return Value__
+__minio.ObjectMultipartInfo__
-|Param |Type |Description |
+|Field |Type |Description |
|:---|:---| :---|
|`multiPartObjInfo.Key` | _string_ |Name of incompletely uploaded object |
|`multiPartObjInfo.UploadID` | _string_ |Upload ID of incompletely uploaded object |
@@ -370,8 +362,7 @@ for multiPartObject := range multiPartObjectCh {
## 3. Object operations
<a name="GetObject"></a>
-### GetObject(bucketName, objectName string) (*Object, error)
-
+### GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error)
Returns a stream of the object data. Most of the common errors occur when reading the stream.
@@ -382,8 +373,15 @@ __Parameters__
|:---|:---| :---|
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
+|`opts` | _minio.GetObjectOptions_ | Options for GET requests, such as encryption and If-Match conditions |
+__minio.GetObjectOptions__
+
+|Field | Type | Description |
+|:---|:---|:---|
+| `opts.Materials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
+
__Return Value__
@@ -396,7 +394,7 @@ __Example__
```go
-object, err := minioClient.GetObject("mybucket", "photo.jpg")
+object, err := minioClient.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
if err != nil {
fmt.Println(err)
return
@@ -413,34 +411,146 @@ if _, err = io.Copy(localFile, object); err != nil {
```
<a name="FGetObject"></a>
-### FGetObject(bucketName, objectName, filePath string) error
- Downloads and saves the object as a file in the local filesystem.
+### FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error
+Downloads and saves the object as a file in the local filesystem.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to download object to |
+|`opts` | _minio.GetObjectOptions_ | Options for GET requests, such as encryption and If-Match conditions |
+
+
+__Example__
+
+```go
+err = minioClient.FGetObject("mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+<a name="GetObjectWithContext"></a>
+### GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error)
+Identical to GetObject operation, but accepts a context for request cancellation.
__Parameters__
|Param |Type |Description |
|:---|:---| :---|
+|`ctx` | _context.Context_ |Request context |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`opts` | _minio.GetObjectOptions_ | Options for GET requests, such as encryption and If-Match conditions |
+
+
+__Return Value__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`object` | _*minio.Object_ |_minio.Object_ represents object reader. It implements io.Reader, io.Seeker, io.ReaderAt and io.Closer interfaces. |
+
+
+__Example__
+
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+object, err := minioClient.GetObjectWithContext(ctx, "mybucket", "myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+localFile, err := os.Create("/tmp/local-file.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+if _, err = io.Copy(localFile, object); err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="FGetObjectWithContext"></a>
+### FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error
+Identical to FGetObject operation, but allows request cancellation.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |Request context |
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`filePath` | _string_ |Path to download object to |
+|`opts` | _minio.GetObjectOptions_ | Options for GET requests, such as encryption and If-Match conditions |
__Example__
```go
-err := minioClient.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg")
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+err = minioClient.FGetObjectWithContext(ctx, "mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{})
if err != nil {
fmt.Println(err)
return
}
```
-<a name="PutObject"></a>
-### PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int, err error)
+<a name="FGetEncryptedObject"></a>
+### FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error
+Identical to FGetObject operation, but decrypts an encrypted object using the given encryption materials.
+
+__Parameters__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to download object to |
+|`materials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
+
+
+__Example__
+
+
+```go
+// Generate a master symmetric key
+key := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+// Build the CBC encryption material
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+err = minioClient.FGetEncryptedObject("mybucket", "myobject", "/tmp/myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="PutObject"></a>
+### PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, opts PutObjectOptions) (n int, err error)
Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object as parts of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
__Parameters__
@@ -451,8 +561,21 @@ __Parameters__
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`reader` | _io.Reader_ |Any Go type that implements io.Reader |
-|`contentType` | _string_ |Content type of the object |
-
+|`objectSize`| _int64_ |Size of the object being uploaded. Pass -1 if stream size is unknown |
+|`opts` | _minio.PutObjectOptions_ | Allows user to set optional custom metadata, content headers, encryption keys and number of threads for multipart upload operation. |
+
+__minio.PutObjectOptions__
+
+|Field | Type | Description |
+|:--- |:--- | :--- |
+| `opts.UserMetadata` | _map[string]string_ | Map of user metadata|
+| `opts.Progress` | _io.Reader_ | Reader to fetch progress of an upload |
+| `opts.ContentType` | _string_ | Content type of object, e.g. "application/text" |
+| `opts.ContentEncoding` | _string_ | Content encoding of object, e.g. "gzip" |
+| `opts.ContentDisposition` | _string_ | Content disposition of object, e.g. "inline" |
+| `opts.CacheControl` | _string_ | Used to specify directives for caching mechanisms in both requests and responses, e.g. "max-age=600" |
+| `opts.EncryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
+| `opts.StorageClass` | _string_ | Specify storage class for the object. Supported values for Minio server are `REDUCED_REDUNDANCY` and `STANDARD` |
__Example__
@@ -465,32 +588,46 @@ if err != nil {
}
defer file.Close()
-n, err := minioClient.PutObject("mybucket", "myobject", file, "application/octet-stream")
+fileStat, err := file.Stat()
if err != nil {
fmt.Println(err)
return
}
-```
-<a name="PutObjectStreaming"></a>
-### PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int, err error)
+n, err := minioClient.PutObject("mybucket", "myobject", file, fileStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
-Uploads an object as multiple chunks keeping memory consumption constant. It is similar to PutObject in how objects are broken into multiple parts. Each part in turn is transferred as multiple chunks with constant memory usage. However resuming previously failed uploads from where it was left is not supported.
+API methods PutObjectWithSize, PutObjectWithMetadata, PutObjectStreaming, and PutObjectWithProgress available in minio-go SDK release v3.0.3 are replaced by the new PutObject call variant that accepts a PutObjectOptions struct.
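+
+As a sketch, the former streaming call maps onto the new variant by passing -1 when the stream size is unknown (`reader` below stands for any io.Reader):
+
+```go
+// Upload a stream of unknown size; PutObject splits it into parts internally.
+n, err := minioClient.PutObject("mybucket", "myobject", reader, -1, minio.PutObjectOptions{})
+```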
+<a name="PutObjectWithContext"></a>
+### PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, opts PutObjectOptions) (n int, err error)
+Identical to PutObject operation, but allows request cancellation.
__Parameters__
|Param |Type |Description |
-|:---|:---|:---|
+|:---|:---| :---|
+|`ctx` | _context.Context_ |Request context |
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`reader` | _io.Reader_ |Any Go type that implements io.Reader |
+|`objectSize`| _int64_ |Size of the object being uploaded. Pass -1 if stream size is unknown |
+|`opts` | _minio.PutObjectOptions_ |Options struct allowing the user to set custom metadata, content-type, content-encoding, content-disposition and cache-control headers, pass an encryption module for encrypting objects, and optionally configure the number of threads for a multipart put operation. |
+
__Example__
```go
+ctx, cancel := context.WithTimeout(context.Background(), 10 * time.Second)
+defer cancel()
+
file, err := os.Open("my-testfile")
if err != nil {
fmt.Println(err)
@@ -498,36 +635,42 @@ if err != nil {
}
defer file.Close()
-n, err := minioClient.PutObjectStreaming("mybucket", "myobject", file)
+fileStat, err := file.Stat()
if err != nil {
fmt.Println(err)
return
}
-```
+n, err := minioClient.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", file, fileStat.Size(), minio.PutObjectOptions{
+ ContentType: "application/octet-stream",
+})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
<a name="CopyObject"></a>
### CopyObject(dst DestinationInfo, src SourceInfo) error
-
Create or replace an object through server-side copying of an existing object. It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source. See the `SourceInfo` and `DestinationInfo` types for further details.
To copy multiple source objects into a single destination object see the `ComposeObject` API.
-
__Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`dst` | _DestinationInfo_ |Argument describing the destination object |
-|`src` | _SourceInfo_ |Argument describing the source object |
+|`dst` | _minio.DestinationInfo_ |Argument describing the destination object |
+|`src` | _minio.SourceInfo_ |Argument describing the source object |
__Example__
```go
-// Use-case 1: Simple copy object with no conditions, etc
+// Use-case 1: Simple copy object with no conditions.
// Source object
src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
@@ -539,13 +682,16 @@ if err != nil {
}
// Copy object call
-err = s3Client.CopyObject(dst, src)
+err = minioClient.CopyObject(dst, src)
if err != nil {
fmt.Println(err)
return
}
+```
-// Use-case 2: Copy object with copy-conditions, and copying only part of the source object.
+```go
+// Use-case 2:
+// Copy object with copy-conditions, and copying only part of the source object.
// 1. that matches a given ETag
// 2. and modified after 1st April 2014
// 3. but unmodified since 23rd April 2014
@@ -574,7 +720,7 @@ if err != nil {
}
// Copy object call
-err = s3Client.CopyObject(dst, src)
+err = minioClient.CopyObject(dst, src)
if err != nil {
fmt.Println(err)
return
@@ -582,10 +728,8 @@ if err != nil {
```
<a name="ComposeObject"></a>
-### ComposeObject(dst DestinationInfo, srcs []SourceInfo) error
-
-Create an object by concatenating a list of source objects using
-server-side copying.
+### ComposeObject(dst minio.DestinationInfo, srcs []minio.SourceInfo) error
+Create an object by concatenating a list of source objects using server-side copying.
__Parameters__
@@ -606,14 +750,14 @@ decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "")
// Source objects to concatenate. We also specify decryption
// key for each
-src1 := minio.NewSourceInfo("bucket1", "object1", decKey)
-src1.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+src1 := minio.NewSourceInfo("bucket1", "object1", &decKey)
+src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
-src2 := minio.NewSourceInfo("bucket2", "object2", decKey)
-src2.SetMatchETag("f8ef9c385918b653a31624deb84149d2")
+src2 := minio.NewSourceInfo("bucket2", "object2", &decKey)
+src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2")
-src3 := minio.NewSourceInfo("bucket3", "object3", decKey)
-src3.SetMatchETag("5918b653a31624deb84149d2f8ef9c38")
+src3 := minio.NewSourceInfo("bucket3", "object3", &decKey)
+src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38")
// Create slice of sources.
srcs := []minio.SourceInfo{src1, src2, src3}
@@ -622,19 +766,24 @@ srcs := []minio.SourceInfo{src1, src2, src3}
encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "")
// Create destination info
-dst := minio.NewDestinationInfo("bucket", "object", encKey, nil)
-err = s3Client.ComposeObject(dst, srcs)
+dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
if err != nil {
- log.Println(err)
- return
+ fmt.Println(err)
+ return
+}
+
+// Compose object call by concatenating multiple source files.
+err = minioClient.ComposeObject(dst, srcs)
+if err != nil {
+ fmt.Println(err)
+ return
}
-log.Println("Composed object successfully.")
+fmt.Println("Composed object successfully.")
```
<a name="NewSourceInfo"></a>
### NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo
-
Construct a `SourceInfo` object that can be used as the source for server-side copying operations like `CopyObject` and `ComposeObject`. This object can be used to set copy-conditions on the source.
__Parameters__
@@ -647,18 +796,47 @@ __Parameters__
__Example__
-``` go
+```go
// No decryption parameter.
-src := NewSourceInfo("bucket", "object", nil)
+src := minio.NewSourceInfo("bucket", "object", nil)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+```go
// With decryption parameter.
-decKey := NewSSEKey([]byte{1,2,3}, "")
-src := NewSourceInfo("bucket", "object", decKey)
+decKey := minio.NewSSEInfo([]byte{1,2,3}, "")
+src := minio.NewSourceInfo("bucket", "object", &decKey)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
```
<a name="NewDestinationInfo"></a>
### NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, userMeta map[string]string) (DestinationInfo, error)
-
Construct a `DestinationInfo` object that can be used as the destination object for server-side copying operations like `CopyObject` and `ComposeObject`.
__Parameters__
@@ -672,24 +850,48 @@ __Parameters__
__Example__
-``` go
+```go
// No encryption parameter.
-dst, err := NewDestinationInfo("bucket", "object", nil, nil)
+src := minio.NewSourceInfo("bucket", "object", nil)
+dst, err := minio.NewDestinationInfo("bucket", "object", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
-// With encryption parameter.
-encKey := NewSSEKey([]byte{1,2,3}, "")
-dst, err := NewDecryptionInfo("bucket", "object", encKey, nil)
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
```
+```go
+src := minio.NewSourceInfo("bucket", "object", nil)
-<a name="FPutObject"></a>
-### FPutObject(bucketName, objectName, filePath, contentType string) (length int64, err error)
+// With encryption parameter.
+encKey := minio.NewSSEInfo([]byte{1,2,3}, "")
+dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+<a name="FPutObject"></a>
+### FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (length int64, err error)
Uploads contents from a file to objectName.
FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than the 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
-
__Parameters__
@@ -698,25 +900,56 @@ __Parameters__
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`filePath` | _string_ |Path to file to be uploaded |
-|`contentType` | _string_ |Content type of the object |
+|`opts` | _minio.PutObjectOptions_ |Options struct allowing the user to set custom metadata, content-type, content-encoding, content-disposition and cache-control headers, pass an encryption module for encrypting objects, and optionally configure the number of threads for a multipart put operation. |
__Example__
```go
-n, err := minioClient.FPutObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", "application/csv")
+n, err := minioClient.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{
+ ContentType: "application/csv",
+})
if err != nil {
fmt.Println(err)
return
}
+fmt.Println("Successfully uploaded bytes: ", n)
```
-<a name="StatObject"></a>
-### StatObject(bucketName, objectName string) (ObjectInfo, error)
+<a name="FPutObjectWithContext"></a>
+### FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (length int64, err error)
+Identical to FPutObject operation, but allows request cancellation.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |Request context |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to file to be uploaded |
+|`opts` | _minio.PutObjectOptions_ |Options struct allowing the user to set custom metadata, content-type, content-encoding, content-disposition and cache-control headers, pass an encryption module for encrypting objects, and optionally configure the number of threads for a multipart put operation. |
+
+__Example__
-Gets metadata of an object.
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+n, err := minioClient.FPutObjectWithContext(ctx, "mybucket", "myobject.csv", "/tmp/otherobject.csv", minio.PutObjectOptions{ContentType: "application/csv"})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
+
+<a name="StatObject"></a>
+### StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error)
+Fetches metadata of an object.
__Parameters__
@@ -725,16 +958,19 @@ __Parameters__
|:---|:---| :---|
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
+|`opts` | _minio.StatObjectOptions_ | Options for GET info/stat requests, such as encryption and If-Match conditions |
__Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`objInfo` | _ObjectInfo_ |Object stat information |
+|`objInfo` | _minio.ObjectInfo_ |Object stat information |
-|Param |Type |Description |
+__minio.ObjectInfo__
+
+|Field |Type |Description |
|:---|:---| :---|
|`objInfo.LastModified` | _time.Time_ |Time when object was last modified |
|`objInfo.ETag` | _string_ |MD5 checksum of the object|
@@ -742,11 +978,11 @@ __Return Value__
|`objInfo.Size` | _int64_ |Size of the object|
- __Example__
+__Example__
```go
-objInfo, err := minioClient.StatObject("mybucket", "photo.jpg")
+objInfo, err := minioClient.StatObject("mybucket", "myobject", minio.StatObjectOptions{})
if err != nil {
fmt.Println(err)
return
@@ -756,10 +992,8 @@ fmt.Println(objInfo)
<a name="RemoveObject"></a>
### RemoveObject(bucketName, objectName string) error
-
Removes an object.
-
__Parameters__
@@ -770,46 +1004,54 @@ __Parameters__
```go
-err := minioClient.RemoveObject("mybucket", "photo.jpg")
+err = minioClient.RemoveObject("mybucket", "myobject")
if err != nil {
fmt.Println(err)
return
}
```
-<a name="RemoveObjects"></a>
-### RemoveObjects(bucketName string, objectsCh chan string) errorCh chan minio.RemoveObjectError
-Removes a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time.
-The errors observed are sent over the error channel.
+<a name="RemoveObjects"></a>
+### RemoveObjects(bucketName string, objectsCh chan string) (errorCh <-chan RemoveObjectError)
+Removes a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time. The errors observed are sent over the error channel.
__Parameters__
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |Name of the bucket |
-|`objectsCh` | _chan string_ | Prefix of objects to be removed |
+|`objectsCh` | _chan string_ | Channel of objects to be removed |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`errorCh` | _chan minio.RemoveObjectError | Channel of errors observed during deletion. |
-
+|`errorCh` | _<-chan minio.RemoveObjectError_ | Receive-only channel of errors observed during deletion. |
```go
-errorCh := minioClient.RemoveObjects("mybucket", objectsCh)
-for e := range errorCh {
- fmt.Println("Error detected during deletion: " + e.Err.Error())
+objectsCh := make(chan string)
+
+// Send object names that need to be removed to objectsCh
+go func() {
+ defer close(objectsCh)
+ // List all objects from a bucket-name with a matching prefix.
+ for object := range minioClient.ListObjects("my-bucketname", "my-prefixname", true, nil) {
+ if object.Err != nil {
+ log.Fatalln(object.Err)
+ }
+ objectsCh <- object.Key
+ }
+}()
+
+for rErr := range minioClient.RemoveObjects("mybucket", objectsCh) {
+ fmt.Println("Error detected during deletion: ", rErr)
}
```
-
-
<a name="RemoveIncompleteUpload"></a>
### RemoveIncompleteUpload(bucketName, objectName string) error
-
Removes a partially uploaded object.
__Parameters__
@@ -824,7 +1066,7 @@ __Example__
```go
-err := minioClient.RemoveIncompleteUpload("mybucket", "photo.jpg")
+err = minioClient.RemoveIncompleteUpload("mybucket", "myobject")
if err != nil {
fmt.Println(err)
return
@@ -834,7 +1076,7 @@ if err != nil {
## 4. Encrypted object operations
<a name="NewSymmetricKey"></a>
-### NewSymmetricKey(key []byte) *minio.SymmetricKey
+### NewSymmetricKey(key []byte) *encrypt.SymmetricKey
__Parameters__
@@ -847,15 +1089,29 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`symmetricKey` | _*minio.SymmetricKey_ |_minio.SymmetricKey_ represents a symmetric key structure which can be used to encrypt and decrypt data. |
+|`symmetricKey` | _*encrypt.SymmetricKey_ | represents a symmetric key structure which can be used to encrypt and decrypt data |
```go
-symKey := minio.NewSymmetricKey([]byte("my-secret-key-00"))
-```
+symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+// Build the CBC encryption material with symmetric key.
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(symKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully initialized Symmetric key CBC materials", cbcMaterials)
+
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer object.Close()
+```
<a name="NewAsymmetricKey"></a>
-### NewAsymmetricKey(privateKey []byte, publicKey[]byte) (*minio.AsymmetricKey, error)
+### NewAsymmetricKey(privateKey []byte, publicKey []byte) (*encrypt.AsymmetricKey, error)
__Parameters__
@@ -869,32 +1125,50 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`asymmetricKey` | _*minio.AsymmetricKey_ | represents an asymmetric key structure which can be used to encrypt and decrypt data. |
-|`err` | _error_ | encountered errors. |
+|`asymmetricKey` | _*encrypt.AsymmetricKey_ | represents an asymmetric key structure which can be used to encrypt and decrypt data |
+|`err` | _error_ | Standard Error |
```go
privateKey, err := ioutil.ReadFile("private.key")
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
publicKey, err := ioutil.ReadFile("public.key")
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
// Initialize the asymmetric key
-asymmetricKey, err := minio.NewAsymmetricKey(privateKey, publicKey)
+asymmetricKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build the CBC encryption material for asymmetric key.
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(asymmetricKey)
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
+fmt.Println("Successfully initialized Asymmetric key CBC materials", cbcMaterials)
+
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer object.Close()
```
<a name="GetEncryptedObject"></a>
-### GetEncryptedObject(bucketName, objectName string, encryptMaterials minio.EncryptionMaterials) (io.ReadCloser, error)
+### GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error)
-Returns the decrypted stream of the object data based of the given encryption materiels. Most of the common errors occur when reading the stream.
+Returns the decrypted stream of the object data based on the given encryption materials. Most of the common errors occur when reading the stream.
__Parameters__
@@ -902,7 +1176,7 @@ __Parameters__
|:---|:---| :---|
|`bucketName` | _string_ | Name of the bucket |
|`objectName` | _string_ | Name of the object |
-|`encryptMaterials` | _minio.EncryptionMaterials_ | The module to decrypt the object data |
+|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
__Return Value__
@@ -918,15 +1192,16 @@ __Example__
```go
// Generate a master symmetric key
-key := minio.NewSymmetricKey("my-secret-key-00")
+key := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
// Build the CBC encryption material
-cbcMaterials, err := NewCBCSecureMaterials(key)
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
if err != nil {
- t.Fatal(err)
+ fmt.Println(err)
+ return
}
-object, err := minioClient.GetEncryptedObject("mybucket", "photo.jpg", cbcMaterials)
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
if err != nil {
fmt.Println(err)
return
@@ -938,6 +1213,7 @@ if err != nil {
fmt.Println(err)
return
}
+defer localFile.Close()
if _, err = io.Copy(localFile, object); err != nil {
fmt.Println(err)
@@ -947,11 +1223,9 @@ if _, err = io.Copy(localFile, object); err != nil {
<a name="PutEncryptedObject"></a>
-### PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials minio.EncryptionMaterials, metadata map[string][]string, progress io.Reader) (n int, err error)
-
+### PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int, err error)
Encrypt and upload an object.
-
__Parameters__
|Param |Type |Description |
@@ -959,10 +1233,7 @@ __Parameters__
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`reader` | _io.Reader_ |Any Go type that implements io.Reader |
-|`encryptMaterials` | _minio.EncryptionMaterials_ | The module that encrypts data |
-|`metadata` | _map[string][]string_ | Object metadata to be stored |
-|`progress` | io.Reader | A reader to update the upload progress |
-
+|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
__Example__
@@ -970,25 +1241,29 @@ __Example__
// Load a private key
privateKey, err := ioutil.ReadFile("private.key")
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
// Load a public key
publicKey, err := ioutil.ReadFile("public.key")
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
// Build an asymmetric key
-key, err := NewAssymetricKey(privateKey, publicKey)
+key, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
// Build the CBC encryption module
-cbcMaterials, err := NewCBCSecureMaterials(key)
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
if err != nil {
- t.Fatal(err)
+ fmt.Println(err)
+ return
}
// Open a file to upload
@@ -1000,17 +1275,71 @@ if err != nil {
defer file.Close()
// Upload the encrypted form of the file
-n, err := minioClient.PutEncryptedObject("mybucket", "myobject", file, encryptMaterials, nil, nil)
+n, err := minioClient.PutEncryptedObject("mybucket", "myobject", file, cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded encrypted bytes: ", n)
+```
+
+<a name="FPutEncryptedObject"></a>
+### FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int, err error)
+Encrypt and upload an object from a file.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to file to be uploaded |
+|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
+
+__Example__
+
+
+```go
+// Load a private key
+privateKey, err := ioutil.ReadFile("private.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Load a public key
+publicKey, err := ioutil.ReadFile("public.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build an asymmetric key
+key, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build the CBC encryption module
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+n, err := minioClient.FPutEncryptedObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", cbcMaterials)
if err != nil {
fmt.Println(err)
return
}
+fmt.Println("Successfully uploaded encrypted bytes: ", n)
```
<a name="NewSSEInfo"></a>
### NewSSEInfo(key []byte, algo string) SSEInfo
-
Create a key object for use as encryption or decryption parameter in operations involving server-side-encryption with customer provided key (SSE-C).
__Parameters__
@@ -1020,18 +1349,11 @@ __Parameters__
| `key` | _[]byte_ | Byte-slice of the raw, un-encoded binary key |
| `algo` | _string_ | Algorithm to use in encryption or decryption with the given key. Can be empty (defaults to `AES256`) |
-__Example__
-
-``` go
-// Key for use in encryption/decryption
-keyInfo := NewSSEInfo([]byte{1,2,3}, "")
-```
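+__Example__
+
+A minimal sketch; the key bytes are placeholders:
+
+```go
+// Key for use in encryption/decryption
+keyInfo := minio.NewSSEInfo([]byte{1, 2, 3}, "")
+```
+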
## 5. Presigned operations
<a name="PresignedGetObject"></a>
### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
-
Generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
__Parameters__
@@ -1059,11 +1381,11 @@ if err != nil {
fmt.Println(err)
return
}
+fmt.Println("Successfully generated presigned URL", presignedURL)
```
<a name="PresignedPutObject"></a>
### PresignedPutObject(bucketName, objectName string, expiry time.Duration) (*url.URL, error)
-
Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
NOTE: you can upload to S3 only with the specified object name.
@@ -1089,12 +1411,11 @@ if err != nil {
fmt.Println(err)
return
}
-fmt.Println(presignedURL)
+fmt.Println("Successfully generated presigned URL", presignedURL)
```
<a name="PresignedHeadObject"></a>
### PresignedHeadObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
-
Generates a presigned URL for HTTP HEAD operations. Browsers/Mobile clients may point to this URL to directly get metadata from objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
__Parameters__
@@ -1121,23 +1442,18 @@ if err != nil {
fmt.Println(err)
return
}
+fmt.Println("Successfully generated presigned URL", presignedURL)
```
<a name="PresignedPostPolicy"></a>
### PresignedPostPolicy(PostPolicy) (*url.URL, map[string]string, error)
-
Allows setting policy conditions to a presigned URL for POST operations. Policies such as bucket name to receive object uploads, key name prefixes, expiry policy may be set.
-Create policy :
-
```go
+// Initialize policy condition config.
policy := minio.NewPostPolicy()
-```
-Apply upload policy restrictions:
-
-
-```go
+// Apply upload policy restrictions:
policy.SetBucket("mybucket")
policy.SetKey("myobject")
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
@@ -1148,18 +1464,17 @@ policy.SetContentType("image/png")
// Only allow content size in range 1KB to 1MB.
policy.SetContentLengthRange(1024, 1024*1024)
-// Get the POST form key/value object:
+// Add user metadata using the key "custom" and value "user"
+policy.SetUserMetadata("custom", "user")
+// Get the POST form key/value object:
url, formData, err := minioClient.PresignedPostPolicy(policy)
if err != nil {
fmt.Println(err)
return
}
-```
-POST your content from the command line using `curl`:
-
-```go
+// POST your content from the command line using `curl`
fmt.Printf("curl ")
for k, v := range formData {
fmt.Printf("-F %s=%s ", k, v)
@@ -1172,7 +1487,6 @@ fmt.Printf("%s\n", url)
<a name="SetBucketPolicy"></a>
### SetBucketPolicy(bucketname, objectPrefix string, policy policy.BucketPolicy) error
-
Set access permissions on bucket or an object prefix.
Importing `github.com/minio/minio-go/pkg/policy` package is needed.
@@ -1203,7 +1517,9 @@ __Example__
```go
-err := minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite)
+// Sets 'mybucket' with a sub-directory 'myprefix' to be anonymously accessible for
+// both read and write operations.
+err = minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite)
if err != nil {
fmt.Println(err)
return
@@ -1212,7 +1528,6 @@ if err != nil {
<a name="GetBucketPolicy"></a>
### GetBucketPolicy(bucketName, objectPrefix string) (policy.BucketPolicy, error)
-
Get access permissions on a bucket or a prefix.
Importing `github.com/minio/minio-go/pkg/policy` package is needed.
@@ -1247,7 +1562,6 @@ fmt.Println("Access permissions for mybucket is", bucketPolicy)
<a name="ListBucketPolicies"></a>
### ListBucketPolicies(bucketName, objectPrefix string) (map[string]BucketPolicy, error)
-
Get access permissions rules associated to the specified bucket and prefix.
__Parameters__
@@ -1263,7 +1577,7 @@ __Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketPolicies` | _map[string]BucketPolicy_ |Map of object resource paths and their permissions |
+|`bucketPolicies` | _map[string]minio.BucketPolicy_ |Map of object resource paths and their permissions |
|`err` | _error_ |Standard Error |
__Example__
@@ -1282,8 +1596,7 @@ for resource, permission := range bucketPolicies {
<a name="GetBucketNotification"></a>
### GetBucketNotification(bucketName string) (BucketNotification, error)
-
-Get all notification configurations related to the specified bucket.
+Get notification configuration on a bucket.
__Parameters__
@@ -1297,7 +1610,7 @@ __Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketNotification` | _BucketNotification_ |structure which holds all notification configurations|
+|`bucketNotification` | _minio.BucketNotification_ |structure which holds all notification configurations|
|`err` | _error_ |Standard Error |
__Example__
@@ -1306,10 +1619,12 @@ __Example__
```go
bucketNotification, err := minioClient.GetBucketNotification("mybucket")
if err != nil {
- log.Fatalf("Failed to get bucket notification configurations for mybucket - %v", err)
+ fmt.Println("Failed to get bucket notification configurations for mybucket", err)
+ return
}
-for _, topicConfig := range bucketNotification.TopicConfigs {
- for _, e := range topicConfig.Events {
+
+for _, queueConfig := range bucketNotification.QueueConfigs {
+ for _, e := range queueConfig.Events {
fmt.Println(e + " event is enabled")
}
}
@@ -1317,7 +1632,6 @@ for _, topicConfig := range bucketNotification.TopicConfigs {
<a name="SetBucketNotification"></a>
### SetBucketNotification(bucketName string, bucketNotification BucketNotification) error
-
Set a new bucket notification on a bucket.
__Parameters__
@@ -1326,7 +1640,7 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |Name of the bucket |
-|`bucketNotification` | _BucketNotification_ |Represents the XML to be sent to the configured web service |
+|`bucketNotification` | _minio.BucketNotification_ |Represents the XML to be sent to the configured web service |
__Return Values__
@@ -1339,24 +1653,25 @@ __Example__
```go
-topicArn := NewArn("aws", "sns", "us-east-1", "804605494417", "PhotoUpdate")
+queueArn := minio.NewArn("aws", "sqs", "us-east-1", "804605494417", "PhotoUpdate")
+
+queueConfig := minio.NewNotificationConfig(queueArn)
+queueConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
+queueConfig.AddFilterPrefix("photos/")
+queueConfig.AddFilterSuffix(".jpg")
-topicConfig := NewNotificationConfig(topicArn)
-topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
-lambdaConfig.AddFilterPrefix("photos/")
-lambdaConfig.AddFilterSuffix(".jpg")
+bucketNotification := minio.BucketNotification{}
+bucketNotification.AddQueue(queueConfig)
-bucketNotification := BucketNotification{}
-bucketNotification.AddTopic(topicConfig)
-err := c.SetBucketNotification(bucketName, bucketNotification)
+err = minioClient.SetBucketNotification("mybucket", bucketNotification)
if err != nil {
- fmt.Println("Unable to set the bucket notification: " + err)
+ fmt.Println("Unable to set the bucket notification: ", err)
+ return
}
```
<a name="RemoveAllBucketNotification"></a>
### RemoveAllBucketNotification(bucketName string) error
-
Remove all configured bucket notifications on a bucket.
__Parameters__
@@ -1377,18 +1692,16 @@ __Example__
```go
-err := c.RemoveAllBucketNotification(bucketName)
+err = minioClient.RemoveAllBucketNotification("mybucket")
if err != nil {
fmt.Println("Unable to remove bucket notifications.", err)
+ return
}
```
<a name="ListenBucketNotification"></a>
### ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo
-
-ListenBucketNotification API receives bucket notification events through the
-notification channel. The returned notification channel has two fields
-'Records' and 'Err'.
+ListenBucketNotification API receives bucket notification events through the notification channel. The returned notification channel has two fields 'Records' and 'Err'.
- 'Records' holds the notifications received from the server.
- 'Err' indicates any error while processing the received notifications.
@@ -1403,17 +1716,20 @@ __Parameters__
|`bucketName` | _string_ | Bucket to listen notifications on |
|`prefix` | _string_ | Object key prefix to filter notifications for |
|`suffix` | _string_ | Object key suffix to filter notifications for |
-|`events` | _[]string_| Enables notifications for specific event types |
+|`events` | _[]string_ | Enables notifications for specific event types |
|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification iterator |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`chan NotificationInfo` | _chan_ | Read channel for all notifications on bucket |
-|`NotificationInfo` | _object_ | Notification object represents events info |
-|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events |
-|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation |
+|`notificationInfo` | _chan minio.NotificationInfo_ | Channel of bucket notifications |
+
+__minio.NotificationInfo__
+
+|Field |Type |Description |
+|:---|:---| :---|
+|`notificationInfo.Records` | _[]minio.NotificationEvent_ | Collection of notification events |
+|`notificationInfo.Err` | _error_ | Carries any error that occurred during the operation (Standard Error) |
__Example__
@@ -1427,15 +1743,15 @@ doneCh := make(chan struct{})
defer close(doneCh)
// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
-for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
+for notificationInfo := range minioClient.ListenBucketNotification("mybucket", "myprefix/", ".mysuffix", []string{
"s3:ObjectCreated:*",
"s3:ObjectAccessed:*",
"s3:ObjectRemoved:*",
}, doneCh) {
if notificationInfo.Err != nil {
- log.Fatalln(notificationInfo.Err)
+ fmt.Println(notificationInfo.Err)
}
- log.Println(notificationInfo)
+ fmt.Println(notificationInfo)
}
```
@@ -1443,7 +1759,7 @@ for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET"
<a name="SetAppInfo"></a>
### SetAppInfo(appName, appVersion string)
-Adds application details to User-Agent.
+Adds custom application details to User-Agent.
__Parameters__
@@ -1463,8 +1779,7 @@ minioClient.SetAppInfo("myCloudApp", "1.0.0")
<a name="SetCustomTransport"></a>
### SetCustomTransport(customHTTPTransport http.RoundTripper)
-Overrides default HTTP transport. This is usually needed for debugging
-or for adding custom TLS certificates.
+Overrides default HTTP transport. This is usually needed for debugging or for adding custom TLS certificates.
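+
+A minimal sketch, assuming a custom transport that trusts an extra CA pool (`caCertPool` is a placeholder built from your certificates):
+
+```go
+tr := &http.Transport{
+    TLSClientConfig: &tls.Config{RootCAs: caCertPool},
+}
+minioClient.SetCustomTransport(tr)
+```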
__Parameters__
@@ -1475,8 +1790,7 @@ __Parameters__
<a name="TraceOn"></a>
### TraceOn(outputStream io.Writer)
-Enables HTTP tracing. The trace is written to the io.Writer
-provided. If outputStream is nil, trace is written to os.Stdout.
+Enables HTTP tracing. The trace is written to the io.Writer provided. If outputStream is nil, trace is written to os.Stdout.
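+
+For example, to write the trace to standard output:
+
+```go
+minioClient.TraceOn(os.Stdout)
+```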
__Parameters__
@@ -1492,7 +1806,7 @@ Disables HTTP tracing.
<a name="SetS3TransferAccelerate"></a>
### SetS3TransferAccelerate(acceleratedEndpoint string)
Set AWS S3 transfer acceleration endpoint for all API requests hereafter.
-NOTE: This API applies only to AWS S3 and ignored with other S3 compatible object storage services.
+NOTE: This API applies only to AWS S3 and is a no-op for other S3 compatible object storage services.
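+
+A usage sketch with the standard AWS accelerate endpoint:
+
+```go
+minioClient.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")
+```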
__Parameters__
diff --git a/vendor/github.com/minio/minio-go/docs/checker.go.template b/vendor/github.com/minio/minio-go/docs/checker.go.template
new file mode 100644
index 000000000..2e0f13a53
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/docs/checker.go.template
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Use a secure connection.
+ ssl := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ {{.Text}}
+}
diff --git a/vendor/github.com/minio/minio-go/docs/validator.go b/vendor/github.com/minio/minio-go/docs/validator.go
new file mode 100644
index 000000000..7d5cbaaab
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/docs/validator.go
@@ -0,0 +1,227 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "text/template"
+
+ "github.com/a8m/mark"
+ "github.com/gernest/wow"
+ "github.com/gernest/wow/spin"
+ "github.com/minio/cli"
+)
+
+func init() {
+ // Validate go binary.
+ if _, err := exec.LookPath("go"); err != nil {
+ panic(err)
+ }
+}
+
+var globalFlags = []cli.Flag{
+ cli.StringFlag{
+ Name: "m",
+ Value: "API.md",
+ Usage: "Path to markdown api documentation.",
+ },
+ cli.StringFlag{
+ Name: "t",
+ Value: "checker.go.template",
+ Usage: "Template used for generating the programs.",
+ },
+ cli.IntFlag{
+ Name: "skip",
+ Value: 2,
+ Usage: "Skip entries before validating the code.",
+ },
+}
+
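+// runGofmt runs 'gofmt -s -w -l' on the generated file, rewriting it in place.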
+func runGofmt(path string) (msg string, err error) {
+ cmdArgs := []string{"-s", "-w", "-l", path}
+ cmd := exec.Command("gofmt", cmdArgs...)
+ stdoutStderr, err := cmd.CombinedOutput()
+ if err != nil {
+ return "", err
+ }
+ return string(stdoutStderr), nil
+}
+
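+// runGoImports runs 'goimports -w' on the file to fill in any missing imports.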
+func runGoImports(path string) (msg string, err error) {
+ cmdArgs := []string{"-w", path}
+ cmd := exec.Command("goimports", cmdArgs...)
+ stdoutStderr, err := cmd.CombinedOutput()
+ if err != nil {
+ return string(stdoutStderr), err
+ }
+ return string(stdoutStderr), nil
+}
+
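+// runGoBuild compiles the file, discarding the binary, to verify that it builds.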
+func runGoBuild(path string) (msg string, err error) {
+ // Go build the path.
+ cmdArgs := []string{"build", "-o", "/dev/null", path}
+ cmd := exec.Command("go", cmdArgs...)
+ stdoutStderr, err := cmd.CombinedOutput()
+ if err != nil {
+ return string(stdoutStderr), err
+ }
+ return string(stdoutStderr), nil
+}
+
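+// validatorAction renders every code block in the markdown doc into the template and checks that the result formats and compiles.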
+func validatorAction(ctx *cli.Context) error {
+ if !ctx.IsSet("m") || !ctx.IsSet("t") {
+ return nil
+ }
+ docPath := ctx.String("m")
+ var err error
+ docPath, err = filepath.Abs(docPath)
+ if err != nil {
+ return err
+ }
+ data, err := ioutil.ReadFile(docPath)
+ if err != nil {
+ return err
+ }
+
+ templatePath := ctx.String("t")
+ templatePath, err = filepath.Abs(templatePath)
+ if err != nil {
+ return err
+ }
+
+ skipEntries := ctx.Int("skip")
+ m := mark.New(string(data), &mark.Options{
+ Gfm: true, // Github markdown support is enabled by default.
+ })
+
+ t, err := template.ParseFiles(templatePath)
+ if err != nil {
+ return err
+ }
+
+ tmpDir, err := ioutil.TempDir("", "md-verifier")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpDir)
+
+ entryN := 1
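+ // Render code nodes only; every other node type renders to an empty string.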
+ for i := mark.NodeText; i < mark.NodeCheckbox; i++ {
+ if mark.NodeCode != mark.NodeType(i) {
+ m.AddRenderFn(mark.NodeType(i), func(node mark.Node) (s string) {
+ return ""
+ })
+ continue
+ }
+ m.AddRenderFn(mark.NodeCode, func(node mark.Node) (s string) {
+ p, ok := node.(*mark.CodeNode)
+ if !ok {
+ return
+ }
+ p.Text = strings.NewReplacer("&lt;", "<", "&gt;", ">", "&quot;", `"`, "&amp;", "&").Replace(p.Text)
+ if skipEntries > 0 {
+ skipEntries--
+ return
+ }
+
+ testFilePath := filepath.Join(tmpDir, "example.go")
+ w, werr := os.Create(testFilePath)
+ if werr != nil {
+ panic(werr)
+ }
+ t.Execute(w, p)
+ w.Sync()
+ w.Close()
+ entryN++
+
+ msg, err := runGofmt(testFilePath)
+ if err != nil {
+ fmt.Printf("Failed running gofmt on %s, with (%s):(%s)\n", testFilePath, msg, err)
+ os.Exit(-1)
+ }
+
+ msg, err = runGoImports(testFilePath)
+ if err != nil {
+ fmt.Printf("Failed running gofmt on %s, with (%s):(%s)\n", testFilePath, msg, err)
+ os.Exit(-1)
+ }
+
+ msg, err = runGoBuild(testFilePath)
+ if err != nil {
+ fmt.Printf("Failed running gobuild on %s, with (%s):(%s)\n", testFilePath, msg, err)
+ fmt.Printf("Code with possible issue in %s:\n%s", docPath, p.Text)
+ fmt.Printf("To test `go build %s`\n", testFilePath)
+ os.Exit(-1)
+ }
+
+ // Once successfully built remove the test file
+ os.Remove(testFilePath)
+ return
+ })
+ }
+
+ w := wow.New(os.Stdout, spin.Get(spin.Moon), fmt.Sprintf(" Running validation tests in %s", tmpDir))
+
+ w.Start()
+ // Render markdown executes our checker on each code blocks.
+ _ = m.Render()
+ w.PersistWith(spin.Get(spin.Runner), " Successfully finished tests")
+ w.Stop()
+
+ return nil
+}
+
+func main() {
+ app := cli.NewApp()
+ app.Action = validatorAction
+ app.HideVersion = true
+ app.HideHelpCommand = true
+ app.Usage = "Validates code block sections inside API.md"
+ app.Author = "Minio.io"
+ app.Flags = globalFlags
+ // Help template for validator
+ app.CustomAppHelpTemplate = `NAME:
+ {{.Name}} - {{.Usage}}
+
+USAGE:
+ {{.Name}} {{if .VisibleFlags}}[FLAGS] {{end}}COMMAND{{if .VisibleFlags}} [COMMAND FLAGS | -h]{{end}} [ARGUMENTS...]
+
+COMMANDS:
+ {{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
+ {{end}}{{if .VisibleFlags}}
+FLAGS:
+ {{range .VisibleFlags}}{{.}}
+ {{end}}{{end}}
+TEMPLATE:
+ Validator uses Go's 'text/template' formatting so you need to ensure
+ your template is formatted correctly, check 'docs/checker.go.template'
+
+USAGE:
+ go run docs/validator.go -m docs/API.md -t /tmp/mycode.go.template
+
+`
+ app.Run(os.Args)
+}
diff --git a/vendor/github.com/minio/minio-go/docs/zh_CN/API.md b/vendor/github.com/minio/minio-go/docs/zh_CN/API.md
new file mode 100644
index 000000000..d20ca102a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/docs/zh_CN/API.md
@@ -0,0 +1,1820 @@
+# Minio Go Client API Documentation [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)
+
+## Initialize the Minio Client object.
+
+## Minio
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Use ssl
+ ssl := true
+
+ // Initialize the minio client object.
+ minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+}
+```
+
+## AWS S3
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Use ssl
+ ssl := true
+
+ // Initialize the minio client object.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ssl)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+}
+```
+
+| Bucket operations | Object operations | Encrypted object operations | Presigned operations | Bucket policy/notification operations | Client custom settings |
+| :--- | :--- | :--- | :--- | :--- | :--- |
+| [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
+| [`ListBuckets`](#ListBuckets) | [`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
+| [`BucketExists`](#BucketExists) | [`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
+| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
+| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | [`NewSSEInfo`](#NewSSEInfo) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
+| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | [`FPutEncryptedObject`](#FPutEncryptedObject) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | |
+| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) | |
+| | [`FPutObject`](#FPutObject) | | | | |
+| | [`FGetObject`](#FGetObject) | | | | |
+| | [`ComposeObject`](#ComposeObject) | | | | |
+| | [`NewSourceInfo`](#NewSourceInfo) | | | | |
+| | [`NewDestinationInfo`](#NewDestinationInfo) | | | | |
+| | [`PutObjectWithContext`](#PutObjectWithContext) | | | | |
+| | [`GetObjectWithContext`](#GetObjectWithContext) | | | | |
+| | [`FPutObjectWithContext`](#FPutObjectWithContext) | | | | |
+| | [`FGetObjectWithContext`](#FGetObjectWithContext) | | | | |
+
+## 1. 构造函数
+<a name="Minio"></a>
+
+### New(endpoint, accessKeyID, secretAccessKey string, ssl bool) (*Client, error)
+初始化一个新的client对象。
+
+__参数__
+
+|参数 | 类型 |描述 |
+|:---|:---| :---|
+|`endpoint` | _string_ |S3兼容对象存储服务endpoint |
+|`accessKeyID` |_string_ |对象存储的Access key |
+|`secretAccessKey` | _string_ |对象存储的Secret key |
+|`ssl` | _bool_ |true代表使用HTTPS |
+
+### NewWithRegion(endpoint, accessKeyID, secretAccessKey string, ssl bool, region string) (*Client, error)
+初始化带有region配置的minio client对象。和New()不同的是,NewWithRegion避免了bucket-location查询操作,因此会略快一些。如果你的应用只使用一个region,可以使用这个方法。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`endpoint` | _string_ |S3兼容对象存储服务endpoint |
+|`accessKeyID` |_string_ |对象存储的Access key |
+|`secretAccessKey` | _string_ |对象存储的Secret key |
+|`ssl` | _bool_ |true代表使用HTTPS |
+|`region`| _string_ | 对象存储的region |
+
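+__示例__
+
+下面是一个简单的示意(假设使用us-east-1 region,endpoint和秘钥均为示例值):
+
+```go
+// Initialize minio client object with a fixed region, which skips the
+// bucket-location lookup. Credentials below are placeholder values.
+s3Client, err := minio.NewWithRegion("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true, "us-east-1")
+if err != nil {
+	fmt.Println(err)
+	return
+}
+```
+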
+## 2. 操作存储桶
+
+<a name="MakeBucket"></a>
+### MakeBucket(bucketName, location string) error
+创建一个存储桶。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`bucketName` | _string_ | 存储桶名称 |
+| `location` | _string_ | 存储桶被创建的region(地区),默认是us-east-1(美国东一区),下面列举的是其它合法的值。注意:如果使用的是minio服务,region在其配置文件中指定(默认是us-east-1)。|
+| | |us-east-1 |
+| | |us-west-1 |
+| | |us-west-2 |
+| | |eu-west-1 |
+| | | eu-central-1|
+| | | ap-southeast-1|
+| | | ap-northeast-1|
+| | | ap-southeast-2|
+| | | sa-east-1|
+
+
+__示例__
+
+
+```go
+err = minioClient.MakeBucket("mybucket", "us-east-1")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully created mybucket.")
+```
+
+<a name="ListBuckets"></a>
+### ListBuckets() ([]BucketInfo, error)
+列出所有的存储桶。
+
+__返回值__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`bucketList` | _[]minio.BucketInfo_ | 所有存储桶的list。 |
+
+
+__minio.BucketInfo__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`bucket.Name` | _string_ | 存储桶名称 |
+|`bucket.CreationDate` | _time.Time_ | 存储桶的创建时间 |
+
+
+__示例__
+
+
+```go
+buckets, err := minioClient.ListBuckets()
+if err != nil {
+ fmt.Println(err)
+ return
+}
+for _, bucket := range buckets {
+ fmt.Println(bucket)
+}
+```
+
+<a name="BucketExists"></a>
+### BucketExists(bucketName string) (found bool, err error)
+检查存储桶是否存在。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`found` | _bool_ | 存储桶是否存在 |
+|`err` | _error_ | 标准Error |
+
+
+__示例__
+
+
+```go
+found, err := minioClient.BucketExists("mybucket")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+if found {
+ fmt.Println("Bucket found")
+}
+```
+
+<a name="RemoveBucket"></a>
+### RemoveBucket(bucketName string) error
+删除一个存储桶,存储桶必须为空才能被成功删除。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+
+__示例__
+
+
+```go
+err = minioClient.RemoveBucket("mybucket")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="ListObjects"></a>
+### ListObjects(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+列举存储桶里的对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectPrefix` |_string_ | 要列举的对象前缀 |
+|`recursive` | _bool_ |`true`代表递归查找,`false`代表类似文件夹查找,以'/'分隔,不查子文件夹。 |
+|`doneCh` | _chan struct{}_ | 在该channel上发送一个message即可结束ListObjects iterator。 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`objectInfo` | _chan minio.ObjectInfo_ |存储桶中所有对象的read channel,对象的格式如下: |
+
+__minio.ObjectInfo__
+
+|属性 |类型 |描述 |
+|:---|:---| :---|
+|`objectInfo.Key` | _string_ |对象的名称 |
+|`objectInfo.Size` | _int64_ |对象的大小 |
+|`objectInfo.ETag` | _string_ |对象的MD5校验码 |
+|`objectInfo.LastModified` | _time.Time_ |对象的最后修改时间 |
+
+__示例__
+
+```go
+// Create a done channel to control 'ListObjects' go routine.
+doneCh := make(chan struct{})
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+isRecursive := true
+objectCh := minioClient.ListObjects("mybucket", "myprefix", isRecursive, doneCh)
+for object := range objectCh {
+ if object.Err != nil {
+ fmt.Println(object.Err)
+ return
+ }
+ fmt.Println(object)
+}
+```
+
+
+<a name="ListObjectsV2"></a>
+### ListObjectsV2(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+使用listing API v2版本列举存储桶中的对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+| `objectPrefix` |_string_ | 要列举的对象前缀 |
+| `recursive` | _bool_ |`true`代表递归查找,`false`代表类似文件夹查找,以'/'分隔,不查子文件夹。 |
+|`doneCh` | _chan struct{}_ | 在该channel上发送一个message即可结束ListObjectsV2 iterator。 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`objectInfo` | _chan minio.ObjectInfo_ |存储桶中所有对象的read channel |
+
+__示例__
+
+```go
+// Create a done channel to control 'ListObjectsV2' go routine.
+doneCh := make(chan struct{})
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+isRecursive := true
+objectCh := minioClient.ListObjectsV2("mybucket", "myprefix", isRecursive, doneCh)
+for object := range objectCh {
+ if object.Err != nil {
+ fmt.Println(object.Err)
+ return
+ }
+ fmt.Println(object)
+}
+```
+
+<a name="ListIncompleteUploads"></a>
+### ListIncompleteUploads(bucketName, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo
+列举存储桶中未完整上传的对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+| `prefix` |_string_ | 不完整上传的对象的前缀 |
+| `recursive` | _bool_ |`true`代表递归查找,`false`代表类似文件夹查找,以'/'分隔,不查子文件夹。 |
+|`doneCh` | _chan struct{}_ | 在该channel上发送一个message即可结束ListIncompleteUploads iterator。 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`multiPartInfo` | _chan minio.ObjectMultipartInfo_ |multipart对象格式如下: |
+
+__minio.ObjectMultipartInfo__
+
+|属性 |类型 |描述 |
+|:---|:---| :---|
+|`multiPartObjInfo.Key` | _string_ |未完整上传的对象的名称 |
+|`multiPartObjInfo.UploadID` | _string_ |未完整上传的对象的Upload ID |
+|`multiPartObjInfo.Size` | _int64_ |未完整上传的对象的大小 |
+
+__示例__
+
+
+```go
+// Create a done channel to control 'ListObjects' go routine.
+doneCh := make(chan struct{})
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+isRecursive := true // Recursively list everything at 'myprefix'
+multiPartObjectCh := minioClient.ListIncompleteUploads("mybucket", "myprefix", isRecursive, doneCh)
+for multiPartObject := range multiPartObjectCh {
+ if multiPartObject.Err != nil {
+ fmt.Println(multiPartObject.Err)
+ return
+ }
+ fmt.Println(multiPartObject)
+}
+```
+
+## 3. 操作对象
+
+<a name="GetObject"></a>
+### GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error)
+返回对象数据的流。大多数常见的错误发生在读取流的时候。
+
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`opts` | _minio.GetObjectOptions_ | GET请求的一些额外参数,像encryption,If-Match |
+
+
+__minio.GetObjectOptions__
+
+|参数 | 类型 | 描述 |
+|:---|:---|:---|
+| `opts.Materials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
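+GetObjectOptions还提供了设置条件请求头的方法,下面是一个简单的示意(SetMatchETag为minio-go提供的方法,ETag取值仅作演示):
+
+```go
+opts := minio.GetObjectOptions{}
+// Only fetch the object when its ETag matches the given value (placeholder ETag).
+opts.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+```
+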
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`object` | _*minio.Object_ |_minio.Object_代表了一个object reader。它实现了io.Reader, io.Seeker, io.ReaderAt and io.Closer接口。 |
+
+
+__示例__
+
+
+```go
+object, err := minioClient.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+localFile, err := os.Create("/tmp/local-file.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+if _, err = io.Copy(localFile, object); err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="FGetObject"></a>
+### FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error
+下载并将文件保存到本地文件系统。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |下载后保存的路径 |
+|`opts` | _minio.GetObjectOptions_ | GET请求的一些额外参数,像encryption,If-Match |
+
+
+__示例__
+
+
+```go
+err = minioClient.FGetObject("mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+<a name="GetObjectWithContext"></a>
+### GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error)
+和GetObject操作是一样的,不过传入了取消请求的context。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |请求上下文(Request context) |
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`opts` | _minio.GetObjectOptions_ | GET请求的一些额外参数,像encryption,If-Match |
+
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`object` | _*minio.Object_ |_minio.Object_代表了一个object reader。它实现了io.Reader, io.Seeker, io.ReaderAt and io.Closer接口。 |
+
+
+__示例__
+
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+object, err := minioClient.GetObjectWithContext(ctx, "mybucket", "myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+localFile, err := os.Create("/tmp/local-file.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+if _, err = io.Copy(localFile, object); err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="FGetObjectWithContext"></a>
+### FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error
+和FGetObject操作是一样的,不过允许取消请求。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |请求上下文 |
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |下载后保存的路径 |
+|`opts` | _minio.GetObjectOptions_ | GET请求的一些额外参数,像encryption,If-Match |
+
+
+__示例__
+
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+err = minioClient.FGetObjectWithContext(ctx, "mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="FGetEncryptedObject"></a>
+### FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error
+和FGetObject操作是一样的,不过会对加密请求进行解密。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |下载后保存的路径|
+|`materials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
+
+__示例__
+
+
+```go
+// Generate a master symmetric key
+key := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+// Build the CBC encryption material
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+err = minioClient.FGetEncryptedObject("mybucket", "myobject", "/tmp/myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="PutObject"></a>
+### PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, opts PutObjectOptions) (n int, err error)
+当对象小于64MiB时,直接在一次PUT请求里完成上传。当大于64MiB时,PutObject会根据文件的实际大小,自动将对象拆分成64MiB或更大的分块进行上传。对象的最大大小是5TB。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`reader` | _io.Reader_ |任意实现了io.Reader的GO类型 |
+|`objectSize`| _int64_ |上传的对象的大小,-1代表未知。 |
+|`opts` | _minio.PutObjectOptions_ | 允许用户设置可选的自定义元数据,内容标题,加密密钥和用于分段上传操作的线程数量。 |
+
+__minio.PutObjectOptions__
+
+|属性 | 类型 | 描述 |
+|:--- |:--- | :--- |
+| `opts.UserMetadata` | _map[string]string_ | 用户元数据的Map|
+| `opts.Progress` | _io.Reader_ | 获取上传进度的Reader |
+| `opts.ContentType` | _string_ | 对象的Content type, 例如"application/text" |
+| `opts.ContentEncoding` | _string_ | 对象的Content encoding,例如"gzip" |
+| `opts.ContentDisposition` | _string_ | 对象的Content disposition, "inline" |
+| `opts.CacheControl` | _string_ | 指定针对请求和响应的缓存机制,例如"max-age=600"|
+| `opts.EncryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
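+PutObjectOptions的各字段可以组合使用,例如(下面UserMetadata中的键值仅作示意):
+
+```go
+opts := minio.PutObjectOptions{
+	ContentType:  "application/octet-stream",
+	CacheControl: "max-age=600",
+	UserMetadata: map[string]string{"custom": "user"},
+}
+```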
+
+__示例__
+
+
+```go
+file, err := os.Open("my-testfile")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer file.Close()
+
+fileStat, err := file.Stat()
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+n, err := minioClient.PutObject("mybucket", "myobject", file, fileStat.Size(), minio.PutObjectOptions{ContentType:"application/octet-stream"})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
+
+minio-go SDK v3.0.3中提供的API方法PutObjectWithSize、PutObjectWithMetadata、PutObjectStreaming和PutObjectWithProgress,已被新的接受PutObjectOptions参数的PutObject调用变体所取代。
+
+<a name="PutObjectWithContext"></a>
+### PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, opts PutObjectOptions) (n int, err error)
+和PutObject是一样的,不过允许取消请求。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |请求上下文 |
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`reader` | _io.Reader_ |任何实现io.Reader的Go类型 |
+|`objectSize`| _int64_ | 上传的对象的大小,-1代表未知 |
+|`opts` | _minio.PutObjectOptions_ |允许用户设置可选的自定义元数据,content-type,content-encoding,content-disposition以及cache-control headers,传递加密模块以加密对象,并可选地设置multipart put操作的线程数量。|
+
+
+__示例__
+
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 10 * time.Second)
+defer cancel()
+
+file, err := os.Open("my-testfile")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer file.Close()
+
+fileStat, err := file.Stat()
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+n, err := minioClient.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", file, fileStat.Size(), minio.PutObjectOptions{
+ ContentType: "application/octet-stream",
+})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
+
+<a name="CopyObject"></a>
+### CopyObject(dst DestinationInfo, src SourceInfo) error
+通过在服务端对已存在的对象进行拷贝,实现新建或者替换对象。它支持有条件的拷贝,拷贝对象的一部分,以及在服务端的加解密。请查看`SourceInfo`和`DestinationInfo`两个类型来了解更多细节。
+
+如需将多个源对象拷贝合并到一个目标对象,请查看`ComposeObject` API。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`dst` | _minio.DestinationInfo_ |目标对象 |
+|`src` | _minio.SourceInfo_ |源对象 |
+
+
+__示例__
+
+
+```go
+// Use-case 1: Simple copy object with no conditions.
+// Source object
+src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+```go
+// Use-case 2:
+// Copy object with copy-conditions, and copying only part of the source object.
+// 1. that matches a given ETag
+// 2. and modified after 1st April 2014
+// 3. but unmodified since 23rd April 2014
+// 4. copy only first 1MiB of object.
+
+// Source object
+src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
+
+// Set matching ETag condition, copy object which matches the following ETag.
+src.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
+
+// Set modified condition, copy object modified since 2014 April 1.
+src.SetModifiedSinceCond(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC))
+
+// Set unmodified condition, copy object unmodified since 2014 April 23.
+src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 23, 0, 0, 0, 0, time.UTC))
+
+// Set copy-range of only first 1MiB of file.
+src.SetRange(0, 1024*1024-1)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="ComposeObject"></a>
+### ComposeObject(dst minio.DestinationInfo, srcs []minio.SourceInfo) error
+通过服务端拷贝,将多个源对象合并创建成一个新的对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---|:---|
+|`dst` | _minio.DestinationInfo_ |要被创建的目标对象 |
+|`srcs` | _[]minio.SourceInfo_ |要合并的多个源对象 |
+
+
+__示例__
+
+
+```go
+// Prepare source decryption key (here we assume same key to
+// decrypt all source objects.)
+decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "")
+
+// Source objects to concatenate. We also specify decryption
+// key for each
+src1 := minio.NewSourceInfo("bucket1", "object1", &decKey)
+src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
+
+src2 := minio.NewSourceInfo("bucket2", "object2", &decKey)
+src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2")
+
+src3 := minio.NewSourceInfo("bucket3", "object3", &decKey)
+src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38")
+
+// Create slice of sources.
+srcs := []minio.SourceInfo{src1, src2, src3}
+
+// Prepare destination encryption key
+encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "")
+
+// Create destination info
+dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Compose object call by concatenating multiple source files.
+err = minioClient.ComposeObject(dst, srcs)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+fmt.Println("Composed object successfully.")
+```
+
+<a name="NewSourceInfo"></a>
+### NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo
+构建一个可用于服务端拷贝操作(像`CopyObject`和`ComposeObject`)的`SourceInfo`对象。该对象可用于给源对象设置拷贝条件。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+| :--- | :--- | :--- |
+| `bucket` | _string_ | 源存储桶 |
+| `object` | _string_ | 源对象 |
+| `decryptSSEC` | _*minio.SSEInfo_ | 源对象的解密信息 (`nil`代表不用解密) |
+
+__示例__
+
+```go
+// No decryption parameter.
+src := minio.NewSourceInfo("bucket", "object", nil)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+```go
+// With decryption parameter.
+decKey := minio.NewSSEInfo([]byte{1,2,3}, "")
+src := minio.NewSourceInfo("bucket", "object", &decKey)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="NewDestinationInfo"></a>
+### NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, userMeta map[string]string) (DestinationInfo, error)
+构建一个用于服务端拷贝操作(像`CopyObject`和`ComposeObject`)的用作目标对象的`DestinationInfo`。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+| :--- | :--- | :--- |
+| `bucket` | _string_ | 目标存储桶名称 |
+| `object` | _string_ | 目标对象名称 |
+| `encryptSSEC` | _*minio.SSEInfo_ | 源对象的加密信息 (`nil`代表不用加密) |
+| `userMeta` | _map[string]string_ | 给目标对象的用户元数据,如果是nil,并只有一个源对象,则将源对象的用户元数据拷贝给目标对象。|
+
+__示例__
+
+```go
+// No encryption parameter.
+src := minio.NewSourceInfo("bucket", "object", nil)
+dst, err := minio.NewDestinationInfo("bucket", "object", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+```go
+src := minio.NewSourceInfo("bucket", "object", nil)
+
+// With encryption parameter.
+encKey := minio.NewSSEInfo([]byte{1,2,3}, "")
+dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="FPutObject"></a>
+### FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (length int64, err error)
+将filePath对应的文件内容上传到一个对象中。
+
+当对象小于64MiB时,FPutObject直接在一次PUT请求里进行上传。当大于64MiB时,根据文件的实际大小,FPutObject会自动地将对象进行拆分成64MiB一块或更大一些进行上传。对象的最大大小是5TB。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |要上传的文件的路径 |
+|`opts` | _minio.PutObjectOptions_ |允许用户设置可选的自定义元数据,content-type,content-encoding,content-disposition以及cache-control headers,传递加密模块以加密对象,并可选地设置multipart put操作的线程数量。 |
+
+
+__示例__
+
+
+```go
+n, err := minioClient.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{
+ ContentType: "application/csv",
+})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
+
+<a name="FPutObjectWithContext"></a>
+### FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (length int64, err error)
+和FPutObject操作是一样的,不过允许取消请求。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |请求上下文 |
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |要上传的文件的路径 |
+|`opts` | _minio.PutObjectOptions_ |允许用户设置可选的自定义元数据,content-type,content-encoding,content-disposition以及cache-control headers,传递加密模块以加密对象,并可选地设置multipart put操作的线程数量。 |
+
+__示例__
+
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+n, err := minioClient.FPutObjectWithContext(ctx, "mybucket", "myobject.csv", "/tmp/otherobject.csv", minio.PutObjectOptions{ContentType:"application/csv"})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
+
+<a name="StatObject"></a>
+### StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error)
+获取对象的元数据。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`opts` | _minio.StatObjectOptions_ | GET info/stat请求的一些额外参数,像encryption,If-Match |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`objInfo` | _minio.ObjectInfo_ |对象stat信息 |
+
+
+__minio.ObjectInfo__
+
+|属性 |类型 |描述 |
+|:---|:---| :---|
+|`objInfo.LastModified` | _time.Time_ |对象的最后修改时间 |
+|`objInfo.ETag` | _string_ |对象的MD5校验码|
+|`objInfo.ContentType` | _string_ |对象的Content type|
+|`objInfo.Size` | _int64_ |对象的大小|
+
+
+__示例__
+
+
+```go
+objInfo, err := minioClient.StatObject("mybucket", "myobject", minio.StatObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println(objInfo)
+```
+
+<a name="RemoveObject"></a>
+### RemoveObject(bucketName, objectName string) error
+删除一个对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+
+__示例__
+
+```go
+err = minioClient.RemoveObject("mybucket", "myobject")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="RemoveObjects"></a>
+### RemoveObjects(bucketName string, objectsCh chan string) (errorCh <-chan RemoveObjectError)
+
+从一个input channel里删除一个对象集合。一次发送到服务端的删除请求最多可删除1000个对象。错误信息通过error channel返回。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectsCh` | _chan string_ | 要删除的对象的channel |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`errorCh` | _<-chan minio.RemoveObjectError_ | 删除时观察到的错误的Receive-only channel。 |
+
+__示例__
+
+```go
+objectsCh := make(chan string)
+
+// Send object names that are needed to be removed to objectsCh
+go func() {
+ defer close(objectsCh)
+ // List all objects from a bucket-name with a matching prefix.
+ for object := range minioClient.ListObjects("my-bucketname", "my-prefixname", true, nil) {
+ if object.Err != nil {
+ log.Fatalln(object.Err)
+ }
+ objectsCh <- object.Key
+ }
+}()
+
+for rErr := range minioClient.RemoveObjects("mybucket", objectsCh) {
+ fmt.Println("Error detected during deletion: ", rErr)
+}
+```
+
+<a name="RemoveIncompleteUpload"></a>
+### RemoveIncompleteUpload(bucketName, objectName string) error
+删除一个未完整上传的对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+
+__示例__
+
+
+```go
+err = minioClient.RemoveIncompleteUpload("mybucket", "myobject")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+## 4. 操作加密对象
+
+<a name="NewSymmetricKey"></a>
+### NewSymmetricKey(key []byte) *encrypt.SymmetricKey
+根据传入的key数据,创建一个用于加密解密的对称秘钥。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`key` | _[]byte_ |对称秘钥数据 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`symmetricKey` | _*encrypt.SymmetricKey_ | 加密解密的对称秘钥 |
+
+__示例__
+
+```go
+symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+// Build the CBC encryption material with symmetric key.
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(symKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully initialized Symmetric key CBC materials", cbcMaterials)
+
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer object.Close()
+```
+
+<a name="NewAsymmetricKey"></a>
+### NewAsymmetricKey(privateKey []byte, publicKey []byte) (*encrypt.AsymmetricKey, error)
+根据传入的private key和public key数据,创建一个用于加密解密的非对称秘钥。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`privateKey` | _[]byte_ | Private key数据 |
+|`publicKey` | _[]byte_ | Public key数据 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`asymmetricKey` | _*encrypt.AsymmetricKey_ | 加密解密的非对称秘钥 |
+|`err` | _error_ | 标准Error |
+
+__示例__
+
+```go
+privateKey, err := ioutil.ReadFile("private.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+publicKey, err := ioutil.ReadFile("public.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Initialize the asymmetric key
+asymmetricKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build the CBC encryption material for asymmetric key.
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(asymmetricKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully initialized Asymmetric key CBC materials", cbcMaterials)
+
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer object.Close()
+```
+
+<a name="GetEncryptedObject"></a>
+### GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error)
+
+返回对象的解密流。大多数常见的错误发生在读取流的时候。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ | 存储桶名称 |
+|`objectName` | _string_ | 对象的名称 |
+|`encryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`stream` | _io.ReadCloser_ | 返回对象的reader,调用者需要在读取之后进行关闭。 |
+|`err` | _error_ | 错误信息 |
+
+
+__示例__
+
+
+```go
+// Generate a master symmetric key
+key := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+// Build the CBC encryption material
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer object.Close()
+
+localFile, err := os.Create("/tmp/local-file.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer localFile.Close()
+
+if _, err = io.Copy(localFile, object); err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="PutEncryptedObject"></a>
+
+### PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int, err error)
+加密并上传对象。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`reader` | _io.Reader_ |任何实现io.Reader的Go类型 |
+|`encryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
+__示例__
+
+```go
+// Load a private key
+privateKey, err := ioutil.ReadFile("private.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Load a public key
+publicKey, err := ioutil.ReadFile("public.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build an asymmetric key
+key, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build the CBC encryption module
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Open a file to upload
+file, err := os.Open("my-testfile")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer file.Close()
+
+// Upload the encrypted form of the file
+n, err := minioClient.PutEncryptedObject("mybucket", "myobject", file, cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded encrypted bytes: ", n)
+```
+
+<a name="FPutEncryptedObject"></a>
+### FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int, err error)
+将文件内容加密后上传到对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |要上传的文件的路径 |
+|`encryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
+__示例__
+
+
+```go
+// Load a private key
+privateKey, err := ioutil.ReadFile("private.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Load a public key
+publicKey, err := ioutil.ReadFile("public.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build an asymmetric key
+key, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build the CBC encryption module
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+n, err := minioClient.FPutEncryptedObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded encrypted bytes: ", n)
+```
+
+<a name="NewSSEInfo"></a>
+
+### NewSSEInfo(key []byte, algo string) SSEInfo
+创建一个key对象,用于通过用户提供的key(SSE-C)进行服务端加解密操作。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+| :--- | :--- | :--- |
+| `key` | _[]byte_ | 未编码的二进制key数组 |
+| `algo` | _string_ | 加密算法,可以为空(默认是`AES256`) |
+
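+__示例__
+
+下面是一个简单的示意(key取值仅作演示,SSE-C通常要求32字节的key):
+
+```go
+// Build an SSE-C key object for server-side encryption/decryption.
+// The key bytes below are placeholders.
+encKey := minio.NewSSEInfo([]byte("32byteslongsecretkeymustprovided"), "")
+
+// Pass it to server-side copy operations, e.g. as the decryption key of a source object.
+src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", &encKey)
+```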
+
+## 5. Presigned操作
+
+<a name="PresignedGetObject"></a>
+### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
+生成一个用于HTTP GET操作的presigned URL。浏览器/移动客户端可以在即使存储桶为私有的情况下也可以通过这个URL进行下载。这个presigned URL可以有一个过期时间,默认是7天。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`expiry` | _time.Duration_ |presigned URL的过期时间,单位是秒 |
+|`reqParams` | _url.Values_ |额外的响应头,支持_response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_。 |
+
+
+__示例__
+
+
+```go
+// Set request parameters for content-disposition.
+reqParams := make(url.Values)
+reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
+
+// Generates a presigned url which expires in a day.
+presignedURL, err := minioClient.PresignedGetObject("mybucket", "myobject", time.Second * 24 * 60 * 60, reqParams)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully generated presigned URL", presignedURL)
+```
+
+<a name="PresignedPutObject"></a>
+### PresignedPutObject(bucketName, objectName string, expiry time.Duration) (*url.URL, error)
+生成一个用于HTTP PUT操作的presigned URL。浏览器/移动客户端即使在存储桶为私有的情况下,也可以通过这个URL进行上传。这个presigned URL可以有一个过期时间,默认是7天。
+
+注意:你可以通过只指定对象名称上传到S3。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`expiry` | _time.Duration_ |presigned URL的过期时间,单位是秒 |
+
+
+__示例__
+
+
+```go
+// Generates a url which expires in a day.
+expiry := time.Second * 24 * 60 * 60 // 1 day.
+presignedURL, err := minioClient.PresignedPutObject("mybucket", "myobject", expiry)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully generated presigned URL", presignedURL)
+```
+
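+生成URL后,可以用任意HTTP客户端直接上传。下面是使用Go标准库net/http的一个简单示意(假设presignedURL为上面生成的URL,上传内容仅作演示):
+
+```go
+// Upload data to the presigned URL with a plain HTTP PUT request.
+req, err := http.NewRequest(http.MethodPut, presignedURL.String(), strings.NewReader("hello world"))
+if err != nil {
+	fmt.Println(err)
+	return
+}
+resp, err := http.DefaultClient.Do(req)
+if err != nil {
+	fmt.Println(err)
+	return
+}
+defer resp.Body.Close()
+fmt.Println("Upload status:", resp.Status)
+```
+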
+<a name="PresignedHeadObject"></a>
+### PresignedHeadObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
+生成一个用于HTTP HEAD操作的presigned URL。浏览器/移动客户端即使在存储桶为私有的情况下,也可以通过这个URL获取对象的元数据。这个presigned URL可以有一个过期时间,默认是7天。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`expiry` | _time.Duration_ |presigned URL的过期时间,单位是秒 |
+|`reqParams` | _url.Values_ |额外的响应头,支持_response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_。 |
+
+
+__示例__
+
+
+```go
+// Set request parameters for content-disposition.
+reqParams := make(url.Values)
+reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
+
+// Generates a presigned url which expires in a day.
+presignedURL, err := minioClient.PresignedHeadObject("mybucket", "myobject", time.Second * 24 * 60 * 60, reqParams)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully generated presigned URL", presignedURL)
+```
+
+<a name="PresignedPostPolicy"></a>
+### PresignedPostPolicy(PostPolicy) (*url.URL, map[string]string, error)
+允许给POST操作的presigned URL设置策略条件。这些策略包括:接收上传对象的存储桶名称、对象名称前缀以及过期时间等。
+
+__示例__
+
+```go
+// Initialize policy condition config.
+policy := minio.NewPostPolicy()
+
+// Apply upload policy restrictions:
+policy.SetBucket("mybucket")
+policy.SetKey("myobject")
+policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+
+// Only allow 'png' images.
+policy.SetContentType("image/png")
+
+// Only allow content size in range 1KB to 1MB.
+policy.SetContentLengthRange(1024, 1024*1024)
+
+// Add a user metadata using the key "custom" and value "user"
+policy.SetUserMetadata("custom", "user")
+
+// Get the POST form key/value object:
+url, formData, err := minioClient.PresignedPostPolicy(policy)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// POST your content from the command line using `curl`
+fmt.Printf("curl ")
+for k, v := range formData {
+ fmt.Printf("-F %s=%s ", k, v)
+}
+fmt.Printf("-F file=@/etc/bash.bashrc ")
+fmt.Printf("%s\n", url)
+```
+
+## 6. 存储桶策略/通知
+
+<a name="SetBucketPolicy"></a>
+### SetBucketPolicy(bucketname, objectPrefix string, policy policy.BucketPolicy) error
+给存储桶或者对象前缀设置访问权限。
+
+必须引入`github.com/minio/minio-go/pkg/policy`包。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称|
+|`objectPrefix` | _string_ |对象的名称前缀|
+|`policy` | _policy.BucketPolicy_ |Policy的取值如下: |
+| | | _policy.BucketPolicyNone_ |
+| | | _policy.BucketPolicyReadOnly_ |
+| | | _policy.BucketPolicyReadWrite_ |
+| | | _policy.BucketPolicyWriteOnly_ |
+
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`err` | _error_ |标准Error |
+
+
+__示例__
+
+
+```go
+// Sets 'mybucket' with a sub-directory 'myprefix' to be anonymously accessible for
+// both read and write operations.
+err = minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="GetBucketPolicy"></a>
+### GetBucketPolicy(bucketName, objectPrefix string) (policy.BucketPolicy, error)
+获取存储桶或者对象前缀的访问权限。
+
+必须引入`github.com/minio/minio-go/pkg/policy`包。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectPrefix` | _string_ |该存储桶下的对象前缀 |
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketPolicy` | _policy.BucketPolicy_ |取值如下: `none`, `readonly`, `readwrite`,或者`writeonly` |
+|`err` | _error_ |标准Error |
+
+__示例__
+
+
+```go
+bucketPolicy, err := minioClient.GetBucketPolicy("mybucket", "")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Access permissions for mybucket is", bucketPolicy)
+```
+
+<a name="ListBucketPolicies"></a>
+### ListBucketPolicies(bucketName, objectPrefix string) (map[string]BucketPolicy, error)
+获取指定的存储桶和前缀的访问策略。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectPrefix` | _string_ |该存储桶下的对象前缀 |
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketPolicies` | _map[string]minio.BucketPolicy_ |对象以及它们的权限的Map |
+|`err` | _error_ |标准Error |
+
+__示例__
+
+
+```go
+bucketPolicies, err := minioClient.ListBucketPolicies("mybucket", "")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+for resource, permission := range bucketPolicies {
+ fmt.Println(resource, " => ", permission)
+}
+```
+
+<a name="GetBucketNotification"></a>
+### GetBucketNotification(bucketName string) (BucketNotification, error)
+获取存储桶的通知配置
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketNotification` | _minio.BucketNotification_ |含有所有通知配置的数据结构|
+|`err` | _error_ |标准Error |
+
+__示例__
+
+
+```go
+bucketNotification, err := minioClient.GetBucketNotification("mybucket")
+if err != nil {
+ fmt.Println("Failed to get bucket notification configurations for mybucket", err)
+ return
+}
+
+for _, queueConfig := range bucketNotification.QueueConfigs {
+ for _, e := range queueConfig.Events {
+ fmt.Println(e + " event is enabled")
+ }
+}
+```
+
+<a name="SetBucketNotification"></a>
+### SetBucketNotification(bucketName string, bucketNotification BucketNotification) error
+给存储桶设置新的通知
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`bucketNotification` | _minio.BucketNotification_ |发送给配置的web service的XML |
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`err` | _error_ |标准Error |
+
+__示例__
+
+
+```go
+queueArn := minio.NewArn("aws", "sqs", "us-east-1", "804605494417", "PhotoUpdate")
+
+queueConfig := minio.NewNotificationConfig(queueArn)
+queueConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
+queueConfig.AddFilterPrefix("photos/")
+queueConfig.AddFilterSuffix(".jpg")
+
+bucketNotification := minio.BucketNotification{}
+bucketNotification.AddQueue(queueConfig)
+
+err = minioClient.SetBucketNotification("mybucket", bucketNotification)
+if err != nil {
+ fmt.Println("Unable to set the bucket notification: ", err)
+ return
+}
+```
+
+<a name="RemoveAllBucketNotification"></a>
+### RemoveAllBucketNotification(bucketName string) error
+删除存储桶上所有配置的通知
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`err` | _error_ |标准Error |
+
+__示例__
+
+
+```go
+err = minioClient.RemoveAllBucketNotification("mybucket")
+if err != nil {
+ fmt.Println("Unable to remove bucket notifications.", err)
+ return
+}
+```
+
+<a name="ListenBucketNotification"></a>
+### ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo
+ListenBucketNotification API通过notification channel接收存储桶通知事件。channel中返回的通知信息包含两个属性,'Records'和'Err'。
+
+- 'Records'持有从服务器返回的通知信息。
+- 'Err'表示处理接收到的通知时发生的任何错误。
+
+注意:一旦报错,notification channel就会关闭。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ | 被监听通知的存储桶 |
+|`prefix` | _string_ | 过滤通知的对象前缀 |
+|`suffix` | _string_ | 过滤通知的对象后缀 |
+|`events` | _[]string_ | 开启指定事件类型的通知 |
+|`doneCh` | _chan struct{}_ | 在该channel上发送一个message即可结束ListenBucketNotification iterator。 |
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`notificationInfo` | _chan minio.NotificationInfo_ | 存储桶通知的channel |
+
+__minio.NotificationInfo__
+
+|属性 |类型 |描述 |
+|:---|:---| :---|
+|`notificationInfo.Records` | _[]minio.NotificationEvent_ | 通知事件的集合 |
+|`notificationInfo.Err` | _error_ | 操作时发生的任何错误(标准Error) |
+
+
+__示例__
+
+
+```go
+// Create a done channel to control 'ListenBucketNotification' go routine.
+doneCh := make(chan struct{})
+
+// Indicate a background go-routine to exit cleanly upon return.
+defer close(doneCh)
+
+// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
+for notificationInfo := range minioClient.ListenBucketNotification("mybucket", "myprefix/", ".mysuffix", []string{
+ "s3:ObjectCreated:*",
+ "s3:ObjectAccessed:*",
+ "s3:ObjectRemoved:*",
+ }, doneCh) {
+ if notificationInfo.Err != nil {
+ fmt.Println(notificationInfo.Err)
+ }
+ fmt.Println(notificationInfo)
+}
+```
+
+## 7. 客户端自定义设置
+
+<a name="SetAppInfo"></a>
+### SetAppInfo(appName, appVersion string)
+给User-Agent添加自定义应用信息。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`appName` | _string_ | 发请求的应用名称 |
+| `appVersion`| _string_ | 发请求的应用版本 |
+
+
+__示例__
+
+
+```go
+// Set Application name and version to be used in subsequent API requests.
+minioClient.SetAppInfo("myCloudApp", "1.0.0")
+```
+
+<a name="SetCustomTransport"></a>
+### SetCustomTransport(customHTTPTransport http.RoundTripper)
+重写默认的HTTP transport,通常用于调试或者添加自定义的TLS证书。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`customHTTPTransport` | _http.RoundTripper_ | 自定义的transport,例如:为了调试对API请求响应进行追踪。|
+
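+__示例__
+
+下面是一个简单的示意(假设需要信任自定义CA证书,"myCA.pem"为假设的文件路径):
+
+```go
+// Read a custom CA certificate and build an HTTP transport that trusts it.
+caCert, err := ioutil.ReadFile("myCA.pem")
+if err != nil {
+	fmt.Println(err)
+	return
+}
+caCertPool := x509.NewCertPool()
+caCertPool.AppendCertsFromPEM(caCert)
+
+tr := &http.Transport{
+	TLSClientConfig: &tls.Config{RootCAs: caCertPool},
+}
+minioClient.SetCustomTransport(tr)
+```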
+
+<a name="TraceOn"></a>
+### TraceOn(outputStream io.Writer)
+开启HTTP tracing。追踪信息输出到io.Writer,如果outputStream为nil,则trace写入到标准输出os.Stdout。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`outputStream` | _io.Writer_ | HTTP trace写入到outputStream |
+
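+__示例__
+
+下面是一个简单的示意(以ListBuckets调用为例演示追踪输出):
+
+```go
+// Enable HTTP tracing to standard output, make an API call, then disable it.
+minioClient.TraceOn(os.Stdout)
+
+_, err := minioClient.ListBuckets()
+if err != nil {
+	fmt.Println(err)
+	return
+}
+
+minioClient.TraceOff()
+```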
+
+<a name="TraceOff"></a>
+### TraceOff()
+关闭HTTP tracing。
+
+<a name="SetS3TransferAccelerate"></a>
+### SetS3TransferAccelerate(acceleratedEndpoint string)
+给后续所有API请求设置AWS S3传输加速endpoint。
+注意:此API仅对AWS S3有效,对其它S3兼容的对象存储服务不生效。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`acceleratedEndpoint` | _string_ | 设置新的S3传输加速endpoint。|
+
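+__示例__
+
+```go
+// Use the AWS S3 transfer acceleration endpoint for all subsequent requests.
+minioClient.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")
+```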
+
+## 8. 了解更多
+
+- [用Go语言创建属于你的音乐播放器APP示例](https://docs.minio.io/docs/go-music-player-app)
diff --git a/vendor/github.com/minio/minio-go/docs/zh_CN/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/docs/zh_CN/CONTRIBUTING.md
new file mode 100644
index 000000000..bc408c611
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/docs/zh_CN/CONTRIBUTING.md
@@ -0,0 +1,22 @@
+
+### 开发者指南
+
+``minio-go``欢迎你的贡献。为了让大家配合更加默契,我们做出如下约定:
+
+* fork项目并修改,我们鼓励大家使用pull requests进行代码相关的讨论。
+ - Fork项目
+ - 创建你的特性分支 (git checkout -b my-new-feature)
+ - Commit你的修改(git commit -am 'Add some feature')
+ - Push到远程分支(git push origin my-new-feature)
+ - 创建一个Pull Request
+
+* 当你准备创建pull request时,请确保:
+ - 写单元测试,如果你有什么疑问,请在pull request中提出来。
+ - 运行`go fmt`
+ - 将你的多个提交合并成一个提交:`git rebase -i`。之后你可以强制push更新你的pull request。
+ - 确保`go test -race ./...`和`go build`完成。
+ 注意:go test会进行功能测试,这需要你有一个AWS S3账号。将账户信息设为``ACCESS_KEY``和``SECRET_KEY``环境变量。如果想运行简版测试,请使用``go test -short -race ./...``。
+
+* 请阅读 [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments)
+ - `minio-go`项目严格符合Golang风格
+ - 如果您看到代码有问题,请随时发一个pull request
diff --git a/vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go b/vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go
index 037e2251c..4c48510da 100644
--- a/vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go
+++ b/vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/bucketexists.go b/vendor/github.com/minio/minio-go/examples/s3/bucketexists.go
index 945510db8..20dea30a3 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/bucketexists.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/bucketexists.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/composeobject.go b/vendor/github.com/minio/minio-go/examples/s3/composeobject.go
index 8aec6c158..2f76ff053 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/composeobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/composeobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/copyobject.go b/vendor/github.com/minio/minio-go/examples/s3/copyobject.go
index c1d92d73a..a7c3eca45 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/copyobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/copyobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/fgetobject-context.go b/vendor/github.com/minio/minio-go/examples/s3/fgetobject-context.go
new file mode 100644
index 000000000..6004baa14
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/fgetobject-context.go
@@ -0,0 +1,54 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "time"
+
+ "context"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
+ // and my-filename.csv are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+ defer cancel()
+
+ if err := s3Client.FGetObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Successfully saved my-filename.csv")
+
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/fgetobject.go b/vendor/github.com/minio/minio-go/examples/s3/fgetobject.go
index bef756dd6..819a34f91 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/fgetobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/fgetobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,7 +39,7 @@ func main() {
log.Fatalln(err)
}
- if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv"); err != nil {
+ if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil {
log.Fatalln(err)
}
log.Println("Successfully saved my-filename.csv")
diff --git a/vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go b/vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go
new file mode 100644
index 000000000..96eec7e8f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go
@@ -0,0 +1,80 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+ "github.com/minio/minio-go/pkg/encrypt"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Specify a local file that we will upload
+ filePath := "my-testfile"
+
+ //// Build an asymmetric key from private and public files
+ //
+ // privateKey, err := ioutil.ReadFile("private.key")
+ // if err != nil {
+ // t.Fatal(err)
+ // }
+ //
+ // publicKey, err := ioutil.ReadFile("public.key")
+ // if err != nil {
+ // t.Fatal(err)
+ // }
+ //
+ // asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey)
+ // if err != nil {
+ // t.Fatal(err)
+ // }
+ ////
+
+ // Build a symmetric key
+ symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+ // Build encryption materials which will encrypt uploaded data
+ cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Encrypt file content and upload to the server
+ n, err := s3Client.FPutEncryptedObject("my-bucketname", "my-objectname", filePath, cbcMaterials)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/fputobject-context.go b/vendor/github.com/minio/minio-go/examples/s3/fputobject-context.go
new file mode 100644
index 000000000..d7c941c2b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/fputobject-context.go
@@ -0,0 +1,53 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "time"
+
+ "context"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
+ // and my-filename.csv are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+ defer cancel()
+
+ if _, err := s3Client.FPutObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ContentType: "application/csv"}); err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Successfully uploaded my-filename.csv")
+}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/fputobject.go b/vendor/github.com/minio/minio-go/examples/s3/fputobject.go
index f4e60acff..34d876804 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/fputobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/fputobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,7 +39,9 @@ func main() {
log.Fatalln(err)
}
- if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil {
+ if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{
+ ContentType: "application/csv",
+ }); err != nil {
log.Fatalln(err)
}
log.Println("Successfully uploaded my-filename.csv")
diff --git a/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go b/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go
index 8f51f26ae..9783bebe8 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go b/vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go
index 67f010ef3..19349baaf 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go b/vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go
index e5f960403..f9ac89b61 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/getobject-context.go b/vendor/github.com/minio/minio-go/examples/s3/getobject-context.go
new file mode 100644
index 000000000..c7d41707a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/getobject-context.go
@@ -0,0 +1,73 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "io"
+ "log"
+ "os"
+ "time"
+
+ "context"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
+ // my-testfile are dummy values; please replace them with your own values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+ defer cancel()
+
+ opts := minio.GetObjectOptions{}
+ opts.SetModified(time.Now().Round(10 * time.Minute)) // get the object only if it was modified within the last 10 minutes
+ reader, err := s3Client.GetObjectWithContext(ctx, "my-bucketname", "my-objectname", opts)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer reader.Close()
+
+ localFile, err := os.Create("my-testfile")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer localFile.Close()
+
+ stat, err := reader.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
+ log.Fatalln(err)
+ }
+}
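Beyond SetModified, the GetObjectOptions struct introduced here also carries range and conditional headers; a short sketch, assuming the same client setup as the example above and the v4 SetRange helper:

    opts := minio.GetObjectOptions{}
    if err := opts.SetRange(0, 1023); err != nil { // request only the first 1 KiB
        log.Fatalln(err)
    }
    reader, err := s3Client.GetObjectWithContext(ctx, "my-bucketname", "my-objectname", opts)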
diff --git a/vendor/github.com/minio/minio-go/examples/s3/getobject.go b/vendor/github.com/minio/minio-go/examples/s3/getobject.go
index 96bb85505..e17ef8172 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/getobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/getobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -40,7 +41,7 @@ func main() {
log.Fatalln(err)
}
- reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
+ reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{})
if err != nil {
log.Fatalln(err)
}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go b/vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go
index 19a2d1b2b..43edd0c3d 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listbuckets.go b/vendor/github.com/minio/minio-go/examples/s3/listbuckets.go
index 81a99e627..5eae587b4 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/listbuckets.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/listbuckets.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go b/vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
index 34771e44b..a5a79b603 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go b/vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go
index 5dde36746..55bceb470 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listobjects.go b/vendor/github.com/minio/minio-go/examples/s3/listobjects.go
index 4fd5c069a..1da2e3faa 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/listobjects.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/listobjects.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go b/vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go
index b52b4dab8..190aec36b 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/makebucket.go b/vendor/github.com/minio/minio-go/examples/s3/makebucket.go
index ae222a8af..419c96cf2 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/makebucket.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/makebucket.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go b/vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go
index 11be0c0a4..fd7fb9e8d 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go b/vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go
index 0332049e5..8dbc0a4b7 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go b/vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go
index 3f37cef38..205ac95a3 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go b/vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go
index 3db6f6e7b..b2f8b4f82 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go b/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go
index b8f7e12f2..cdf09ac53 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -75,7 +76,7 @@ func main() {
}
// Encrypt file content and upload to the server
- n, err := s3Client.PutEncryptedObject("my-bucketname", "my-objectname", file, cbcMaterials, nil, nil)
+ n, err := s3Client.PutEncryptedObject("my-bucketname", "my-objectname", file, cbcMaterials)
if err != nil {
log.Fatalln(err)
}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-context.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-context.go
new file mode 100644
index 000000000..acc923f7e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-context.go
@@ -0,0 +1,68 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "os"
+ "time"
+
+ "context"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values; please replace them with your own values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+ defer cancel()
+
+ object, err := os.Open("my-testfile")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer object.Close()
+
+ objectStat, err := object.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ n, err := s3Client.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{
+ ContentType: "application/octet-stream",
+ })
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
+}
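The same *WithContext pattern covers uploads from disk as well; a hedged sketch, assuming the companion FPutObjectWithContext call from the same release (file name is illustrative):

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    // If the timeout fires first, the upload is aborted and err reflects context.DeadlineExceeded.
    n, err := s3Client.FPutObjectWithContext(ctx, "my-bucketname", "my-objectname",
        "my-testfile", minio.PutObjectOptions{})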
diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go
index 92e6a4840..3d3b2fd2d 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,7 +25,6 @@ import (
"encoding/base64"
"io/ioutil"
"log"
- "net/http"
minio "github.com/minio/minio-go"
)
@@ -54,24 +54,24 @@ func main() {
// of the encryption key or to decrypt the contents of the
// encrypted object. That means, if you lose the encryption
// key, you lose the object.
- var metadata = map[string][]string{
- "x-amz-server-side-encryption-customer-algorithm": []string{"AES256"},
- "x-amz-server-side-encryption-customer-key": []string{encryptionKey},
- "x-amz-server-side-encryption-customer-key-MD5": []string{encryptionKeyMD5},
+ var metadata = map[string]string{
+ "x-amz-server-side-encryption-customer-algorithm": "AES256",
+ "x-amz-server-side-encryption-customer-key": encryptionKey,
+ "x-amz-server-side-encryption-customer-key-MD5": encryptionKeyMD5,
}
// minioClient.TraceOn(os.Stderr) // Enable to debug.
- _, err = minioClient.PutObjectWithMetadata("mybucket", "my-encrypted-object.txt", content, metadata, nil)
+ _, err = minioClient.PutObject("mybucket", "my-encrypted-object.txt", content, 11, minio.PutObjectOptions{UserMetadata: metadata})
if err != nil {
log.Fatalln(err)
}
- var reqHeaders = minio.RequestHeaders{Header: http.Header{}}
+ opts := minio.GetObjectOptions{}
for k, v := range metadata {
- reqHeaders.Set(k, v[0])
+ opts.Set(k, v)
}
coreClient := minio.Core{minioClient}
- reader, _, err := coreClient.GetObject("mybucket", "my-encrypted-object.txt", reqHeaders)
+ reader, _, err := coreClient.GetObject("mybucket", "my-encrypted-object.txt", opts)
if err != nil {
log.Fatalln(err)
}
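The loop above relies on the general escape hatch of the new API: GetObjectOptions.Set(key, value) attaches an arbitrary header to the request. A minimal sketch setting the SSE-C trio directly, under the same assumptions as the example (key material is illustrative):

    opts := minio.GetObjectOptions{}
    opts.Set("x-amz-server-side-encryption-customer-algorithm", "AES256")
    opts.Set("x-amz-server-side-encryption-customer-key", encryptionKey)         // base64-encoded key
    opts.Set("x-amz-server-side-encryption-customer-key-MD5", encryptionKeyMD5)  // base64-encoded key MD5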
diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go
index 26e77b9e6..0e92dd65e 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,7 +40,7 @@ func main() {
log.Fatalln(err)
}
- reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
+ reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{})
if err != nil {
log.Fatalln(err)
}
@@ -54,10 +55,8 @@ func main() {
// the Reads inside.
progress := pb.New64(objectInfo.Size)
progress.Start()
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname-progress", reader, objectInfo.Size, minio.PutObjectOptions{ContentType: "application/octet-stream", Progress: progress})
- n, err := s3Client.PutObjectWithProgress("my-bucketname", "my-objectname-progress", reader, map[string][]string{
- "Content-Type": []string{"application/octet-stream"},
- }, progress)
if err != nil {
log.Fatalln(err)
}
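Progress reporting is likewise folded into the options struct: Progress is an io.Reader that the library reads in step with the upload, and pb's bar satisfies that interface. Restated as a sketch:

    progress := pb.New64(objectInfo.Size)
    progress.Start()
    n, err := s3Client.PutObject("my-bucketname", "my-objectname-progress", reader,
        objectInfo.Size, minio.PutObjectOptions{
            ContentType: "application/octet-stream",
            Progress:    progress, // advanced as bytes are consumed
        })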
diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go
index a26415c7a..06345cd87 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -48,7 +49,12 @@ func main() {
}
defer object.Close()
- n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
+ objectStat, err := object.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
log.Fatalln(err)
}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-streaming.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-streaming.go
index d10407dbd..85b78dd45 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/putobject-streaming.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-streaming.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -45,7 +46,7 @@ func main() {
}
defer object.Close()
- n, err := s3Client.PutObjectStreaming("my-bucketname", "my-objectname", object)
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, -1, minio.PutObjectOptions{})
if err != nil {
log.Fatalln(err)
}
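Passing -1 as the size is the replacement for the removed PutObjectStreaming: with an unknown length, the client falls back to a streaming multipart upload. A sketch, assuming src is any io.Reader of unknown length (e.g. a pipe):

    n, err := s3Client.PutObject("my-bucketname", "my-objectname", src, -1, minio.PutObjectOptions{})
    if err != nil {
        log.Fatalln(err)
    }
    log.Println("streamed", n, "bytes")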
diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject.go b/vendor/github.com/minio/minio-go/examples/s3/putobject.go
index caa731302..b9e4ff16c 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/putobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/putobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -44,8 +45,12 @@ func main() {
log.Fatalln(err)
}
defer object.Close()
+ objectStat, err := object.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
- n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
log.Fatalln(err)
}
diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go b/vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go
index 0f5f3a74d..1186afad8 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/removebucket.go b/vendor/github.com/minio/minio-go/examples/s3/removebucket.go
index fb013ca24..7a7737ee0 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/removebucket.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/removebucket.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go b/vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
index d486182af..31cc8790b 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeobject.go b/vendor/github.com/minio/minio-go/examples/s3/removeobject.go
index 13b00b41e..7e5848576 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/removeobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/removeobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeobjects.go b/vendor/github.com/minio/minio-go/examples/s3/removeobjects.go
index 594606929..b912bc85d 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/removeobjects.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/removeobjects.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,7 +21,6 @@ package main
import (
"log"
- "strconv"
"github.com/minio/minio-go"
)
@@ -44,8 +44,12 @@ func main() {
// Send object names that are needed to be removed to objectsCh
go func() {
defer close(objectsCh)
- for i := 0; i < 10; i++ {
- objectsCh <- "/path/to/my-objectname" + strconv.Itoa(i)
+ // List all objects in the bucket with a matching prefix.
+ for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
+ if object.Err != nil {
+ log.Fatalln(object.Err)
+ }
+ objectsCh <- object.Key
}
}()
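For context, objectsCh feeds RemoveObjects, which drains the channel and reports per-object failures on a channel of its own; a sketch of the consuming side, assuming the producer goroutine above:

    for rErr := range s3Client.RemoveObjects("my-bucketname", objectsCh) {
        log.Println("failed to remove", rErr.ObjectName, "error:", rErr.Err)
    }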
diff --git a/vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go b/vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go
index 5fe1e318e..b5af30f06 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go b/vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go
index 40906ee92..c81fb5050 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/examples/s3/statobject.go b/vendor/github.com/minio/minio-go/examples/s3/statobject.go
index 4c5453a07..0b27a83b3 100644
--- a/vendor/github.com/minio/minio-go/examples/s3/statobject.go
+++ b/vendor/github.com/minio/minio-go/examples/s3/statobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -37,7 +38,7 @@ func main() {
if err != nil {
log.Fatalln(err)
}
- stat, err := s3Client.StatObject("my-bucketname", "my-objectname")
+ stat, err := s3Client.StatObject("my-bucketname", "my-objectname", minio.StatObjectOptions{})
if err != nil {
log.Fatalln(err)
}
diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go
index ec554e4fe..c4156c293 100644
--- a/vendor/github.com/minio/minio-go/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/functional_tests.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,6 +21,7 @@ package main
import (
"bytes"
+ "context"
"encoding/hex"
"encoding/json"
"errors"
@@ -27,27 +29,25 @@ import (
"io"
"io/ioutil"
"math/rand"
+ "mime/multipart"
"net/http"
"net/url"
"os"
+ "path/filepath"
"reflect"
+ "runtime"
"strconv"
"strings"
"time"
+ humanize "github.com/dustin/go-humanize"
minio "github.com/minio/minio-go"
log "github.com/sirupsen/logrus"
- "github.com/dustin/go-humanize"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/policy"
)
-const (
- sixtyFiveMiB = 65 * humanize.MiByte // 65MiB
- thirtyThreeKiB = 33 * humanize.KiByte // 33KiB
-)
-
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
const (
letterIdxBits = 6 // 6 bits to represent a letter index
@@ -84,39 +84,103 @@ func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
return append(serialized, '\n'), nil
}
+func cleanEmptyEntries(fields log.Fields) log.Fields {
+ cleanFields := log.Fields{}
+ for k, v := range fields {
+ if v != "" {
+ cleanFields[k] = v
+ }
+ }
+ return cleanFields
+}
+
// log successful test runs
-func successLogger(function string, args map[string]interface{}, startTime time.Time) *log.Entry {
+func successLogger(testName string, function string, args map[string]interface{}, startTime time.Time) *log.Entry {
// calculate the test case duration
duration := time.Since(startTime)
// log with the fields as per mint
- fields := log.Fields{"name": "minio-go", "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "pass"}
- return log.WithFields(fields)
+ fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"}
+ return log.WithFields(cleanEmptyEntries(fields))
+}
+
+// Since a few features are not yet available in Gateway(s), check whether the err value is NotImplemented;
+// if so, log the test as NA and continue execution. Otherwise log it as a failure and return.
+func logError(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) {
+ // If the server returns NotImplemented, we assume gateway mode, log the test as info and move on to the next one.
+ // ComposeObject is a special case: it is implemented client-side and wraps the server's NotImplemented
+ // error with extra detail such as `Error in upload-part-copy`.
+ if isErrNotImplemented(err) {
+ ignoredLog(testName, function, args, startTime, message).Info()
+ } else {
+ failureLog(testName, function, args, startTime, alert, message, err).Fatal()
+ }
}
// log failed test runs
-func failureLog(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
+func failureLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
// calculate the test case duration
duration := time.Since(startTime)
var fields log.Fields
// log with the fields as per mint
if err != nil {
- fields = log.Fields{"name": "minio-go", "function": function, "args": args,
- "duration": duration.Nanoseconds() / 1000000, "status": "fail", "alert": alert, "message": message, "error": err}
+ fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
+ "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err}
} else {
- fields = log.Fields{"name": "minio-go", "function": function, "args": args,
- "duration": duration.Nanoseconds() / 1000000, "status": "fail", "alert": alert, "message": message}
+ fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
+ "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message}
}
- return log.WithFields(fields)
+ return log.WithFields(cleanEmptyEntries(fields))
}
// log not applicable test runs
-func ignoredLog(function string, args map[string]interface{}, startTime time.Time, message string) *log.Entry {
+func ignoredLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry {
// calculate the test case duration
duration := time.Since(startTime)
// log with the fields as per mint
- fields := log.Fields{"name": "minio-go", "function": function, "args": args,
- "duration": duration.Nanoseconds() / 1000000, "status": "na", "message": message}
- return log.WithFields(fields)
+ fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
+ "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": alert}
+ return log.WithFields(cleanEmptyEntries(fields))
+}
+
+// Delete all objects in the given bucket recursively, then remove the bucket itself
+func cleanupBucket(bucketName string, c *minio.Client) error {
+ // Create a done channel to control the 'ListObjectsV2' goroutine.
+ doneCh := make(chan struct{})
+ // Exit cleanly upon return.
+ defer close(doneCh)
+ // Iterate over all objects in the bucket via listObjectsV2 and delete
+ for objCh := range c.ListObjectsV2(bucketName, "", true, doneCh) {
+ if objCh.Err != nil {
+ return objCh.Err
+ }
+ if objCh.Key != "" {
+ err := c.RemoveObject(bucketName, objCh.Key)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ for objPartInfo := range c.ListIncompleteUploads(bucketName, "", true, doneCh) {
+ if objPartInfo.Err != nil {
+ return objPartInfo.Err
+ }
+ if objPartInfo.Key != "" {
+ err := c.RemoveIncompleteUpload(bucketName, objPartInfo.Key)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ // All objects are deleted; now remove the bucket itself.
+ if err := c.RemoveBucket(bucketName); err != nil {
+ return err
+ }
+ return nil
+}
+
+func isErrNotImplemented(err error) bool {
+ return minio.ToErrorResponse(err).Code == "NotImplemented"
}
func init() {
@@ -130,19 +194,13 @@ func init() {
}
}
-func getDataDir() (dir string) {
- dir = os.Getenv("MINT_DATA_DIR")
- if dir == "" {
- dir = "/mint/data"
- }
- return
-}
+var mintDataDir = os.Getenv("MINT_DATA_DIR")
-func getFilePath(filename string) (filepath string) {
- if getDataDir() != "" {
- filepath = getDataDir() + "/" + filename
+func getMintDataDirFilePath(filename string) (fp string) {
+ if mintDataDir == "" {
+ return
}
- return
+ return filepath.Join(mintDataDir, filename)
}
type sizedReader struct {
@@ -165,14 +223,17 @@ func (r *randomReader) Read(b []byte) (int, error) {
}
// read data from file if it exists or optionally create a buffer of particular size
-func getDataReader(fileName string, size int) io.ReadCloser {
- if _, err := os.Stat(getFilePath(fileName)); os.IsNotExist(err) {
+func getDataReader(fileName string) io.ReadCloser {
+ if mintDataDir == "" {
+ size := dataFileMap[fileName]
return &sizedReader{
- Reader: io.LimitReader(&randomReader{seed: []byte("a")}, int64(size)),
- size: size,
+ Reader: io.LimitReader(&randomReader{
+ seed: []byte("a"),
+ }, int64(size)),
+ size: size,
}
}
- reader, _ := os.Open(getFilePath(fileName))
+ reader, _ := os.Open(getMintDataDirFilePath(fileName))
return reader
}
@@ -194,8 +255,26 @@ func randString(n int, src rand.Source, prefix string) string {
return prefix + string(b[0:30-len(prefix)])
}
-func isQuickMode() bool {
- return os.Getenv("MODE") == "quick"
+var dataFileMap = map[string]int{
+ "datafile-1-b": 1,
+ "datafile-10-kB": 10 * humanize.KiByte,
+ "datafile-33-kB": 33 * humanize.KiByte,
+ "datafile-100-kB": 100 * humanize.KiByte,
+ "datafile-1.03-MB": 1056 * humanize.KiByte,
+ "datafile-1-MB": 1 * humanize.MiByte,
+ "datafile-5-MB": 5 * humanize.MiByte,
+ "datafile-6-MB": 6 * humanize.MiByte,
+ "datafile-11-MB": 11 * humanize.MiByte,
+ "datafile-65-MB": 65 * humanize.MiByte,
+}
+
+func isFullMode() bool {
+ return os.Getenv("MINT_MODE") == "full"
+}
+
+func getFuncName() string {
+ pc, _, _, _ := runtime.Caller(1)
+ return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.")
}
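getFuncName leans on runtime.Caller(1) so each test can label itself without passing its own name around. A self-contained sketch of the trick:

    package main

    import (
        "fmt"
        "runtime"
        "strings"
    )

    // getFuncName reports the name of the function that called it.
    func getFuncName() string {
        pc, _, _, _ := runtime.Caller(1) // 1 = our caller, not getFuncName itself
        return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.")
    }

    func testExample() {
        fmt.Println(getFuncName()) // prints "testExample"
    }

    func main() { testExample() }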
// Tests bucket re-create errors.
@@ -204,6 +283,7 @@ func testMakeBucketError() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "MakeBucket(bucketName, region)"
// initialize logging params
args := map[string]interface{}{
@@ -213,7 +293,7 @@ func testMakeBucketError() {
// skipping region functional tests for non s3 runs
if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+ ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
return
}
@@ -228,7 +308,8 @@ func testMakeBucketError() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -238,26 +319,99 @@ func testMakeBucketError() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket in 'eu-central-1'.
if err = c.MakeBucket(bucketName, region); err != nil {
- failureLog(function, args, startTime, "", "MakeBucket Failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket Failed", err)
+ return
}
if err = c.MakeBucket(bucketName, region); err == nil {
- failureLog(function, args, startTime, "", "Bucket already exists", err).Fatal()
+ logError(testName, function, args, startTime, "", "Bucket already exists", err)
+ return
}
// Verify valid error response from server.
if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
- failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal()
+ logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
+ return
+ }
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+ successLogger(testName, function, args, startTime).Info()
+}
+
+func testMetadataSizeLimit() {
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts.UserMetadata": "",
+ }
+ rand.Seed(startTime.Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client creation failed", err)
+ return
+ }
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ const HeaderSizeLimit = 8 * 1024
+ const UserMetadataLimit = 2 * 1024
+
+ // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail
+ metadata := make(map[string]string)
+ metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test")))
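+ // 1 + UserMetadataLimit - len(key) makes the key/value pair overshoot the 2 KB limit by exactly one byte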
+ args["metadata"] = fmt.Sprint(metadata)
+
+ _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil)
+ return
}
- if err = c.RemoveBucket(bucketName); err != nil {
- failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
+
+ // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail
+ metadata = make(map[string]string)
+ metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test")))
+ args["metadata"] = fmt.Sprint(metadata)
+ _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil)
+ return
}
- successLogger(function, args, startTime).Info()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
}
// Tests various bucket supported formats.
@@ -265,6 +419,7 @@ func testMakeBucketRegions() {
region := "eu-central-1"
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "MakeBucket(bucketName, region)"
// initialize logging params
args := map[string]interface{}{
@@ -274,7 +429,7 @@ func testMakeBucketRegions() {
// skipping region functional tests for non s3 runs
if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+ ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
return
}
@@ -289,7 +444,8 @@ func testMakeBucketRegions() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -299,16 +455,19 @@ func testMakeBucketRegions() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket in 'eu-central-1'.
if err = c.MakeBucket(bucketName, region); err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
- if err = c.RemoveBucket(bucketName); err != nil {
- failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
// Make a new bucket with '.' in its name, in 'us-west-2'. This
@@ -317,26 +476,28 @@ func testMakeBucketRegions() {
region = "us-west-2"
args["region"] = region
if err = c.MakeBucket(bucketName+".withperiod", region); err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
- // Remove the newly created bucket.
- if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
- failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
-
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
// Test PutObject using a large data to trigger multipart readat
func testPutObjectReadAt() {
// initialize logging params
startTime := time.Now()
- function := "PutObject(bucketName, objectName, reader, objectContentType)"
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, opts)"
args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "objectContentType": "",
+ "bucketName": "",
+ "objectName": "",
+ "opts": "objectContentType",
}
// Seed random based on current time.
@@ -350,7 +511,8 @@ func testPutObjectReadAt() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -360,18 +522,18 @@ func testPutObjectReadAt() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
}
- // Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover.
- // Use different data for each part for multipart tests to ensure part order at the end.
- var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+ bufSize := dataFileMap["datafile-65-MB"]
+ var reader = getDataReader("datafile-65-MB")
defer reader.Close()
// Save the data
@@ -382,65 +544,69 @@ func testPutObjectReadAt() {
objectContentType := "binary/octet-stream"
args["objectContentType"] = objectContentType
- n, err := c.PutObject(bucketName, objectName, reader, objectContentType)
-
+ n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(sixtyFiveMiB) {
- failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err)
+ return
}
// Read the data back
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "Get Object failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Get Object failed", err)
+ return
}
st, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat Object failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat Object failed", err)
+ return
}
- if st.Size != int64(sixtyFiveMiB) {
- failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal()
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
+ return
}
if st.ContentType != objectContentType {
- failureLog(function, args, startTime, "", "Content types don't match", err).Fatal()
+ logError(testName, function, args, startTime, "", "Content types don't match", err)
+ return
}
if err := r.Close(); err != nil {
- failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
}
if err := r.Close(); err == nil {
- failureLog(function, args, startTime, "", "Object is already closed, didn't return error on Close", err).Fatal()
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err)
+ return
}
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
// Test PutObject using a large data to trigger multipart readat
func testPutObjectWithMetadata() {
// initialize logging params
startTime := time.Now()
- function := "PutObjectWithMetadata(bucketName, objectName, reader, metadata, progress)"
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size, opts)"
args := map[string]interface{}{
"bucketName": "",
"objectName": "",
- "metadata": "",
+ "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
}
- if isQuickMode() {
- ignoredLog(function, args, startTime, "Skipping functional tests for short runs").Info()
+ if !isFullMode() {
+ ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info()
return
}
@@ -455,7 +621,8 @@ func testPutObjectWithMetadata() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -465,18 +632,18 @@ func testPutObjectWithMetadata() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
}
- // Generate data using 2 parts
- // Use different data in each part for multipart tests to ensure part order at the end.
- var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+ bufSize := dataFileMap["datafile-65-MB"]
+ var reader = getDataReader("datafile-65-MB")
defer reader.Close()
// Save the data
@@ -486,53 +653,58 @@ func testPutObjectWithMetadata() {
// Object custom metadata
customContentType := "custom/contenttype"
- n, err := c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{
- "Content-Type": {customContentType},
- }, nil)
args["metadata"] = map[string][]string{
"Content-Type": {customContentType},
}
+ n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
+ ContentType: customContentType})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(sixtyFiveMiB) {
- failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err)
+ return
}
// Read the data back
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
st, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
- if st.Size != int64(sixtyFiveMiB) {
- failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal()
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err)
+ return
}
if st.ContentType != customContentType {
- failureLog(function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err).Fatal()
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err)
+ return
}
if err := r.Close(); err != nil {
- failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
}
if err := r.Close(); err == nil {
- failureLog(function, args, startTime, "", "Object already closed, should respond with error", err).Fatal()
- }
-
- if err = c.RemoveObject(bucketName, objectName); err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
+ return
}
- if err = c.RemoveBucket(bucketName); err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
// Test put object with streaming signature.
@@ -540,10 +712,13 @@ func testPutObjectStreaming() {
// initialize logging params
objectName := "test-object"
startTime := time.Now()
- function := "PutObjectStreaming(bucketName, objectName, reader)"
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size,opts)"
args := map[string]interface{}{
"bucketName": "",
"objectName": objectName,
+ "size": -1,
+ "opts": "",
}
// Seed random based on current time.
@@ -557,7 +732,8 @@ func testPutObjectStreaming() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -567,13 +743,13 @@ func testPutObjectStreaming() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
- "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Upload an object.
@@ -581,34 +757,32 @@ func testPutObjectStreaming() {
for _, size := range sizes {
data := bytes.Repeat([]byte("a"), int(size))
- n, err := c.PutObjectStreaming(bucketName, objectName, bytes.NewReader(data))
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
+ return
}
if n != size {
- failureLog(function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err).Fatal()
+ logError(testName, function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err)
+ return
}
}
- // Remove the object.
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- // Remove the bucket.
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
- }
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
// Test listing partially uploaded objects.
func testListPartiallyUploaded() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
args := map[string]interface{}{
"bucketName": "",
@@ -627,7 +801,8 @@ func testListPartiallyUploaded() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Set user agent.
@@ -637,24 +812,27 @@ func testListPartiallyUploaded() {
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
- r := bytes.NewReader(bytes.Repeat([]byte("0"), sixtyFiveMiB*2))
+ bufSize := dataFileMap["datafile-65-MB"]
+ r := bytes.NewReader(bytes.Repeat([]byte("0"), bufSize*2))
reader, writer := io.Pipe()
go func() {
i := 0
for i < 25 {
- _, cerr := io.CopyN(writer, r, (sixtyFiveMiB*2)/25)
+ _, cerr := io.CopyN(writer, r, (int64(bufSize)*2)/25)
if cerr != nil {
- failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
}
i++
r.Seek(0, 0)
@@ -665,12 +843,14 @@ func testListPartiallyUploaded() {
objectName := bucketName + "-resumable"
args["objectName"] = objectName
- _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+ _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize*2), minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err == nil {
- failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject should fail", err)
+ return
}
if !strings.Contains(err.Error(), "proactively closed to be verified later") {
- failureLog(function, args, startTime, "", "String not found in PutObject output", err).Fatal()
+ logError(testName, function, args, startTime, "", "String not found in PutObject output", err)
+ return
}
doneCh := make(chan struct{})
@@ -681,26 +861,27 @@ func testListPartiallyUploaded() {
multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)
for multiPartObject := range multiPartObjectCh {
if multiPartObject.Err != nil {
- failureLog(function, args, startTime, "", "Multipart object error", multiPartObject.Err).Fatal()
+ logError(testName, function, args, startTime, "", "Multipart object error", multiPartObject.Err)
+ return
}
}
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
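
ListIncompleteUploads streams results over a channel until doneCh is closed, which is the shape the loop above drains. A sketch of the same pattern in isolation (the client c is assumed to be constructed as in these tests; Key and Err are fields of minio.ObjectMultipartInfo):

import (
	"log"

	minio "github.com/minio/minio-go"
)

// listIncomplete drains every incomplete multipart upload under a prefix.
func listIncomplete(c *minio.Client, bucket, prefix string) {
	doneCh := make(chan struct{})
	// Closing doneCh tells the client's listing goroutine to stop.
	defer close(doneCh)
	for upload := range c.ListIncompleteUploads(bucket, prefix, true, doneCh) {
		if upload.Err != nil {
			log.Println("listing error:", upload.Err)
			return
		}
		log.Println("incomplete upload:", upload.Key)
	}
}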
// Test get object seeker from the end, using whence set to '2'.
func testGetObjectSeekEnd() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- }
+ args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -713,7 +894,8 @@ func testGetObjectSeekEnd() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -723,17 +905,19 @@ func testGetObjectSeekEnd() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate 33K of data.
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
defer reader.Close()
// Save the data
@@ -742,75 +926,94 @@ func testGetObjectSeekEnd() {
buf, err := ioutil.ReadAll(reader)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
+ return
}
// Read the data back
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
st, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
- if st.Size != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
+ return
}
pos, err := r.Seek(-100, 2)
if err != nil {
- failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Object Seek failed", err)
+ return
}
if pos != st.Size-100 {
- failureLog(function, args, startTime, "", "Incorrect position", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect position", err)
+ return
}
buf2 := make([]byte, 100)
m, err := io.ReadFull(r, buf2)
if err != nil {
- failureLog(function, args, startTime, "", "Error reading through io.ReadFull", err).Fatal()
+ logError(testName, function, args, startTime, "", "Error reading through io.ReadFull", err)
+ return
}
if m != len(buf2) {
- failureLog(function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err).Fatal()
+ logError(testName, function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err)
+ return
}
hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
if hexBuf1 != hexBuf2 {
- failureLog(function, args, startTime, "", "Values at same index dont match", err).Fatal()
+ logError(testName, function, args, startTime, "", "Values at same index dont match", err)
+ return
}
pos, err = r.Seek(-100, 2)
if err != nil {
- failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Object Seek failed", err)
+ return
}
if pos != st.Size-100 {
- failureLog(function, args, startTime, "", "Incorrect position", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect position", err)
+ return
}
if err = r.Close(); err != nil {
- failureLog(function, args, startTime, "", "ObjectClose failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ObjectClose failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
}
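
The test seeks from the end of the object, relying on minio.Object implementing io.Seeker: a negative offset with whence 2 positions relative to the size reported by Stat. A sketch reading the final 100 bytes (client, bucket, and object names are assumed):

import (
	"io"

	minio "github.com/minio/minio-go"
)

// tail100 reads the last 100 bytes of an object via Seek relative to the end.
func tail100(c *minio.Client, bucket, object string) ([]byte, error) {
	r, err := c.GetObject(bucket, object, minio.GetObjectOptions{})
	if err != nil {
		return nil, err
	}
	defer r.Close()
	// Same as the r.Seek(-100, 2) calls above; io.SeekEnd == 2.
	if _, err := r.Seek(-100, io.SeekEnd); err != nil {
		return nil, err
	}
	buf := make([]byte, 100)
	_, err = io.ReadFull(r, buf)
	return buf, err
}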
// Test get object reader to not throw error on being closed twice.
func testGetObjectClosedTwice() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- }
+ args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -823,7 +1026,8 @@ func testGetObjectClosedTwice() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -833,67 +1037,75 @@ func testGetObjectClosedTwice() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate 33K of data.
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
defer reader.Close()
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(bufSize))+" got "+string(n), err)
+ return
}
// Read the data back
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
st, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
- if st.Size != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
+ return
}
if err := r.Close(); err != nil {
- failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
}
if err := r.Close(); err == nil {
- failureLog(function, args, startTime, "", "Already closed object. No error returned", err).Fatal()
+ logError(testName, function, args, startTime, "", "Already closed object. No error returned", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
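
The double-close assertion relies on minio.Object.Close reporting an error once the object is already closed. A sketch of that contract (the object r is assumed to come from GetObject as above):

import (
	"log"

	minio "github.com/minio/minio-go"
)

// closeTwice exercises the close-once contract the test asserts.
func closeTwice(r *minio.Object) {
	if err := r.Close(); err != nil {
		log.Println("first close failed:", err)
		return
	}
	// A second Close on an already-closed object must return an error.
	if err := r.Close(); err == nil {
		log.Println("second close unexpectedly succeeded")
	}
}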
// Test removing multiple objects with Remove API
func testRemoveMultipleObjects() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "RemoveObjects(bucketName, objectsCh)"
args := map[string]interface{}{
"bucketName": "",
@@ -911,7 +1123,8 @@ func testRemoveMultipleObjects() {
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Set user agent.
@@ -921,19 +1134,20 @@ func testRemoveMultipleObjects() {
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
// Multi remove of 1100 objects
- nrObjects := 1100
+ nrObjects := 200
objectsCh := make(chan string)
@@ -942,9 +1156,9 @@ func testRemoveMultipleObjects() {
// Upload objects and send them to objectsCh
for i := 0; i < nrObjects; i++ {
objectName := "sample" + strconv.Itoa(i) + ".txt"
- _, err = c.PutObject(bucketName, objectName, r, "application/octet-stream")
+ _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
continue
}
objectsCh <- objectName
@@ -958,27 +1172,27 @@ func testRemoveMultipleObjects() {
select {
case r, more := <-errorCh:
if more {
- failureLog(function, args, startTime, "", "Unexpected error", r.Err).Fatal()
+ logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
+ return
}
}
- // Clean the bucket created by the test
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
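
RemoveObjects consumes object names from a channel and streams failures back on a second channel, which is why the test feeds objectsCh from a goroutine. A compact sketch of that producer/consumer shape (bucket and names are placeholders; ObjectName and Err are the fields of minio.RemoveObjectError):

import (
	"log"

	minio "github.com/minio/minio-go"
)

// removeAll deletes the named objects through the bulk Remove API.
func removeAll(c *minio.Client, bucket string, names []string) {
	objectsCh := make(chan string)
	go func() {
		// Closing the channel signals RemoveObjects that input is complete.
		defer close(objectsCh)
		for _, name := range names {
			objectsCh <- name
		}
	}()
	// Each failed deletion arrives as a minio.RemoveObjectError.
	for e := range c.RemoveObjects(bucket, objectsCh) {
		log.Println("failed to remove", e.ObjectName+":", e.Err)
	}
}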
// Tests removing partially uploaded objects.
func testRemovePartiallyUploaded() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "RemoveIncompleteUpload(bucketName, objectName)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- }
+ args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -991,7 +1205,8 @@ func testRemovePartiallyUploaded() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Set user agent.
@@ -1001,13 +1216,14 @@ func testRemovePartiallyUploaded() {
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
@@ -1018,7 +1234,8 @@ func testRemovePartiallyUploaded() {
for i < 25 {
_, cerr := io.CopyN(writer, r, 128*1024)
if cerr != nil {
- failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
}
i++
r.Seek(0, 0)
@@ -1029,34 +1246,40 @@ func testRemovePartiallyUploaded() {
objectName := bucketName + "-resumable"
args["objectName"] = objectName
- _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+ _, err = c.PutObject(bucketName, objectName, reader, 128*1024, minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err == nil {
- failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject should fail", err)
+ return
}
if !strings.Contains(err.Error(), "proactively closed to be verified later") {
- failureLog(function, args, startTime, "", "String not found", err).Fatal()
+ logError(testName, function, args, startTime, "", "String not found", err)
+ return
}
err = c.RemoveIncompleteUpload(bucketName, objectName)
if err != nil {
- failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "RemoveIncompleteUpload failed", err)
+ return
}
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
// Tests FPutObject of a big file to trigger multipart
func testFPutObjectMultipart() {
// initialize logging params
startTime := time.Now()
- function := "FPutObject(bucketName, objectName, fileName, objectContentType)"
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "fileName": "",
- "objectContentType": "",
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
}
// Seed random based on current time.
@@ -1070,7 +1293,8 @@ func testFPutObjectMultipart() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -1080,36 +1304,38 @@ func testFPutObjectMultipart() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
- var fileName = getFilePath("datafile-65-MB")
- if os.Getenv("MINT_DATA_DIR") == "" {
+ var fileName = getMintDataDirFilePath("datafile-65-MB")
+ if fileName == "" {
// Make a temp file with minPartSize bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
if err != nil {
- failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
}
- // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
- _, err = io.Copy(file, getDataReader("non-existent", sixtyFiveMiB))
- if err != nil {
- failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
}
- err = file.Close()
- if err != nil {
- failureLog(function, args, startTime, "", "File Close failed", err).Fatal()
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File Close failed", err)
+ return
}
fileName = file.Name()
args["fileName"] = fileName
}
- totalSize := sixtyFiveMiB * 1
+ totalSize := dataFileMap["datafile-65-MB"]
// Set base object name
objectName := bucketName + "FPutObject" + "-standard"
args["objectName"] = objectName
@@ -1118,50 +1344,56 @@ func testFPutObjectMultipart() {
args["objectContentType"] = objectContentType
// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err := c.FPutObject(bucketName, objectName, fileName, objectContentType)
+ n, err := c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType})
if err != nil {
- failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
}
if n != int64(totalSize) {
- failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
}
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
objInfo, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Unexpected error", err).Fatal()
+ logError(testName, function, args, startTime, "", "Unexpected error", err)
+ return
}
if objInfo.Size != int64(totalSize) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err).Fatal()
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err)
+ return
}
if objInfo.ContentType != objectContentType {
- failureLog(function, args, startTime, "", "ContentType doesn't match", err).Fatal()
+ logError(testName, function, args, startTime, "", "ContentType doesn't match", err)
+ return
}
- // Remove all objects and bucket and temp file
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
- }
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
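
FPutObject follows the same migration as PutObject: the bare contentType string argument is folded into minio.PutObjectOptions. A sketch of the post-upgrade call (client, bucket, object, and file path are assumed):

import (
	"log"

	minio "github.com/minio/minio-go"
)

// uploadFile shows the post-upgrade FPutObject call: the content type now
// travels inside minio.PutObjectOptions rather than as a bare string.
func uploadFile(c *minio.Client, bucket, object, path string) {
	n, err := c.FPutObject(bucket, object, path, minio.PutObjectOptions{
		ContentType: "application/octet-stream",
	})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %d bytes from %s", n, path)
}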
// Tests FPutObject with null contentType (default = application/octet-stream)
func testFPutObject() {
// initialize logging params
startTime := time.Now()
- function := "FPutObject(bucketName, objectName, fileName, objectContentType)"
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+
args := map[string]interface{}{
"bucketName": "",
"objectName": "",
+ "fileName": "",
+ "opts": "",
}
// Seed random based on current time.
@@ -1175,7 +1407,8 @@ func testFPutObject() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -1185,146 +1418,467 @@ func testFPutObject() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ location := "us-east-1"
// Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
+ args["bucketName"] = bucketName
+ args["location"] = location
+ function = "MakeBucket()bucketName, location"
+ err = c.MakeBucket(bucketName, location)
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part.
// Use different data in part for multipart tests to check parts are uploaded in correct order.
- var fName = getFilePath("datafile-65-MB")
- if os.Getenv("MINT_DATA_DIR") == "" {
+ var fName = getMintDataDirFilePath("datafile-65-MB")
+ if fName == "" {
// Make a temp file with minPartSize bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
if err != nil {
- failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
}
- // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
- var buffer = bytes.Repeat([]byte(string('a')), sixtyFiveMiB)
- if _, err = file.Write(buffer); err != nil {
- failureLog(function, args, startTime, "", "File write failed", err).Fatal()
+ // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
}
// Close the file pro-actively for windows.
- err = file.Close()
- if err != nil {
- failureLog(function, args, startTime, "", "File close failed", err).Fatal()
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
}
+ defer os.Remove(file.Name())
fName = file.Name()
}
- var totalSize = sixtyFiveMiB * 1
+ totalSize := dataFileMap["datafile-65-MB"]
// Set base object name
+ function = "FPutObject(bucketName, objectName, fileName, opts)"
objectName := bucketName + "FPutObject"
- args["objectName"] = objectName
+ args["objectName"] = objectName + "-standard"
+ args["fileName"] = fName
+ args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"}
// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err := c.FPutObject(bucketName, objectName+"-standard", fName, "application/octet-stream")
+ n, err := c.FPutObject(bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+
if err != nil {
- failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
}
if n != int64(totalSize) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
+ return
}
// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
- n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, "")
+ args["objectName"] = objectName + "-Octet"
+ n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "File close failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
}
if n != int64(totalSize) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
+ return
}
srcFile, err := os.Open(fName)
if err != nil {
- failureLog(function, args, startTime, "", "File open failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "File open failed", err)
+ return
}
defer srcFile.Close()
// Add extension to temp file name
tmpFile, err := os.Create(fName + ".gtar")
if err != nil {
- failureLog(function, args, startTime, "", "File create failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "File create failed", err)
+ return
}
defer tmpFile.Close()
_, err = io.Copy(tmpFile, srcFile)
if err != nil {
- failureLog(function, args, startTime, "", "File copy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
}
// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
- n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", "")
+ args["objectName"] = objectName + "-GTar"
+ n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
}
if n != int64(totalSize) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
+ return
}
// Check headers
- rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-standard"
+ rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
}
if rStandard.ContentType != "application/octet-stream" {
- failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err).Fatal()
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
+ return
}
- rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-Octet"
+ rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
}
if rOctet.ContentType != "application/octet-stream" {
- failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err).Fatal()
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err)
+ return
}
- rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-GTar"
+ rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
}
if rGTar.ContentType != "application/x-gtar" {
- failureLog(function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rStandard.ContentType, err).Fatal()
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rGTar.ContentType, err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ if err = os.Remove(fName + ".gtar"); err != nil {
+ logError(testName, function, args, startTime, "", "File remove failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
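
The content-type assertions above use the new StatObject signature, which now takes minio.StatObjectOptions. A sketch of the same check in isolation (names are placeholders; the x-gtar expectation mirrors the test's ".gtar" upload):

import (
	"fmt"

	minio "github.com/minio/minio-go"
)

// contentTypeOf fetches an object's stored Content-Type via StatObject.
func contentTypeOf(c *minio.Client, bucket, object string) (string, error) {
	info, err := c.StatObject(bucket, object, minio.StatObjectOptions{})
	if err != nil {
		return "", err
	}
	return info.ContentType, nil
}

// checkGTar mirrors the assertion above: a ".gtar" upload with no explicit
// type is expected to be detected as application/x-gtar.
func checkGTar(c *minio.Client, bucket, object string) error {
	ct, err := contentTypeOf(c, bucket, object)
	if err != nil {
		return err
	}
	if ct != "application/x-gtar" {
		return fmt.Errorf("unexpected content type %q", ct)
	}
	return nil
}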
+
+// Tests FPutObjectWithContext request context cancels after timeout
+func testFPutObjectWithContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
}
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
- // Remove all objects and bucket and temp file
- err = c.RemoveObject(bucketName, objectName+"-standard")
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName+"-Octet")
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Upload 1 part's worth of data to use multipart upload.
+ // Use different data in part for multipart tests to check parts are uploaded in correct order.
+ var fName = getMintDataDirFilePath("datafile-1-MB")
+ if fName == "" {
+ // Make a temp file with 1 MiB of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+
+ // Upload 1 part to trigger a multipart upload.
+ if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ defer os.Remove(file.Name())
+ fName = file.Name()
}
+ totalSize := dataFileMap["datafile-1-MB"]
- err = c.RemoveObject(bucketName, objectName+"-GTar")
+ // Set base object name
+ objectName := bucketName + "FPutObjectWithContext"
+ args["objectName"] = objectName
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream)
+ _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+ // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed
+ n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on long timeout", err)
+ return
+ }
+ if n != int64(totalSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
+ return
}
- err = c.RemoveBucket(bucketName)
+ _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
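
The new WithContext variants abort an in-flight upload once the context's deadline passes; the 1-nanosecond timeout above is a guaranteed expiry while the 1-hour timeout is effectively unbounded. A sketch of that cancellation pattern (bucket, object, and path are placeholders):

import (
	"context"
	"time"

	minio "github.com/minio/minio-go"
)

// uploadWithDeadline aborts the transfer if it runs longer than d.
func uploadWithDeadline(c *minio.Client, bucket, object, path string, d time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	// Always release the context's resources, as the tests do.
	defer cancel()
	_, err := c.FPutObjectWithContext(ctx, bucket, object, path, minio.PutObjectOptions{})
	return err
}

Called with time.Nanosecond, the upload is expected to fail with a deadline error, mirroring the "-Shorttimeout" case; with time.Hour it should behave like the "-Longtimeout" case.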
+
+// Tests FPutObjectWithContext request context cancels after timeout
+func testFPutObjectWithContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObjectWithContext(ctx, bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.PutObjectOptions{ContentType:objectContentType}",
}
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
- err = os.Remove(fName + ".gtar")
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
if err != nil {
- failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Upload 1 part's worth of data to use multipart upload.
+ // Use different data in part for multipart tests to check parts are uploaded in correct order.
+ var fName = getMintDataDirFilePath("datafile-1-MB")
+ if fName == "" {
+ // Make a temp file with 1 MiB of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Temp file creation failed", err)
+ return
+ }
+
+ // Upload 1 part to trigger a multipart upload.
+ if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ defer os.Remove(file.Name())
+ fName = file.Name()
+ }
+ totalSize := dataFileMap["datafile-1-MB"]
+
+ // Set base object name
+ objectName := bucketName + "FPutObjectWithContext"
+ args["objectName"] = objectName
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream)
+ _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+ // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed
+ n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on longer timeout", err)
+ return
+ }
+ if n != int64(totalSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match:wanted"+string(totalSize)+" got "+string(n), err)
+ return
+ }
+
+ _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
}
-// Tests get object ReaderSeeker interface methods.
-func testGetObjectReadSeekFunctional() {
+// Test validates putObject with context to see if request cancellation is honored.
+func testPutObjectWithContext() {
// initialize logging params
startTime := time.Now()
- function := "GetObject(bucketName, objectName)"
+ testName := getFuncName()
+ function := "PutObjectWithContext(ctx, bucketName, objectName, fileName, opts)"
args := map[string]interface{}{
+ "ctx": "",
"bucketName": "",
"objectName": "",
+ "opts": "",
}
+ // Instantiate new minio client object.
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
+ return
+ }
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+ args["objectName"] = objectName
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"}
+ defer cancel()
+
+ _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithContext should fail on short timeout", err)
+ return
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ args["ctx"] = ctx
+
+ defer cancel()
+ reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Tests get object ReaderSeeker interface methods.
+func testGetObjectReadSeekFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -1337,7 +1891,8 @@ func testGetObjectReadSeekFunctional() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -1347,17 +1902,19 @@ func testGetObjectReadSeekFunctional() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate 33K of data.
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
defer reader.Close()
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@@ -1365,43 +1922,46 @@ func testGetObjectReadSeekFunctional() {
buf, err := ioutil.ReadAll(reader)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
// Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
+ return
}
defer func() {
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
}()
// Read the data back
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
st, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat object failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat object failed", err)
+ return
}
- if st.Size != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal()
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
+ return
}
// This following function helps us to compare data from the reader after seek
@@ -1411,13 +1971,15 @@ func testGetObjectReadSeekFunctional() {
return
}
buffer := bytes.NewBuffer([]byte{})
- if _, err := io.CopyN(buffer, r, int64(thirtyThreeKiB)); err != nil {
+ if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
if err != io.EOF {
- failureLog(function, args, startTime, "", "CopyN failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "CopyN failed", err)
+ return
}
}
if !bytes.Equal(buf[start:end], buffer.Bytes()) {
- failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
}
}
@@ -1436,23 +1998,23 @@ func testGetObjectReadSeekFunctional() {
// Start from offset 0, fetch data and compare
{0, 0, 0, nil, true, 0, 0},
// Start from offset 2048, fetch data and compare
- {2048, 0, 2048, nil, true, 2048, thirtyThreeKiB},
+ {2048, 0, 2048, nil, true, 2048, bufSize},
// Start from offset larger than possible
- {int64(thirtyThreeKiB) + 1024, 0, 0, seekErr, false, 0, 0},
+ {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
// Move to offset 0 without comparing
{0, 0, 0, nil, false, 0, 0},
// Move one step forward and compare
- {1, 1, 1, nil, true, 1, thirtyThreeKiB},
+ {1, 1, 1, nil, true, 1, bufSize},
// Move larger than possible
- {int64(thirtyThreeKiB), 1, 0, seekErr, false, 0, 0},
+ {int64(bufSize), 1, 0, seekErr, false, 0, 0},
// Provide negative offset with CUR_SEEK
{int64(-1), 1, 0, seekErr, false, 0, 0},
// Test with whence SEEK_END and with positive offset
- {1024, 2, int64(thirtyThreeKiB) - 1024, io.EOF, true, 0, 0},
+ {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
// Test with whence SEEK_END and with negative offset
- {-1024, 2, int64(thirtyThreeKiB) - 1024, nil, true, thirtyThreeKiB - 1024, thirtyThreeKiB},
+ {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
// Test with whence SEEK_END and with large negative offset
- {-int64(thirtyThreeKiB) * 2, 2, 0, seekErr, true, 0, 0},
+ {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
}
for i, testCase := range testCases {
@@ -1460,11 +2022,13 @@ func testGetObjectReadSeekFunctional() {
n, err := r.Seek(testCase.offset, testCase.whence)
// We expect an error
if testCase.err == seekErr && err == nil {
- failureLog(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err).Fatal()
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err)
+ return
}
// We expect a specific error
if testCase.err != seekErr && testCase.err != err {
- failureLog(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err).Fatal()
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err)
+ return
}
// If we expect an error go to the next loop
if testCase.err != nil {
@@ -1472,25 +2036,24 @@ func testGetObjectReadSeekFunctional() {
}
// Check the returned seek pos
if n != testCase.pos {
- failureLog(function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err)
+ return
}
// Compare only if shouldCmp is activated
if testCase.shouldCmp {
cmpData(r, testCase.start, testCase.end)
}
}
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
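
The seek table above encodes whence values numerically: 0, 1, and 2 correspond to io.SeekStart, io.SeekCurrent, and io.SeekEnd. A sketch of the three modes against an already-fetched minio.Object (size is assumed to come from Stat):

import (
	"io"
	"log"

	minio "github.com/minio/minio-go"
)

// seekModes walks the three whence values the table above exercises.
func seekModes(r *minio.Object, size int64) {
	pos, _ := r.Seek(2048, io.SeekStart) // absolute: pos == 2048
	pos, _ = r.Seek(1, io.SeekCurrent)   // relative to current: pos == 2049
	pos, _ = r.Seek(-1024, io.SeekEnd)   // relative to end: pos == size-1024
	log.Println("final position:", pos, "expected:", size-1024)
}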
// Tests get object ReaderAt interface methods.
func testGetObjectReadAtFunctional() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- }
+ args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -1503,7 +2066,8 @@ func testGetObjectReadAtFunctional() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -1513,17 +2077,19 @@ func testGetObjectReadAtFunctional() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate 33K of data.
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
defer reader.Close()
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@@ -1531,23 +2097,27 @@ func testGetObjectReadAtFunctional() {
buf, err := ioutil.ReadAll(reader)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
// Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
+ return
}
// read the data back
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
offset := int64(2048)
@@ -1560,56 +2130,70 @@ func testGetObjectReadAtFunctional() {
// Test readAt before stat is called.
m, err := r.ReadAt(buf1, offset)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
}
if m != len(buf1) {
- failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err)
+ return
}
if !bytes.Equal(buf1, buf[offset:offset+512]) {
- failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
}
offset += 512
st, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
- if st.Size != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal()
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
+ return
}
m, err = r.ReadAt(buf2, offset)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
}
if m != len(buf2) {
- failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err)
+ return
}
if !bytes.Equal(buf2, buf[offset:offset+512]) {
- failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
}
offset += 512
m, err = r.ReadAt(buf3, offset)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
}
if m != len(buf3) {
- failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err)
+ return
}
if !bytes.Equal(buf3, buf[offset:offset+512]) {
- failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
}
offset += 512
m, err = r.ReadAt(buf4, offset)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
}
if m != len(buf4) {
- failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err)
+ return
}
if !bytes.Equal(buf4, buf[offset:offset+512]) {
- failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
}
buf5 := make([]byte, n)
@@ -1617,14 +2201,17 @@ func testGetObjectReadAtFunctional() {
m, err = r.ReadAt(buf5, 0)
if err != nil {
if err != io.EOF {
- failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
}
}
if m != len(buf5) {
- failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err)
+ return
}
if !bytes.Equal(buf, buf5) {
- failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
+ return
}
buf6 := make([]byte, n+1)
@@ -1632,24 +2219,23 @@ func testGetObjectReadAtFunctional() {
_, err = r.ReadAt(buf6, 0)
if err != nil {
if err != io.EOF {
- failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
}
}
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
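
testGetObjectReadAtFunctional above exercises the io.ReaderAt side of the *minio.Object returned by GetObject. A compressed sketch of that access pattern under the upgraded API, assuming an existing object of at least 512 bytes; endpoint, credentials, and names are placeholders:

package main

import (
	"io"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	r, err := c.GetObject("my-bucket", "my-object", minio.GetObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	// Stat reports the remote object size without reading the body.
	st, err := r.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// ReadAt fetches an arbitrary byte range; io.EOF is expected when the
	// range ends exactly at, or runs past, the end of the object.
	buf := make([]byte, 512)
	if _, err := r.ReadAt(buf, st.Size-int64(len(buf))); err != nil && err != io.EOF {
		log.Fatal(err)
	}
	log.Printf("read last %d bytes of %d", len(buf), st.Size)
}
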
// Test Presigned Post Policy
func testPresignedPostPolicy() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "PresignedPostPolicy(policy)"
args := map[string]interface{}{
"policy": "",
@@ -1666,7 +2252,8 @@ func testPresignedPostPolicy() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -1676,92 +2263,182 @@ func testPresignedPostPolicy() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
// Make a new bucket in 'us-east-1' (source bucket).
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate 33K of data.
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
defer reader.Close()
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
buf, err := ioutil.ReadAll(reader)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
// Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
+ return
}
policy := minio.NewPostPolicy()
if err := policy.SetBucket(""); err == nil {
- failureLog(function, args, startTime, "", "SetBucket did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err)
+ return
}
if err := policy.SetKey(""); err == nil {
- failureLog(function, args, startTime, "", "SetKey did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err)
+ return
}
if err := policy.SetKeyStartsWith(""); err == nil {
- failureLog(function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err)
+ return
}
if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
- failureLog(function, args, startTime, "", "SetExpires did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err)
+ return
}
if err := policy.SetContentType(""); err == nil {
- failureLog(function, args, startTime, "", "SetContentType did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
+ return
}
if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
- failureLog(function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetUserMetadata("", ""); err == nil {
+ logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err)
+ return
}
policy.SetBucket(bucketName)
policy.SetKey(objectName)
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
- policy.SetContentType("image/png")
- policy.SetContentLengthRange(1024, 1024*1024)
- args["policy"] = policy
+ policy.SetContentType("binary/octet-stream")
+ policy.SetContentLengthRange(10, 1024*1024)
+ policy.SetUserMetadata(metadataKey, metadataValue)
+ args["policy"] = policy.String()
- _, _, err = c.PresignedPostPolicy(policy)
+ presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(policy)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedPostPolicy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err)
+ return
}
- policy = minio.NewPostPolicy()
+ var formBuf bytes.Buffer
+ writer := multipart.NewWriter(&formBuf)
+ for k, v := range formData {
+ writer.WriteField(k, v)
+ }
- // Remove all objects and buckets
- err = c.RemoveObject(bucketName, objectName)
+ // Get a 33 KB file to upload and test that the post policy we set is enforced
+ var filePath = getMintDataDirFilePath("datafile-33-kB")
+ if filePath == "" {
+ // Make a temp file with 33 KB data.
+ file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+ if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File Close failed", err)
+ return
+ }
+ filePath = file.Name()
+ }
+
+ // add file to post request
+ f, err := os.Open(filePath)
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "File open failed", err)
+ return
+ }
+ defer f.Close()
+ w, err := writer.CreateFormFile("file", filePath)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CreateFormFile failed", err)
+ return
}
- err = c.RemoveBucket(bucketName)
+ _, err = io.Copy(w, f)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ writer.Close()
+
+ // make post request with correct form data
+ res, err := http.Post(presignedPostPolicyURL.String(), writer.FormDataContentType(), bytes.NewReader(formBuf.Bytes()))
if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Http request failed", err)
+ return
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusNoContent {
+ logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status))
+ return
+ }
+
+ // the Location header should carry the absolute URL of the uploaded object
+ var scheme string
+ if mustParseBool(os.Getenv(enableHTTPS)) {
+ scheme = "https://"
+ } else {
+ scheme = "http://"
+ }
+
+ expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
+
+ if val, ok := res.Header["Location"]; ok {
+ if val[0] != expectedLocation {
+ logError(testName, function, args, startTime, "", "Location in header response is incorrect", err)
+ return
+ }
+ } else {
+ logError(testName, function, args, startTime, "", "Location not found in header response", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
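
The rewritten testPresignedPostPolicy now exercises the returned form data end to end, including the SetUserMetadata condition added in this upgrade. A trimmed sketch of that browser-style upload flow; endpoint, credentials, names, and the metadata pair are placeholders:

package main

import (
	"bytes"
	"log"
	"mime/multipart"
	"net/http"
	"strings"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	pp := minio.NewPostPolicy()
	pp.SetBucket("my-bucket")
	pp.SetKey("my-object")
	pp.SetExpires(time.Now().UTC().Add(24 * time.Hour))
	pp.SetContentLengthRange(1, 1024*1024)
	pp.SetUserMetadata("x-tag", "demo") // condition introduced by this upgrade
	u, formData, err := c.PresignedPostPolicy(pp)
	if err != nil {
		log.Fatal(err)
	}
	// POST the signed form fields plus a trailing "file" part to the URL.
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	for k, v := range formData {
		w.WriteField(k, v)
	}
	fw, err := w.CreateFormFile("file", "my-object")
	if err != nil {
		log.Fatal(err)
	}
	fw.Write([]byte(strings.Repeat("f", 64)))
	w.Close()
	res, err := http.Post(u.String(), w.FormDataContentType(), &body)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	log.Println("status:", res.Status) // 204 No Content on success
}
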
// Tests copy object
func testCopyObject() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "CopyObject(dst, src)"
- args := map[string]interface{}{
- "dst": "",
- "src": "",
- }
+ args := map[string]interface{}{}
+
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -1773,7 +2450,8 @@ func testCopyObject() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -1783,163 +2461,353 @@ func testCopyObject() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
// Make a new bucket in 'us-east-1' (source bucket).
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Make a new bucket in 'us-east-1' (destination bucket).
err = c.MakeBucket(bucketName+"-copy", "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate 33K of data.
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
+ return
}
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
// Check the various fields of source object against destination object.
objInfo, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
// Copy Source
src := minio.NewSourceInfo(bucketName, objectName, nil)
+ args["src"] = src
// Set copy conditions.
// All invalid conditions first.
err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
- failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err)
+ return
}
err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
- failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err)
+ return
}
err = src.SetMatchETagCond("")
if err == nil {
- failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err)
+ return
}
err = src.SetMatchETagExceptCond("")
if err == nil {
- failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err)
+ return
}
err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
- failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err)
+ return
}
err = src.SetMatchETagCond(objInfo.ETag)
if err != nil {
- failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err)
+ return
}
- args["src"] = src
dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
args["dst"] = dst
if err != nil {
- failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
}
// Perform the Copy
err = c.CopyObject(dst, src)
if err != nil {
- failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
}
// Source object
- r, err = c.GetObject(bucketName, objectName)
+ r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
// Destination object
- readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+ readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
// Check the various fields of source object against destination object.
objInfo, err = r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
objInfoCopy, err := readerCopy.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
if objInfo.Size != objInfoCopy.Size {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err).Fatal()
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err)
+ return
}
// CopyObject again but with wrong conditions
src = minio.NewSourceInfo(bucketName, objectName, nil)
err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
- failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err)
+ return
}
err = src.SetMatchETagExceptCond(objInfo.ETag)
if err != nil {
- failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err)
+ return
}
// Perform the Copy which should fail
err = c.CopyObject(dst, src)
if err == nil {
- failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
+ return
}
- // Remove all objects and buckets
- err = c.RemoveObject(bucketName, objectName)
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+ if err = cleanupBucket(bucketName+"-copy", c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+ successLogger(testName, function, args, startTime).Info()
+}
+
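
testCopyObject above moves to the SourceInfo/DestinationInfo copy API and its server-side copy conditions. A minimal sketch of a conditional copy under that API; endpoint, credentials, and names are placeholders, and the source object is assumed to exist:

package main

import (
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	// Describe the source, optionally guarded by copy conditions.
	src := minio.NewSourceInfo("src-bucket", "src-object", nil)
	if err := src.SetModifiedSinceCond(time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil {
		log.Fatal(err)
	}
	// nil encryption material and metadata keep the destination defaults.
	dst, err := minio.NewDestinationInfo("dst-bucket", "dst-object", nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	// The copy runs server side; object data never flows through the client.
	if err := c.CopyObject(dst, src); err != nil {
		log.Fatal(err)
	}
}
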
+// TestEncryptionPutGet tests client-side encryption
+func testEncryptionPutGet() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutEncryptedObject(bucketName, objectName, reader, cbcMaterials, metadata, progress)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "cbcMaterials": "",
+ "metadata": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
- err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
- err = c.RemoveBucket(bucketName)
+ // Generate a symmetric key
+ symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+ // Generate an asymmetric key from predefined public and private keys
+ privateKey, err := hex.DecodeString(
+ "30820277020100300d06092a864886f70d0101010500048202613082025d" +
+ "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" +
+ "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" +
+ "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" +
+ "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" +
+ "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" +
+ "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" +
+ "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" +
+ "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" +
+ "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" +
+ "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" +
+ "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" +
+ "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" +
+ "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" +
+ "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" +
+ "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" +
+ "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" +
+ "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" +
+ "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" +
+ "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" +
+ "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" +
+ "9945cb5c7d")
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
+ return
+ }
+
+ publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
+ "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" +
+ "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" +
+ "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" +
+ "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
+ "80a89e43f29b570203010001")
if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
+ return
}
- err = c.RemoveBucket(bucketName + "-copy")
+ // Generate an asymmetric key
+ asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err)
+ return
+ }
+
+ testCases := []struct {
+ buf []byte
+ encKey encrypt.Key
+ }{
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ for i, testCase := range testCases {
+ // Generate a random object name
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Secured object
+ cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
+ args["cbcMaterials"] = cbcMaterials
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err)
+ return
+ }
+
+ // Put encrypted data
+ _, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ // Compare the sent object with the received one
+ recvBuffer := bytes.NewBuffer([]byte{})
+ if _, err = io.Copy(recvBuffer, r); err != nil {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
+ return
+ }
+ if recvBuffer.Len() != len(testCase.buf) {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
+ return
+ }
+ if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
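
The new testEncryptionPutGet above drives the client-side encryption path over buffers from 0 bytes to 1 MiB with both symmetric and asymmetric keys. A minimal sketch of that round trip with a symmetric key; the 16-byte key, endpoint, credentials, and names are placeholders:

package main

import (
	"bytes"
	"io/ioutil"
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	c, err := minio.NewV4("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	// AES-CBC materials derived from a 16-byte symmetric key.
	key := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
	materials, err := encrypt.NewCBCSecureMaterials(key)
	if err != nil {
		log.Fatal(err)
	}
	plaintext := []byte("hello, encrypted world")
	// Encrypts on the client before upload.
	if _, err := c.PutEncryptedObject("my-bucket", "my-object",
		bytes.NewReader(plaintext), materials); err != nil {
		log.Fatal(err)
	}
	// Decrypts transparently on download.
	r, err := c.GetEncryptedObject("my-bucket", "my-object", materials)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	got, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("round trip ok:", bytes.Equal(plaintext, got))
}
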
-// TestEncryptionPutGet tests client side encryption
-func testEncryptionPutGet() {
+// TestEncryptionFPut tests client-side encryption
+func testEncryptionFPut() {
// initialize logging params
startTime := time.Now()
- function := "PutEncryptedObject(bucketName, objectName, reader, cbcMaterials, metadata, progress)"
+ testName := getFuncName()
+ function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, cbcMaterials)"
args := map[string]interface{}{
"bucketName": "",
"objectName": "",
+ "filePath": "",
+ "contentType": "",
"cbcMaterials": "",
- "metadata": "",
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -1952,7 +2820,8 @@ func testEncryptionPutGet() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -1962,13 +2831,14 @@ func testEncryptionPutGet() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate a symmetric key
@@ -2000,7 +2870,8 @@ func testEncryptionPutGet() {
"9945cb5c7d")
if err != nil {
- failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
+ return
}
publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
@@ -2010,13 +2881,15 @@ func testEncryptionPutGet() {
"c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
"80a89e43f29b570203010001")
if err != nil {
- failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
+ return
}
// Generate an asymmetric key
asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
if err != nil {
- failureLog(function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err)
+ return
}
// Object custom metadata
@@ -2057,54 +2930,70 @@ func testEncryptionPutGet() {
args["cbcMaterials"] = cbcMaterials
if err != nil {
- failureLog(function, args, startTime, "", "NewCBCSecureMaterials failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err)
+ return
}
-
- // Put encrypted data
- _, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials, map[string][]string{"Content-Type": {customContentType}}, nil)
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file create failed", err)
+ return
+ }
+ _, err = file.Write(testCase.buf)
if err != nil {
- failureLog(function, args, startTime, "", "PutEncryptedObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "file write failed", err)
+ return
+ }
+ file.Close()
+ // Put encrypted data
+ if _, err = c.FPutEncryptedObject(bucketName, objectName, fileName, cbcMaterials); err != nil {
+ logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
+ return
}
// Read the data back
r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
if err != nil {
- failureLog(function, args, startTime, "", "GetEncryptedObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+ return
}
defer r.Close()
// Compare the sent object with the received one
recvBuffer := bytes.NewBuffer([]byte{})
if _, err = io.Copy(recvBuffer, r); err != nil {
- failureLog(function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err).Fatal()
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
+ return
}
if recvBuffer.Len() != len(testCase.buf) {
- failureLog(function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err).Fatal()
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
+ return
}
if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
- failureLog(function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err).Fatal()
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
+ return
}
- // Remove test object
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "Test "+string(i+1)+", RemoveObject failed with: "+err.Error(), err).Fatal()
+ if err = os.Remove(fileName); err != nil {
+ logError(testName, function, args, startTime, "", "File remove failed", err)
+ return
}
-
}
- // Remove test bucket
- err = c.RemoveBucket(bucketName)
- if err != nil {
- err = c.RemoveBucket(bucketName)
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
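
testEncryptionFPut above covers the file-based variant of the same path. Assuming a client and CBC materials constructed as in the previous sketch, the core call reduces to the snippet below; the bucket, object, and helper names are illustrative only:

package example

import (
	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

// uploadEncryptedFile is a hypothetical helper: FPutEncryptedObject streams
// the file at path through the client-side encrypter before upload.
func uploadEncryptedFile(c *minio.Client, materials encrypt.Materials, path string) error {
	_, err := c.FPutEncryptedObject("my-bucket", "my-object", path, materials)
	return err
}
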
func testBucketNotification() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "SetBucketNotification(bucketName)"
args := map[string]interface{}{
"bucketName": "",
@@ -2115,7 +3004,7 @@ func testBucketNotification() {
os.Getenv("NOTIFY_REGION") == "" ||
os.Getenv("NOTIFY_ACCOUNTID") == "" ||
os.Getenv("NOTIFY_RESOURCE") == "" {
- ignoredLog(function, args, startTime, "Skipped notification test as it is not configured").Info()
+ ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info()
return
}
@@ -2129,7 +3018,8 @@ func testBucketNotification() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable to debug
@@ -2160,7 +3050,8 @@ func testBucketNotification() {
// because it is duplicated
bNotification.AddTopic(topicConfig)
if len(bNotification.TopicConfigs) != 1 {
- failureLog(function, args, startTime, "", "Duplicate entry added", err).Fatal()
+ logError(testName, function, args, startTime, "", "Duplicate entry added", err)
+ return
}
// Add and remove a queue config
@@ -2169,34 +3060,49 @@ func testBucketNotification() {
err = c.SetBucketNotification(bucketName, bNotification)
if err != nil {
- failureLog(function, args, startTime, "", "SetBucketNotification failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetBucketNotification failed", err)
+ return
}
bNotification, err = c.GetBucketNotification(bucketName)
if err != nil {
- failureLog(function, args, startTime, "", "GetBucketNotification failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetBucketNotification failed", err)
+ return
}
if len(bNotification.TopicConfigs) != 1 {
- failureLog(function, args, startTime, "", "Topic config is empty", err).Fatal()
+ logError(testName, function, args, startTime, "", "Topic config is empty", err)
+ return
}
if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
- failureLog(function, args, startTime, "", "Couldn't get the suffix", err).Fatal()
+ logError(testName, function, args, startTime, "", "Couldn't get the suffix", err)
+ return
}
err = c.RemoveAllBucketNotification(bucketName)
if err != nil {
- failureLog(function, args, startTime, "", "RemoveAllBucketNotification failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
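
testBucketNotification above round-trips a topic configuration and, after this change, also cleans up the bucket. A minimal sketch of attaching a suffix-filtered SNS topic notification; the ARN fields, endpoint, credentials, and bucket name are placeholders:

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	// Build the ARN of the target topic; every field is a placeholder.
	arn := minio.NewArn("aws", "sns", "us-east-1", "000000000000", "my-topic")
	topicConfig := minio.NewNotificationConfig(arn)
	topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
	topicConfig.AddFilterSuffix("jpg")

	var bn minio.BucketNotification
	// AddTopic skips exact duplicates, which the test above asserts.
	bn.AddTopic(topicConfig)
	if err := c.SetBucketNotification("my-bucket", bn); err != nil {
		log.Fatal(err)
	}
}
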
// Tests comprehensive list of all methods.
func testFunctional() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "testFunctional()"
+ function_all := ""
+ args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -2208,7 +3114,8 @@ func testFunctional() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, nil, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, nil, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable to debug
@@ -2218,152 +3125,179 @@ func testFunctional() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
// Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
function = "MakeBucket(bucketName, region)"
- args := map[string]interface{}{
- "bucketName": bucketName,
- }
+ function_all = "MakeBucket(bucketName, region)"
+ args["bucketName"] = bucketName
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate a random file name.
fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
file, err := os.Create(fileName)
if err != nil {
- failureLog(function, args, startTime, "", "File creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "File creation failed", err)
+ return
}
for i := 0; i < 3; i++ {
buf := make([]byte, rand.Intn(1<<19))
_, err = file.Write(buf)
if err != nil {
- failureLog(function, args, startTime, "", "File write failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "File write failed", err)
+ return
}
}
file.Close()
// Verify the bucket exists and you have access.
var exists bool
- exists, err = c.BucketExists(bucketName)
function = "BucketExists(bucketName)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
}
+ exists, err = c.BucketExists(bucketName)
if err != nil {
- failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "BucketExists failed", err)
+ return
}
if !exists {
- failureLog(function, args, startTime, "", "Could not find the bucket", err).Fatal()
+ logError(testName, function, args, startTime, "", "Could not find the bucket", err)
+ return
}
// Asserting the default bucket policy.
- policyAccess, err := c.GetBucketPolicy(bucketName, "")
function = "GetBucketPolicy(bucketName, objectPrefix)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectPrefix": "",
}
+ policyAccess, err := c.GetBucketPolicy(bucketName, "")
if err != nil {
- failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
}
if policyAccess != "none" {
- failureLog(function, args, startTime, "", "policy should be set to none", err).Fatal()
+ logError(testName, function, args, startTime, "", "policy should be set to none", err)
+ return
}
+
// Set the bucket policy to 'public readonly'.
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly)
function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectPrefix": "",
"bucketPolicy": policy.BucketPolicyReadOnly,
}
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly)
if err != nil {
- failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
}
// should return policy `readonly`.
- policyAccess, err = c.GetBucketPolicy(bucketName, "")
function = "GetBucketPolicy(bucketName, objectPrefix)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectPrefix": "",
}
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
if err != nil {
- failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
}
if policyAccess != "readonly" {
- failureLog(function, args, startTime, "", "policy should be set to readonly", err).Fatal()
+ logError(testName, function, args, startTime, "", "policy should be set to readonly", err)
+ return
}
// Make the bucket 'public writeonly'.
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly)
function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectPrefix": "",
"bucketPolicy": policy.BucketPolicyWriteOnly,
}
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly)
if err != nil {
- failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
}
// should return policy `writeonly`.
- policyAccess, err = c.GetBucketPolicy(bucketName, "")
function = "GetBucketPolicy(bucketName, objectPrefix)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectPrefix": "",
}
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
if err != nil {
- failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
}
if policyAccess != "writeonly" {
- failureLog(function, args, startTime, "", "policy should be set to writeonly", err).Fatal()
+ logError(testName, function, args, startTime, "", "policy should be set to writeonly", err)
+ return
}
// Make the bucket 'public read/write'.
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectPrefix": "",
"bucketPolicy": policy.BucketPolicyReadWrite,
}
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
if err != nil {
- failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
}
// should return policy `readwrite`.
- policyAccess, err = c.GetBucketPolicy(bucketName, "")
function = "GetBucketPolicy(bucketName, objectPrefix)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectPrefix": "",
}
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
if err != nil {
- failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
}
if policyAccess != "readwrite" {
- failureLog(function, args, startTime, "", "policy should be set to readwrite", err).Fatal()
+ logError(testName, function, args, startTime, "", "policy should be set to readwrite", err)
+ return
}
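
The policy assertions above cycle the bucket through none, readonly, writeonly, and readwrite. A minimal sketch of one set/get pair using the policy package bundled with minio-go; endpoint, credentials, and the bucket name are placeholders:

package main

import (
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/policy"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	// Grant anonymous read access to every object in the bucket.
	if err := c.SetBucketPolicy("my-bucket", "", policy.BucketPolicyReadOnly); err != nil {
		log.Fatal(err)
	}
	// An empty prefix asks about the bucket as a whole.
	access, err := c.GetBucketPolicy("my-bucket", "")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("bucket policy:", access) // expected: "readonly"
}
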
// List all buckets.
- buckets, err := c.ListBuckets()
function = "ListBuckets()"
+ function_all += ", " + function
args = nil
+ buckets, err := c.ListBuckets()
if len(buckets) == 0 {
- failureLog(function, args, startTime, "", "Found bucket list to be empty", err).Fatal()
+ logError(testName, function, args, startTime, "", "Found bucket list to be empty", err)
+ return
}
if err != nil {
- failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ListBuckets failed", err)
+ return
}
// Verify if previously created bucket is listed in list buckets.
@@ -2376,7 +3310,8 @@ func testFunctional() {
// If bucket not found error out.
if !bucketFound {
- failureLog(function, args, startTime, "", "Bucket: "+bucketName+" not found", err).Fatal()
+ logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err)
+ return
}
objectName := bucketName + "unique"
@@ -2384,35 +3319,40 @@ func testFunctional() {
// Generate data
buf := bytes.Repeat([]byte("f"), 1<<19)
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
function = "PutObject(bucketName, objectName, reader, contentType)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
"contentType": "",
}
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
if n != int64(len(buf)) {
- failureLog(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err)
+ return
}
- n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName + "-nolength",
"contentType": "binary/octet-stream",
}
+ n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
if n != int64(len(buf)) {
- failureLog(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err)
+ return
}
// Instantiate a done channel to close all listing.
@@ -2423,6 +3363,7 @@ func testFunctional() {
isRecursive := true // Recursive is true.
function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -2436,12 +3377,14 @@ func testFunctional() {
}
}
if !objFound {
- failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal()
+ logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
+ return
}
objFound = false
isRecursive = true // Recursive is true.
function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -2455,12 +3398,14 @@ func testFunctional() {
}
}
if !objFound {
- failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal()
+ logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
+ return
}
incompObjNotFound := true
function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
@@ -2474,147 +3419,211 @@ func testFunctional() {
}
}
if !incompObjNotFound {
- failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal()
+ logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
+ return
}
- newReader, err := c.GetObject(bucketName, objectName)
function = "GetObject(bucketName, objectName)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
}
+ newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
newReadBytes, err := ioutil.ReadAll(newReader)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
if !bytes.Equal(newReadBytes, buf) {
- failureLog(function, args, startTime, "", "GetObject bytes mismatch", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err)
+ return
}
- err = c.FGetObject(bucketName, objectName, fileName+"-f")
function = "FGetObject(bucketName, objectName, fileName)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
"fileName": fileName + "-f",
}
+ err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "FGetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "FGetObject failed", err)
+ return
+ }
+
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": "",
+ "expires": 3600 * time.Second,
+ }
+ if _, err = c.PresignedHeadObject(bucketName, "", 3600*time.Second, nil); err == nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject success", err)
+ return
}
// Generate presigned HEAD object url.
- presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
"expires": 3600 * time.Second,
}
+ presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
+ return
}
// Verify if presigned url works.
resp, err := http.Head(presignedHeadURL.String())
if err != nil {
- failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
+ return
}
if resp.StatusCode != http.StatusOK {
- failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err)
+ return
}
if resp.Header.Get("ETag") == "" {
- failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
+ return
}
resp.Body.Close()
+ function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": "",
+ "expires": 3600 * time.Second,
+ }
+ _, err = c.PresignedGetObject(bucketName, "", 3600*time.Second, nil)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject success", err)
+ return
+ }
+
// Generate presigned GET object url.
- presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
"expires": 3600 * time.Second,
}
+ presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
}
// Verify if presigned url works.
resp, err = http.Get(presignedGetURL.String())
if err != nil {
- failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
}
if resp.StatusCode != http.StatusOK {
- failureLog(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
+ return
}
newPresignedBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
}
resp.Body.Close()
if !bytes.Equal(newPresignedBytes, buf) {
- failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
}
// Set request parameters.
reqParams := make(url.Values)
reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
- presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
"expires": 3600 * time.Second,
"reqParams": reqParams,
}
+ presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
}
// Verify if presigned url works.
resp, err = http.Get(presignedGetURL.String())
if err != nil {
- failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
}
if resp.StatusCode != http.StatusOK {
- failureLog(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
+ return
}
newPresignedBytes, err = ioutil.ReadAll(resp.Body)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
}
if !bytes.Equal(newPresignedBytes, buf) {
- failureLog(function, args, startTime, "", "Bytes mismatch for presigned GET URL", err).Fatal()
+ logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err)
+ return
}
if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
- failureLog(function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err).Fatal()
+ logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+resp.Header.Get("Content-Disposition"), err)
+ return
}
- presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": "",
+ "expires": 3600 * time.Second,
+ }
+ _, err = c.PresignedPutObject(bucketName, "", 3600*time.Second)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject succeeded with empty object name, expected failure", err)
+ return
+ }
function = "PresignedPutObject(bucketName, objectName, expires)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
- "objectName": objectName,
+ "objectName": objectName + "-presigned",
"expires": 3600 * time.Second,
}
+ presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+ return
}
buf = bytes.Repeat([]byte("g"), 1<<19)
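+ // 1<<19 bytes (512 KiB) of "g" is the payload for the presigned PUT.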
req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
if err != nil {
- failureLog(function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err).Fatal()
+ logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err)
+ return
}
httpClient := &http.Client{
// Set a sensible timeout of 30 seconds to wait for the response
@@ -2625,90 +3634,103 @@ func testFunctional() {
}
resp, err = httpClient.Do(req)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+ return
}
- newReader, err = c.GetObject(bucketName, objectName+"-presigned")
+ newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject after PresignedPutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err)
+ return
}
newReadBytes, err = ioutil.ReadAll(newReader)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll after GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
+ return
}
if !bytes.Equal(newReadBytes, buf) {
- failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName)
function = "RemoveObject(bucketName, objectName)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
"objectName": objectName,
}
+ err = c.RemoveObject(bucketName, objectName)
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName+"-f")
args["objectName"] = objectName + "-f"
+ err = c.RemoveObject(bucketName, objectName+"-f")
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName+"-nolength")
args["objectName"] = objectName + "-nolength"
+ err = c.RemoveObject(bucketName, objectName+"-nolength")
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName+"-presigned")
args["objectName"] = objectName + "-presigned"
+ err = c.RemoveObject(bucketName, objectName+"-presigned")
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
}
- err = c.RemoveBucket(bucketName)
function = "RemoveBucket(bucketName)"
+ function_all += ", " + function
args = map[string]interface{}{
"bucketName": bucketName,
}
+ err = c.RemoveBucket(bucketName)
if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
+ return
}
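+ // Removing the same bucket a second time must fail, since it no longer exists.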
err = c.RemoveBucket(bucketName)
if err == nil {
- failureLog(function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err).Fatal()
+ logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err)
+ return
}
if err.Error() != "The specified bucket does not exist" {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
+ return
}
+
if err = os.Remove(fileName); err != nil {
- failureLog(function, args, startTime, "", "File Remove failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "File Remove failed", err)
+ return
}
if err = os.Remove(fileName + "-f"); err != nil {
- failureLog(function, args, startTime, "", "File Remove failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "File Remove failed", err)
+ return
}
- function = "testFunctional()"
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function_all, args, startTime).Info()
}
// Test for validating GetObject Reader* methods functioning when the
// object is modified in the object store.
-func testGetObjectObjectModified() {
+func testGetObjectModified() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- }
+ args := map[string]interface{}{}
// Instantiate new minio client object.
c, err := minio.NewV4(
@@ -2717,8 +3739,10 @@ func testGetObjectObjectModified() {
os.Getenv(secretKey),
mustParseBool(os.Getenv(enableHTTPS)),
)
+
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -2728,28 +3752,32 @@ func testGetObjectObjectModified() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Make a new bucket.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
defer c.RemoveBucket(bucketName)
// Upload an object.
objectName := "myobject"
+ args["objectName"] = objectName
content := "helloworld"
- _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), "application/text")
+ _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"})
if err != nil {
- failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal()
+ logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
+ return
}
defer c.RemoveObject(bucketName, objectName)
- reader, err := c.GetObject(bucketName, objectName)
+ reader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err).Fatal()
+ logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err)
+ return
}
defer reader.Close()
@@ -2757,35 +3785,46 @@ func testGetObjectObjectModified() {
b := make([]byte, 5)
n, err := reader.ReadAt(b, 0)
if err != nil {
- failureLog(function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err).Fatal()
+ logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err)
+ return
}
// Upload different contents to the same object while object is being read.
newContent := "goodbyeworld"
- _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), "application/text")
+ _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"})
if err != nil {
- failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal()
+ logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
+ return
}
// Confirm that a Stat() call in between doesn't change the Object's cached etag.
_, err = reader.Stat()
expectedError := "At least one of the pre-conditions you specified did not hold"
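+ // The reader still carries the ETag cached on the first read, so the server-side pre-condition check fails after the overwrite.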
if err.Error() != expectedError {
- failureLog(function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err).Fatal()
+ logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err)
+ return
}
// Read again only to find object contents have been modified since last read.
_, err = reader.ReadAt(b, int64(n))
if err.Error() != expectedError {
- failureLog(function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err).Fatal()
+ logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err)
+ return
}
- successLogger(function, args, startTime).Info()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
}
// Test validates PutObject uploading a file seeked to a given offset.
func testPutObjectUploadSeekedObject() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
args := map[string]interface{}{
"bucketName": "",
@@ -2802,7 +3841,8 @@ func testPutObjectUploadSeekedObject() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -2812,95 +3852,105 @@ func testPutObjectUploadSeekedObject() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Make a new bucket.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
defer c.RemoveBucket(bucketName)
- tempfile, err := ioutil.TempFile("", "minio-go-upload-test-")
- args["fileToUpload"] = tempfile
+ var tempfile *os.File
- if err != nil {
- failureLog(function, args, startTime, "", "TempFile create failed", err).Fatal()
- }
-
- var data []byte
- if fileName := getFilePath("datafile-100-kB"); fileName != "" {
- data, _ = ioutil.ReadFile(fileName)
+ if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" {
+ tempfile, err = os.Open(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File open failed", err)
+ return
+ }
+ args["fileToUpload"] = fileName
} else {
- // Generate data more than 32K
- data = bytes.Repeat([]byte("1"), 120000)
- }
- var length = len(data)
- if _, err = tempfile.Write(data); err != nil {
- failureLog(function, args, startTime, "", "TempFile write failed", err).Fatal()
- }
+ tempfile, err = ioutil.TempFile("", "minio-go-upload-test-")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile create failed", err)
+ return
+ }
+ args["fileToUpload"] = tempfile.Name()
+ // Generate 100kB data
+ if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+
+ defer os.Remove(tempfile.Name())
+
+ // Seek back to the beginning of the file.
+ tempfile.Seek(0, 0)
+ }
+ var length = 100 * humanize.KiByte
objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
args["objectName"] = objectName
offset := length / 2
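+ // Seek halfway into the file; the PutObject below should upload only the remaining length-offset bytes.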
- if _, err := tempfile.Seek(int64(offset), 0); err != nil {
- failureLog(function, args, startTime, "", "TempFile seek failed", err).Fatal()
+ if _, err = tempfile.Seek(int64(offset), 0); err != nil {
+ logError(testName, function, args, startTime, "", "TempFile seek failed", err)
+ return
}
- n, err := c.PutObject(bucketName, objectName, tempfile, "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
if n != int64(length-offset) {
- failureLog(function, args, startTime, "", "Invalid length returned, expected "+string(int64(length-offset))+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid length returned, expected %d got %d", int64(length-offset), n), err)
+ return
}
tempfile.Close()
- if err = os.Remove(tempfile.Name()); err != nil {
- failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
- }
-
- length = int(n)
- obj, err := c.GetObject(bucketName, objectName)
+ obj, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
n, err = obj.Seek(int64(offset), 0)
if err != nil {
- failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
}
if n != int64(offset) {
- failureLog(function, args, startTime, "", "Invalid offset returned, expected "+string(int64(offset))+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err)
+ return
}
- n, err = c.PutObject(bucketName, objectName+"getobject", obj, "binary/octet-stream")
+ n, err = c.PutObject(bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
if n != int64(length-offset) {
- failureLog(function, args, startTime, "", "Invalid offset returned, expected "+string(int64(length-offset))+" got "+string(n), err).Fatal()
- }
-
- if err = c.RemoveObject(bucketName, objectName); err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err)
+ return
}
- if err = c.RemoveObject(bucketName, objectName+"getobject"); err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- if err = c.RemoveBucket(bucketName); err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
- }
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
// Tests bucket re-create errors.
func testMakeBucketErrorV2() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "MakeBucket(bucketName, region)"
args := map[string]interface{}{
"bucketName": "",
@@ -2908,7 +3958,7 @@ func testMakeBucketErrorV2() {
}
if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+ ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
return
}
@@ -2923,7 +3973,8 @@ func testMakeBucketErrorV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -2933,31 +3984,39 @@ func testMakeBucketErrorV2() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ region := "eu-west-1"
args["bucketName"] = bucketName
+ args["region"] = region
// Make a new bucket in 'eu-west-1'.
- if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ if err = c.MakeBucket(bucketName, region); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
- if err = c.MakeBucket(bucketName, "eu-west-1"); err == nil {
- failureLog(function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err).Fatal()
+ if err = c.MakeBucket(bucketName, region); err == nil {
+ logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err)
+ return
}
// Verify valid error response from server.
if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
- failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal()
+ logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
+ return
}
- if err = c.RemoveBucket(bucketName); err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
// Test get object reader to not throw error on being closed twice.
func testGetObjectClosedTwiceV2() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "MakeBucket(bucketName, region)"
args := map[string]interface{}{
"bucketName": "",
@@ -2975,7 +4034,8 @@ func testGetObjectClosedTwiceV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -2985,73 +4045,78 @@ func testGetObjectClosedTwiceV2() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate 33K of data.
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
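+ // dataFileMap holds the byte size of each test fixture, so bufSize is the expected object length.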
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
defer reader.Close()
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, n), err)
+ return
}
// Read the data back
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
st, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
- if st.Size != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(st.Size), err).Fatal()
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, st.Size), err)
+ return
}
if err := r.Close(); err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
if err := r.Close(); err == nil {
- failureLog(function, args, startTime, "", "Object is already closed, should return error", err).Fatal()
+ logError(testName, function, args, startTime, "", "Object is already closed, should return error", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
// Tests removing partially uploaded objects.
func testRemovePartiallyUploadedV2() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "RemoveIncompleteUpload(bucketName, objectName)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- }
+ args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -3064,7 +4129,8 @@ func testRemovePartiallyUploadedV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
// Set user agent.
@@ -3074,13 +4140,14 @@ func testRemovePartiallyUploadedV2() {
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
@@ -3091,7 +4158,8 @@ func testRemovePartiallyUploadedV2() {
for i < 25 {
_, cerr := io.CopyN(writer, r, 128*1024)
if cerr != nil {
- failureLog(function, args, startTime, "", "Copy failed", cerr).Fatal()
+ logError(testName, function, args, startTime, "", "Copy failed", cerr)
+ return
}
i++
r.Seek(0, 0)
@@ -3102,34 +4170,40 @@ func testRemovePartiallyUploadedV2() {
objectName := bucketName + "-resumable"
args["objectName"] = objectName
- _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+ _, err = c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err == nil {
- failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject should fail", err)
+ return
}
if err.Error() != "proactively closed to be verified later" {
- failureLog(function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err).Fatal()
+ logError(testName, function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err)
+ return
}
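+ // The aborted upload leaves stale parts on the server; RemoveIncompleteUpload should purge them.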
err = c.RemoveIncompleteUpload(bucketName, objectName)
if err != nil {
- failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "RemoveIncompleteUpload failed", err)
+ return
}
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
// Tests FPutObject hidden contentType setting
func testFPutObjectV2() {
// initialize logging params
startTime := time.Now()
- function := "FPutObject(bucketName, objectName, fileName, contentType)"
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- "fileName": "",
- "contentType": "application/octet-stream",
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
}
// Seed random based on current time.
@@ -3143,7 +4217,8 @@ func testFPutObjectV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -3153,34 +4228,39 @@ func testFPutObjectV2() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Make a temp file with 11*1024*1024 bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
if err != nil {
- failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
}
r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
n, err := io.CopyN(file, r, 11*1024*1024)
if err != nil {
- failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
}
if n != int64(11*1024*1024) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", int64(11*1024*1024), n), err)
+ return
}
// Close the file pro-actively for windows.
err = file.Close()
if err != nil {
- failureLog(function, args, startTime, "", "File close failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
}
// Set base object name
@@ -3189,103 +4269,103 @@ func testFPutObjectV2() {
args["fileName"] = file.Name()
// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+ n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
}
if n != int64(11*1024*1024) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", int64(11*1024*1024), n), err)
+ return
}
// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
- n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
args["objectName"] = objectName + "-Octet"
args["contentType"] = ""
+ n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
}
if n != int64(11*1024*1024) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", int64(11*1024*1024), n), err)
+ return
}
// Add extension to temp file name
fileName := file.Name()
err = os.Rename(file.Name(), fileName+".gtar")
if err != nil {
- failureLog(function, args, startTime, "", "Rename failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Rename failed", err)
+ return
}
// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
- n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
args["objectName"] = objectName + "-Octet"
args["contentType"] = ""
args["fileName"] = fileName + ".gtar"
+ n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
}
if n != int64(11*1024*1024) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", int64(11*1024*1024), n), err)
+ return
}
// Check headers
- rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+ rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
}
if rStandard.ContentType != "application/octet-stream" {
- failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err).Fatal()
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err)
+ return
}
- rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+ rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
}
if rOctet.ContentType != "application/octet-stream" {
- failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err).Fatal()
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err)
+ return
}
- rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+ rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
}
if rGTar.ContentType != "application/x-gtar" {
- failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err).Fatal()
- }
-
- // Remove all objects and bucket and temp file
- err = c.RemoveObject(bucketName, objectName+"-standard")
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
- }
-
- err = c.RemoveObject(bucketName, objectName+"-Octet")
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
- }
-
- err = c.RemoveObject(bucketName, objectName+"-GTar")
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err)
+ return
}
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
err = os.Remove(fileName + ".gtar")
if err != nil {
- failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "File remove failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
// Tests various supported bucket formats.
func testMakeBucketRegionsV2() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "MakeBucket(bucketName, region)"
args := map[string]interface{}{
"bucketName": "",
@@ -3293,7 +4373,7 @@ func testMakeBucketRegionsV2() {
}
if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+ ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
return
}
@@ -3308,7 +4388,8 @@ func testMakeBucketRegionsV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -3318,16 +4399,18 @@ func testMakeBucketRegionsV2() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket in 'eu-west-1'.
if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
- if err = c.RemoveBucket(bucketName); err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
// Make a new bucket with '.' in its name, in 'us-west-2'. This
@@ -3336,25 +4419,26 @@ func testMakeBucketRegionsV2() {
if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
args["bucketName"] = bucketName + ".withperiod"
args["region"] = "us-west-2"
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
- // Remove the newly created bucket.
- if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
// Tests get object ReaderSeeker interface methods.
func testGetObjectReadSeekFunctionalV2() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- }
+ args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -3367,7 +4451,8 @@ func testGetObjectReadSeekFunctionalV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -3377,17 +4462,19 @@ func testGetObjectReadSeekFunctionalV2() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate 33K of data.
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
defer reader.Close()
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@@ -3395,111 +4482,126 @@ func testGetObjectReadSeekFunctionalV2() {
buf, err := ioutil.ReadAll(reader)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
// Save the data.
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", int64(bufSize), n), err)
+ return
}
// Read the data back
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
st, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
- if st.Size != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", int64(bufSize), st.Size), err)
+ return
}
offset := int64(2048)
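+ // Exercise Seek with whence 0 (start), 1 (current) and 2 (end) around a 2 KiB offset.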
n, err = r.Seek(offset, 0)
if err != nil {
- failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
}
if n != offset {
- failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", offset, n), err)
+ return
}
n, err = r.Seek(0, 1)
if err != nil {
- failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
}
if n != offset {
- failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", offset, n), err)
+ return
}
_, err = r.Seek(offset, 2)
if err == nil {
- failureLog(function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err).Fatal()
+ logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err)
+ return
}
n, err = r.Seek(-offset, 2)
if err != nil {
- failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
}
if n != st.Size-offset {
- failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", st.Size-offset, n), err)
+ return
}
var buffer1 bytes.Buffer
if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
if err != io.EOF {
- failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
}
}
if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
- failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
}
// Seek again and read again.
n, err = r.Seek(offset-1, 0)
if err != nil {
- failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
}
if n != (offset - 1) {
- failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", offset-1, n), err)
+ return
}
var buffer2 bytes.Buffer
if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
if err != io.EOF {
- failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
}
}
// Verify now lesser bytes.
if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
- failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
// Tests get object ReaderAt interface methods.
func testGetObjectReadAtFunctionalV2() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "GetObject(bucketName, objectName)"
- args := map[string]interface{}{
- "bucketName": "",
- "objectName": "",
- }
+ args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -3512,7 +4614,8 @@ func testGetObjectReadAtFunctionalV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -3522,17 +4625,19 @@ func testGetObjectReadAtFunctionalV2() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate 33K of data.
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
defer reader.Close()
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@@ -3540,32 +4645,38 @@ func testGetObjectReadAtFunctionalV2() {
buf, err := ioutil.ReadAll(reader)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
// Save the data
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, n), err)
+ return
}
// Read the data back
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
st, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
- if st.Size != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(st.Size), err).Fatal()
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, st.Size), err)
+ return
}
offset := int64(2048)
@@ -3577,35 +4688,44 @@ func testGetObjectReadAtFunctionalV2() {
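+ // Read three consecutive 512-byte windows starting at the 2 KiB offset and verify each against the source buffer.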
m, err := r.ReadAt(buf2, offset)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
}
if m != len(buf2) {
- failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf2), m), err)
+ return
}
if !bytes.Equal(buf2, buf[offset:offset+512]) {
- failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
}
offset += 512
m, err = r.ReadAt(buf3, offset)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
}
if m != len(buf3) {
- failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf3), m), err)
+ return
}
if !bytes.Equal(buf3, buf[offset:offset+512]) {
- failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
}
offset += 512
m, err = r.ReadAt(buf4, offset)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
}
if m != len(buf4) {
- failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf4), m), err)
+ return
}
if !bytes.Equal(buf4, buf[offset:offset+512]) {
- failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
}
buf5 := make([]byte, n)
@@ -3613,14 +4733,17 @@ func testGetObjectReadAtFunctionalV2() {
m, err = r.ReadAt(buf5, 0)
if err != nil {
if err != io.EOF {
- failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
}
}
if m != len(buf5) {
- failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err).Fatal()
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf5), m), err)
+ return
}
if !bytes.Equal(buf, buf5) {
- failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal()
+ logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
+ return
}
buf6 := make([]byte, n+1)
@@ -3628,29 +4751,26 @@ func testGetObjectReadAtFunctionalV2() {
_, err = r.ReadAt(buf6, 0)
if err != nil {
if err != io.EOF {
- failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
}
}
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
// Tests copy object
func testCopyObjectV2() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "CopyObject(destination, source)"
- args := map[string]interface{}{
- "destination": "",
- "source": "",
- }
+ args := map[string]interface{}{}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
@@ -3663,7 +4783,8 @@ func testCopyObjectV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -3673,167 +4794,181 @@ func testCopyObjectV2() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
// Make a new bucket in 'us-east-1' (source bucket).
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Make a new bucket in 'us-east-1' (destination bucket).
err = c.MakeBucket(bucketName+"-copy", "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate 33K of data.
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
defer reader.Close()
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+ n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- if n != int64(thirtyThreeKiB) {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
+ return
}
- r, err := c.GetObject(bucketName, objectName)
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
// Check the various fields of source object against destination object.
objInfo, err := r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
// Copy Source
src := minio.NewSourceInfo(bucketName, objectName, nil)
+ args["source"] = src
// Set copy conditions.
// All invalid conditions first.
err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
- failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err)
+ return
}
err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
if err == nil {
- failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err)
+ return
}
err = src.SetMatchETagCond("")
if err == nil {
- failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err)
+ return
}
err = src.SetMatchETagExceptCond("")
if err == nil {
- failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err)
+ return
}
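+ // Now set valid conditions; time.Date normalizes the zeroth day of April to March 31, 2014.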
err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
- failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err)
+ return
}
err = src.SetMatchETagCond(objInfo.ETag)
if err != nil {
- failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err)
+ return
}
- args["source"] = src
dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
+ args["destination"] = dst
if err != nil {
- failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
}
- args["destination"] = dst
// Perform the Copy
err = c.CopyObject(dst, src)
if err != nil {
- failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
}
// Source object
- r, err = c.GetObject(bucketName, objectName)
+ r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
// Destination object
- readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+ readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
// Check the various fields of source object against destination object.
objInfo, err = r.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
objInfoCopy, err := readerCopy.Stat()
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
if objInfo.Size != objInfoCopy.Size {
- failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err).Fatal()
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err)
+ return
}
// CopyObject again but with wrong conditions
src = minio.NewSourceInfo(bucketName, objectName, nil)
err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
- failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err)
+ return
}
err = src.SetMatchETagExceptCond(objInfo.ETag)
if err != nil {
- failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err)
+ return
}
// Perform the Copy which should fail
err = c.CopyObject(dst, src)
if err == nil {
- failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal()
- }
-
- // Remove all objects and buckets
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
- }
-
- err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
+ return
}
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
-
- err = c.RemoveBucket(bucketName + "-copy")
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ if err = cleanupBucket(bucketName+"-copy", c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
func testComposeObjectErrorCasesWrapper(c *minio.Client) {
// initialize logging params
startTime := time.Now()
- function := "testComposeObjectErrorCasesWrapper(minioClient)"
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
args := map[string]interface{}{}
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
// Make a new bucket in 'us-east-1' (source bucket).
err := c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Test that more than 10K source objects cannot be
@@ -3842,13 +4977,20 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) {
srcSlice := srcArr[:]
dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil)
if err != nil {
- failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
}
+ args["destination"] = dst
+ // Describe srcArr in args["sourceList"] instead of embedding it,
+ // so the failure log does not carry 10,001 nil headers
+ args["sourceList"] = "source array of 10,001 elements"
if err := c.ComposeObject(dst, srcSlice); err == nil {
- failureLog(function, args, startTime, "", "Expected error in ComposeObject", err).Fatal()
+ logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
+ return
} else if err.Error() != "There must be as least one and up to 10000 source objects." {
- failureLog(function, args, startTime, "", "Got unexpected error", err).Fatal()
+ logError(testName, function, args, startTime, "", "Got unexpected error", err)
+ return
}
// Create a source with invalid offset spec and check that
@@ -3856,31 +4998,43 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) {
// 1. Create the source object.
const badSrcSize = 5 * 1024 * 1024
buf := bytes.Repeat([]byte("1"), badSrcSize)
- _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), "")
+ _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
// 2. Set invalid range spec on the object (going beyond
// object size)
badSrc := minio.NewSourceInfo(bucketName, "badObject", nil)
err = badSrc.SetRange(1, badSrcSize)
if err != nil {
- failureLog(function, args, startTime, "", "Setting NewSourceInfo failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Setting NewSourceInfo failed", err)
+ return
}
// 3. ComposeObject call should fail.
if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil {
- failureLog(function, args, startTime, "", "ComposeObject expected to fail", err).Fatal()
+ logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err)
+ return
} else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
- failureLog(function, args, startTime, "", "Got invalid error", err).Fatal()
+ logError(testName, function, args, startTime, "", "Got invalid error", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
}
// Test expected error cases
func testComposeObjectErrorCasesV2() {
// initialize logging params
startTime := time.Now()
- function := "testComposeObjectErrorCasesV2()"
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
args := map[string]interface{}{}
// Instantiate new minio client object
@@ -3891,7 +5045,8 @@ func testComposeObjectErrorCasesV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
testComposeObjectErrorCasesWrapper(c)
@@ -3900,26 +5055,29 @@ func testComposeObjectErrorCasesV2() {
func testComposeMultipleSources(c *minio.Client) {
// initialize logging params
startTime := time.Now()
- function := "ComposeObject(destination, sources)"
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
args := map[string]interface{}{
"destination": "",
- "sources": "",
+ "sourceList": "",
}
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
// Make a new bucket in 'us-east-1' (source bucket).
err := c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Upload a small source object
const srcSize = 1024 * 1024 * 5
buf := bytes.Repeat([]byte("1"), srcSize)
- _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), "binary/octet-stream")
+ _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
// We will append 10 copies of the object.
@@ -3930,37 +5088,48 @@ func testComposeMultipleSources(c *minio.Client) {
// make the last part very small
err = srcs[9].SetRange(0, 0)
if err != nil {
- failureLog(function, args, startTime, "", "SetRange failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetRange failed", err)
+ return
}
- args["sources"] = srcs
+ args["sourceList"] = srcs
dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil)
args["destination"] = dst
if err != nil {
- failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
}
err = c.ComposeObject(dst, srcs)
if err != nil {
- failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
}
- objProps, err := c.StatObject(bucketName, "dstObject")
+ objProps, err := c.StatObject(bucketName, "dstObject", minio.StatObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
}
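+ // Expected size: nine full 5 MiB copies plus a one-byte final part, since SetRange(0, 0) copies a single byte.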
if objProps.Size != 9*srcSize+1 {
- failureLog(function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err).Fatal()
+ logError(testName, function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err)
+ return
+ }
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
// Test concatenating multiple objects
func testCompose10KSourcesV2() {
// initialize logging params
startTime := time.Now()
- function := "testCompose10KSourcesV2(minioClient)"
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
args := map[string]interface{}{}
// Instantiate new minio client object
@@ -3971,7 +5140,8 @@ func testCompose10KSourcesV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
testComposeMultipleSources(c)
@@ -3980,15 +5150,17 @@ func testCompose10KSourcesV2() {
func testEncryptedCopyObjectWrapper(c *minio.Client) {
// initialize logging params
startTime := time.Now()
- function := "testEncryptedCopyObjectWrapper(minioClient)"
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
args := map[string]interface{}{}
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
// Make a new bucket in 'us-east-1' (source bucket).
err := c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
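+ // SSE-C requires exactly 32-byte keys; two keys are used to verify re-encryption during copy.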
key1 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256")
@@ -3997,54 +5169,69 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) {
// 1. create an sse-c encrypted object to copy by uploading
const srcSize = 1024 * 1024
buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
- metadata := make(map[string][]string)
+ metadata := make(map[string]string)
for k, v := range key1.GetSSEHeaders() {
- metadata[k] = append(metadata[k], v)
+ metadata[k] = v
}
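+ // key1's SSE-C headers are sent as user metadata so the object is uploaded encrypted with key1.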
- _, err = c.PutObjectWithSize(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), metadata, nil)
+ _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: metadata, Progress: nil})
if err != nil {
- failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
}
// 2. copy object and change encryption key
src := minio.NewSourceInfo(bucketName, "srcObject", &key1)
+ args["source"] = src
dst, err := minio.NewDestinationInfo(bucketName, "dstObject", &key2, nil)
if err != nil {
- failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
}
+ args["destination"] = dst
err = c.CopyObject(dst, src)
if err != nil {
- failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
}
// 3. get copied object and check if content is equal
- reqH := minio.NewGetReqHeaders()
+ opts := minio.GetObjectOptions{}
for k, v := range key2.GetSSEHeaders() {
- reqH.Set(k, v)
+ opts.Set(k, v)
}
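+ // The GET request must carry key2's SSE-C headers so the server can decrypt the copied object.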
coreClient := minio.Core{c}
- reader, _, err := coreClient.GetObject(bucketName, "dstObject", reqH)
+ reader, _, err := coreClient.GetObject(bucketName, "dstObject", opts)
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
defer reader.Close()
decBytes, err := ioutil.ReadAll(reader)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
if !bytes.Equal(decBytes, buf) {
- failureLog(function, args, startTime, "", "Downloaded object mismatched for encrypted object", err).Fatal()
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
}
- successLogger(function, args, startTime).Info()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
}
// Test encrypted copy object
func testEncryptedCopyObject() {
// initialize logging params
startTime := time.Now()
- function := "testEncryptedCopyObject()"
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
args := map[string]interface{}{}
// Instantiate new minio client object
@@ -4055,7 +5242,8 @@ func testEncryptedCopyObject() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
// c.TraceOn(os.Stderr)
@@ -4066,7 +5254,8 @@ func testEncryptedCopyObject() {
func testEncryptedCopyObjectV2() {
// initialize logging params
startTime := time.Now()
- function := "testEncryptedCopyObjectV2()"
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
args := map[string]interface{}{}
// Instantiate new minio client object
@@ -4077,7 +5266,8 @@ func testEncryptedCopyObjectV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+ return
}
testEncryptedCopyObjectWrapper(c)
@@ -4086,7 +5276,8 @@ func testEncryptedCopyObjectV2() {
func testUserMetadataCopying() {
// initialize logging params
startTime := time.Now()
- function := "testUserMetadataCopying()"
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
args := map[string]interface{}{}
// Instantiate new minio client object
@@ -4097,7 +5288,8 @@ func testUserMetadataCopying() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
// c.TraceOn(os.Stderr)
@@ -4107,24 +5299,24 @@ func testUserMetadataCopying() {
func testUserMetadataCopyingWrapper(c *minio.Client) {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "CopyObject(destination, source)"
- args := map[string]interface{}{
- "destination": "",
- "source": "",
- }
+ args := map[string]interface{}{}
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
// Make a new bucket in 'us-east-1' (source bucket).
err := c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
fetchMeta := func(object string) (h http.Header) {
- objInfo, err := c.StatObject(bucketName, object)
+ objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
}
h = make(http.Header)
for k, vs := range objInfo.Metadata {
@@ -4142,13 +5334,17 @@ func testUserMetadataCopyingWrapper(c *minio.Client) {
buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
metadata := make(http.Header)
metadata.Set("x-amz-meta-myheader", "myvalue")
- _, err = c.PutObjectWithMetadata(bucketName, "srcObject",
- bytes.NewReader(buf), metadata, nil)
+ m := make(map[string]string)
+ m["x-amz-meta-myheader"] = "myvalue"
+ _, err = c.PutObject(bucketName, "srcObject",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m})
if err != nil {
- failureLog(function, args, startTime, "", "PutObjectWithMetadata failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err)
+ return
}
if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
- failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
}
// 2. create source
@@ -4156,46 +5352,49 @@ func testUserMetadataCopyingWrapper(c *minio.Client) {
// 2.1 create destination with metadata set
dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"})
if err != nil {
- failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
}
// 3. Check that copying to an object with metadata set resets
// the headers on the copy.
- err = c.CopyObject(dst1, src)
- args["destination"] = dst1
args["source"] = src
-
+ args["destination"] = dst1
+ err = c.CopyObject(dst1, src)
if err != nil {
- failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
}
expectedHeaders := make(http.Header)
expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
- failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
}
// 4. create destination with no metadata set and same source
dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil)
if err != nil {
- failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
-
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
}
src = minio.NewSourceInfo(bucketName, "srcObject", nil)
// 5. Check that copying to an object with no metadata set,
// copies metadata.
- err = c.CopyObject(dst2, src)
- args["destination"] = dst2
args["source"] = src
-
+ args["destination"] = dst2
+ err = c.CopyObject(dst2, src)
if err != nil {
- failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
}
expectedHeaders = metadata
if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
- failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
}
// 6. Compose a pair of sources.
@@ -4205,21 +5404,23 @@ func testUserMetadataCopyingWrapper(c *minio.Client) {
}
dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil)
if err != nil {
- failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
}
- err = c.ComposeObject(dst3, srcs)
function = "ComposeObject(destination, sources)"
- args["destination"] = dst3
args["source"] = srcs
-
+ args["destination"] = dst3
+ err = c.ComposeObject(dst3, srcs)
if err != nil {
- failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
}
// Check that no headers are copied in this case
if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
- failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
}
// 7. Compose a pair of sources with dest user metadata set.
@@ -4229,31 +5430,41 @@ func testUserMetadataCopyingWrapper(c *minio.Client) {
}
dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"})
if err != nil {
- failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
}
- err = c.ComposeObject(dst4, srcs)
function = "ComposeObject(destination, sources)"
- args["destination"] = dst4
args["source"] = srcs
-
+ args["destination"] = dst4
+ err = c.ComposeObject(dst4, srcs)
if err != nil {
- failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
}
// Check that no source headers are copied; only the destination metadata should be set
expectedHeaders = make(http.Header)
expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
- failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
func testUserMetadataCopyingV2() {
// initialize logging params
startTime := time.Now()
- function := "testUserMetadataCopyingV2()"
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
args := map[string]interface{}{}
// Instantiate new minio client object
@@ -4264,23 +5475,252 @@ func testUserMetadataCopyingV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
}
// c.TraceOn(os.Stderr)
testUserMetadataCopyingWrapper(c)
}
+func testStorageClassMetadataPutObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassMetadataPutObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
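+ // fetchMeta returns only the x-amz-storage-class headers of the given object.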
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
+ for _, v := range vs {
+ h.Add(k, v)
+ }
+ }
+ }
+ return h
+ }
+
+ metadata := make(http.Header)
+ metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
+
+ emptyMetadata := make(http.Header)
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB
+
+ _, err = c.PutObject(bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Get the returned metadata
+ returnedMeta := fetchMeta("srcObjectRRSClass")
+
+ // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
+ if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ metadata = make(http.Header)
+ metadata.Set("x-amz-storage-class", "STANDARD")
+
+ _, err = c.PutObject(bucketName, "srcObjectSSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) {
+ logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+func testStorageClassInvalidMetadataPutObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassInvalidMetadataPutObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB
+
+ _, err = c.PutObject(bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+func testStorageClassMetadataCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassMetadataCopyObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
+ for _, v := range vs {
+ h.Add(k, v)
+ }
+ }
+ }
+ return h
+ }
+
+ metadata := make(http.Header)
+ metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
+
+ emptyMetadata := make(http.Header)
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize)
+
+ // Put an object with RRS Storage class
+ _, err = c.PutObject(bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Make server side copy of object uploaded in previous step
+ src := minio.NewSourceInfo(bucketName, "srcObjectRRSClass", nil)
+ dst, err := minio.NewDestinationInfo(bucketName, "srcObjectRRSClassCopy", nil, nil)
+ c.CopyObject(dst, src)
+
+ // Get the returned metadata
+ returnedMeta := fetchMeta("srcObjectRRSClassCopy")
+
+ // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
+ if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ metadata = make(http.Header)
+ metadata.Set("x-amz-storage-class", "STANDARD")
+
+ // Put an object with Standard Storage class
+ _, err = c.PutObject(bucketName, "srcObjectSSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Make server side copy of object uploaded in previous step
+ src = minio.NewSourceInfo(bucketName, "srcObjectSSClass", nil)
+ dst, err = minio.NewDestinationInfo(bucketName, "srcObjectSSClassCopy", nil, nil)
+ c.CopyObject(dst, src)
+
+ // Fetch the metadata of the copied object
+ if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) {
+ logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
// Test put object with size -1 byte object.
func testPutObjectNoLengthV2() {
// initialize logging params
startTime := time.Now()
- function := "PutObjectWithSize(bucketName, objectName, reader, size, metadata, progress)"
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, size, opts)"
args := map[string]interface{}{
"bucketName": "",
"objectName": "",
"size": -1,
- "metadata": nil,
+ "opts": "",
}
// Seed random based on current time.
@@ -4294,7 +5734,8 @@ func testPutObjectNoLengthV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -4304,55 +5745,56 @@ func testPutObjectNoLengthV2() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
- "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
objectName := bucketName + "unique"
args["objectName"] = objectName
- // Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover.
- // Use different data for each part for multipart tests to ensure part order at the end.
- var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+ bufSize := dataFileMap["datafile-65-MB"]
+ var reader = getDataReader("datafile-65-MB")
defer reader.Close()
+ args["size"] = bufSize
// Upload an object.
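+ // Passing size -1 makes the client stream the data as a multipart upload of unknown length.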
- n, err := c.PutObjectWithSize(bucketName, objectName, reader, -1, nil, nil)
+ n, err := c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{})
+
if err != nil {
- failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
+ return
}
- if n != int64(sixtyFiveMiB) {
- failureLog(function, args, startTime, "", "Expected upload object size "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(n), err)
+ return
}
- // Remove the object.
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- // Remove the bucket.
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
- }
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
// Test put objects of unknown size.
func testPutObjectsUnknownV2() {
// initialize logging params
startTime := time.Now()
- function := "PutObjectStreaming(bucketName, objectName, reader)"
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size,opts)"
args := map[string]interface{}{
"bucketName": "",
"objectName": "",
+ "size": "",
+ "opts": "",
}
// Seed random based on current time.
@@ -4366,7 +5808,8 @@ func testPutObjectsUnknownV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -4376,14 +5819,14 @@ func testPutObjectsUnknownV2() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
- "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Issues are revealed by trying to upload multiple files of unknown size
@@ -4403,39 +5846,39 @@ func testPutObjectsUnknownV2() {
objectName := fmt.Sprintf("%sunique%d", bucketName, i)
args["objectName"] = objectName
- n, err := c.PutObjectStreaming(bucketName, objectName, rpipe)
+ n, err := c.PutObject(bucketName, objectName, rpipe, -1, minio.PutObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
+ return
}
+ args["size"] = n
if n != int64(4) {
- failureLog(function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err)
+ return
}
- // Remove the object.
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
- }
}
- // Remove the bucket.
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- successLogger(function, args, startTime).Info()
+
+ successLogger(testName, function, args, startTime).Info()
}
// Test put object with 0 byte object.
func testPutObject0ByteV2() {
// initialize logging params
startTime := time.Now()
- function := "PutObjectWithSize(bucketName, objectName, reader, size, metadata, progress)"
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, size, opts)"
args := map[string]interface{}{
"bucketName": "",
"objectName": "",
"size": 0,
- "metadata": nil,
+ "opts": "",
}
// Seed random based on current time.
@@ -4449,7 +5892,8 @@ func testPutObject0ByteV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
}
// Enable tracing, write to stderr.
@@ -4459,45 +5903,47 @@ func testPutObject0ByteV2() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
- "minio-go-test")
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
objectName := bucketName + "unique"
+ args["objectName"] = objectName
+ args["opts"] = minio.PutObjectOptions{}
// Upload an object.
- n, err := c.PutObjectWithSize(bucketName, objectName, bytes.NewReader([]byte("")), 0, nil, nil)
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
+
if err != nil {
- failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
+ return
}
if n != 0 {
- failureLog(function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err)
+ return
}
- // Remove the object.
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
}
- // Remove the bucket.
- err = c.RemoveBucket(bucketName)
- if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
- }
- successLogger(function, args, startTime).Info()
+ successLogger(testName, function, args, startTime).Info()
}
// Test expected error cases
func testComposeObjectErrorCases() {
// initialize logging params
startTime := time.Now()
- function := "testComposeObjectErrorCases()"
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
args := map[string]interface{}{}
// Instantiate new minio client object
@@ -4508,7 +5954,8 @@ func testComposeObjectErrorCases() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
testComposeObjectErrorCasesWrapper(c)
@@ -4518,7 +5965,8 @@ func testComposeObjectErrorCases() {
func testCompose10KSources() {
// initialize logging params
startTime := time.Now()
- function := "testCompose10KSources()"
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
args := map[string]interface{}{}
// Instantiate new minio client object
@@ -4529,7 +5977,8 @@ func testCompose10KSources() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
}
testComposeMultipleSources(c)
@@ -4539,7 +5988,9 @@ func testCompose10KSources() {
func testFunctionalV2() {
// initialize logging params
startTime := time.Now()
+ testName := getFuncName()
function := "testFunctionalV2()"
+ function_all := ""
args := map[string]interface{}{}
// Seed random based on current time.
@@ -4552,7 +6003,8 @@ func testFunctionalV2() {
mustParseBool(os.Getenv(enableHTTPS)),
)
if err != nil {
- failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
}
// Enable to debug
@@ -4562,52 +6014,81 @@ func testFunctionalV2() {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ location := "us-east-1"
// Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
+ function = "MakeBucket(bucketName, location)"
+ function_all = "MakeBucket(bucketName, location)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "location": location,
+ }
+ err = c.MakeBucket(bucketName, location)
if err != nil {
- failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
// Generate a random file name.
fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
file, err := os.Create(fileName)
if err != nil {
- failureLog(function, args, startTime, "", "file create failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "file create failed", err)
+ return
}
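+ // Write three chunks of up to 512 KiB of zero-valued bytes each into the temp file.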
for i := 0; i < 3; i++ {
buf := make([]byte, rand.Intn(1<<19))
_, err = file.Write(buf)
if err != nil {
- failureLog(function, args, startTime, "", "file write failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "file write failed", err)
+ return
}
}
file.Close()
// Verify if bucket exits and you have access.
var exists bool
+ function = "BucketExists(bucketName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
exists, err = c.BucketExists(bucketName)
if err != nil {
- failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "BucketExists failed", err)
+ return
}
if !exists {
- failureLog(function, args, startTime, "", "Could not find existing bucket "+bucketName, err).Fatal()
+ logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
+ return
}
// Make the bucket 'public read/write'.
+ function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ "bucketPolicy": policy.BucketPolicyReadWrite,
+ }
err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
if err != nil {
- failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
}
// List all buckets.
+ function = "ListBuckets()"
+ function_all += ", " + function
+ args = nil
buckets, err := c.ListBuckets()
if len(buckets) == 0 {
- failureLog(function, args, startTime, "", "List buckets cannot be empty", err).Fatal()
+ logError(testName, function, args, startTime, "", "List buckets cannot be empty", err)
+ return
}
if err != nil {
- failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ListBuckets failed", err)
+ return
}
// Verify if previously created bucket is listed in list buckets.
@@ -4620,7 +6101,8 @@ func testFunctionalV2() {
// If bucket not found error out.
if !bucketFound {
- failureLog(function, args, startTime, "", "Bucket "+bucketName+"not found", err).Fatal()
+ logError(testName, function, args, startTime, "", "Bucket "+bucketName+"not found", err)
+ return
}
objectName := bucketName + "unique"
@@ -4628,21 +6110,32 @@ func testFunctionalV2() {
// Generate data
buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "contentType": "",
+ }
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
if n != int64(len(buf)) {
- failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err)
+ return
}
- n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
+ objectName_noLength := objectName + "-nolength"
+ args["objectName"] = objectName_noLength
+ n, err = c.PutObject(bucketName, objectName_noLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
if n != int64(len(buf)) {
- failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal()
+ logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err)
+ return
}
// Instantiate a done channel to close all listing.
@@ -4651,18 +6144,13 @@ func testFunctionalV2() {
objFound := false
isRecursive := true // Recursive is true.
- for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- failureLog(function, args, startTime, "", "Could not find existing object "+objectName, err).Fatal()
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
}
-
- objFound = false
- isRecursive = true // Recursive is true.
for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
if obj.Key == objectName {
objFound = true
@@ -4670,10 +6158,18 @@ func testFunctionalV2() {
}
}
if !objFound {
- failureLog(function, args, startTime, "", "Could not find existing object "+objectName, err).Fatal()
+ logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err)
+ return
}
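+ // Verify that no dangling incomplete uploads exist for this object.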
incompObjNotFound := true
+ function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
if objIncompl.Key != "" {
incompObjNotFound = false
@@ -4681,106 +6177,164 @@ func testFunctionalV2() {
}
}
if !incompObjNotFound {
- failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal()
+ logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
+ return
}
- newReader, err := c.GetObject(bucketName, objectName)
+ function = "GetObject(bucketName, objectName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+ newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
newReadBytes, err := ioutil.ReadAll(newReader)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
if !bytes.Equal(newReadBytes, buf) {
- failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
}
- err = c.FGetObject(bucketName, objectName, fileName+"-f")
+ function = "FGetObject(bucketName, objectName, fileName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "fileName": fileName + "-f",
+ }
+ err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "FgetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "FgetObject failed", err)
+ return
}
// Generate presigned HEAD object url.
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
+ return
}
// Verify if presigned url works.
resp, err := http.Head(presignedHeadURL.String())
if err != nil {
- failureLog(function, args, startTime, "", "PresignedHeadObject URL head request failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
+ return
}
if resp.StatusCode != http.StatusOK {
- failureLog(function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err)
+ return
}
if resp.Header.Get("ETag") == "" {
- failureLog(function, args, startTime, "", "Got empty ETag", err).Fatal()
+ logError(testName, function, args, startTime, "", "Got empty ETag", err)
+ return
}
resp.Body.Close()
// Generate presigned GET object url.
+ function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
}
// Verify if presigned url works.
resp, err = http.Get(presignedGetURL.String())
if err != nil {
- failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err)
+ return
}
if resp.StatusCode != http.StatusOK {
- failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
+ return
}
newPresignedBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
resp.Body.Close()
if !bytes.Equal(newPresignedBytes, buf) {
- failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
}
// Set request parameters.
reqParams := make(url.Values)
reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
// Generate presigned GET object url.
+ args["reqParams"] = reqParams
presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
}
// Verify if presigned url works.
resp, err = http.Get(presignedGetURL.String())
if err != nil {
- failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err)
+ return
}
if resp.StatusCode != http.StatusOK {
- failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
+ return
}
newPresignedBytes, err = ioutil.ReadAll(resp.Body)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
if !bytes.Equal(newPresignedBytes, buf) {
- failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
}
// Verify content disposition.
if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
- failureLog(function, args, startTime, "", "wrong Content-Disposition received ", err).Fatal()
+ logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err)
+ return
}
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-presigned",
+ "expires": 3600 * time.Second,
+ }
presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
if err != nil {
- failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+ return
}
+
// Generate data more than 32K
buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
if err != nil {
- failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
+ return
}
httpClient := &http.Client{
// Setting a sensible time out of 30secs to wait for response
@@ -4791,57 +6345,524 @@ func testFunctionalV2() {
}
resp, err = httpClient.Do(req)
if err != nil {
- failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
+ return
}
- newReader, err = c.GetObject(bucketName, objectName+"-presigned")
+ function = "GetObject(bucketName, objectName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-presigned",
+ }
+ newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
}
newReadBytes, err = ioutil.ReadAll(newReader)
if err != nil {
- failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
}
if !bytes.Equal(newReadBytes, buf) {
- failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName)
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ if err = os.Remove(fileName); err != nil {
+ logError(testName, function, args, startTime, "", "File remove failed", err)
+ return
+ }
+ if err = os.Remove(fileName + "-f"); err != nil {
+ logError(testName, function, args, startTime, "", "File removes failed", err)
+ return
+ }
+ successLogger(testName, function_all, args, startTime).Info()
+}
+
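The pattern behind most of the hunks above, in minimal form (names exactly as they appear in this diff): failures used to abort the whole run; they now log and return so the remaining tests still execute.

    // Before: any failure killed the entire functional test run.
    failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()

    // After: the failure is logged per test and the test returns,
    // so main() can continue with the remaining tests.
    logError(testName, function, args, startTime, "", "GetObject failed", err)
    return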
+// Test get object with GetObjectWithContext
+func testGetObjectWithContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObjectWithContext(ctx, bucketName, objectName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName+"-f")
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName+"-nolength")
+
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
}
- err = c.RemoveObject(bucketName, objectName+"-presigned")
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err)
+ return
}
- err = c.RemoveBucket(bucketName)
+ if _, err = r.Stat(); err == nil {
+ logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err)
+ return
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Read the data back
+ r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
if err != nil {
- failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ logError(testName, function, args, startTime, "", "GetObjectWithContext failed", err)
+ return
}
- err = c.RemoveBucket(bucketName)
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "object Stat call failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "object Close() call failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
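A minimal usage sketch of the context-aware read API exercised above (signatures as they appear in this diff; the client c, bucket, and object names are placeholders):

    // Bound a GetObject call with a deadline; Stat() surfaces the context
    // error if the deadline has already expired.
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    r, err := c.GetObjectWithContext(ctx, "my-bucket", "my-object", minio.GetObjectOptions{})
    if err != nil {
        return
    }
    defer r.Close()
    if _, err := r.Stat(); err != nil {
        return // e.g. context deadline exceeded
    }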
+// Test get object with FGetObjectWithContext
+func testFGetObjectWithContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FGetObjectWithContext(ctx, bucketName, objectName, fileName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-1-MB"]
+ var reader = getDataReader("datafile-1-MB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ fileName := "tempfile-context"
+ args["fileName"] = fileName
+ // Read the data back
+ err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
if err == nil {
- failureLog(function, args, startTime, "", "RemoveBucket should fail as bucket does not exist", err).Fatal()
+ logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err)
+ return
}
- if err.Error() != "The specified bucket does not exist" {
- failureLog(function, args, startTime, "", "RemoveBucket failed with wrong error message", err).Fatal()
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+
+ // Read the data back
+ err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FGetObjectWithContext with long timeout failed", err)
+ return
}
- if err = os.Remove(fileName); err != nil {
- failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
+ if err = os.Remove(fileName + "-fcontext"); err != nil {
+ logError(testName, function, args, startTime, "", "Remove file failed", err)
+ return
}
- if err = os.Remove(fileName + "-f"); err != nil {
- failureLog(function, args, startTime, "", "File removes failed", err).Fatal()
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Test validates putObject with context to see if request cancellation is honored for V2.
+func testPutObjectWithContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObjectWithContext(ctx, bucketName, objectName, reader, size, opts)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "size": "",
+ "opts": "",
+ }
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer c.RemoveBucket(bucketName)
+ bufSize := dataFileMap["datatfile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+ args["objectName"] = objectName
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ args["ctx"] = ctx
+ args["size"] = bufSize
+ defer cancel()
+
+ _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithContext with short timeout failed", err)
+ return
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ args["ctx"] = ctx
+
+ defer cancel()
+ reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Test get object with GetObjectWithContext for V2.
+func testGetObjectWithContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObjectWithContext(ctx, bucketName, objectName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err)
+ return
}
- successLogger(function, args, startTime).Info()
+ if _, err = r.Stat(); err == nil {
+ logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err)
+ return
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+
+ // Read the data back
+ r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectWithContext shouldn't fail on longer timeout", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "object Stat call failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", " object Close() call failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Test get object with FGetObjectWithContext for V2.
+func testFGetObjectWithContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FGetObjectWithContext(ctx, bucketName, objectName,fileName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datatfile-1-MB"]
+ var reader = getDataReader("datafile-1-MB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ fileName := "tempfile-context"
+ args["fileName"] = fileName
+
+ // Read the data back
+ err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+
+ // Read the data back
+ err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FGetObjectWithContext call shouldn't fail on long timeout", err)
+ return
+ }
+
+ if err = os.Remove(fileName + "-fcontext"); err != nil {
+ logError(testName, function, args, startTime, "", "Remove file failed", err)
+ return
+ }
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
}
// Convert string to bool; always returns false if any error occurs
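A plausible body for mustParseBool, matching the comment above; the real implementation lies outside this hunk, so treat this as an assumption:

    func mustParseBool(str string) bool {
        b, err := strconv.ParseBool(str)
        if err != nil {
            // Any parse error degrades to false, per the comment above.
            return false
        }
        return b
    }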
@@ -4862,8 +6883,10 @@ func main() {
log.SetFormatter(&mintFormatter)
// log Info or above -- success cases are Info level, failures are Fatal level
log.SetLevel(log.InfoLevel)
+
+ tls := mustParseBool(os.Getenv(enableHTTPS))
// execute tests
- if !isQuickMode() {
+ if isFullMode() {
testMakeBucketErrorV2()
testGetObjectClosedTwiceV2()
testRemovePartiallyUploadedV2()
@@ -4875,11 +6898,14 @@ func main() {
testFunctionalV2()
testComposeObjectErrorCasesV2()
testCompose10KSourcesV2()
- testEncryptedCopyObjectV2()
testUserMetadataCopyingV2()
testPutObject0ByteV2()
testPutObjectNoLengthV2()
testPutObjectsUnknownV2()
+ testGetObjectWithContextV2()
+ testFPutObjectWithContextV2()
+ testFGetObjectWithContextV2()
+ testPutObjectWithContextV2()
testMakeBucketError()
testMakeBucketRegions()
testPutObjectWithMetadata()
@@ -4897,14 +6923,27 @@ func main() {
testPresignedPostPolicy()
testCopyObject()
testEncryptionPutGet()
+ testEncryptionFPut()
testComposeObjectErrorCases()
testCompose10KSources()
testUserMetadataCopying()
- testEncryptedCopyObject()
testBucketNotification()
testFunctional()
- testGetObjectObjectModified()
+ testGetObjectModified()
testPutObjectUploadSeekedObject()
+ testGetObjectWithContext()
+ testFPutObjectWithContext()
+ testFGetObjectWithContext()
+ testPutObjectWithContext()
+ testStorageClassMetadataPutObject()
+ testStorageClassInvalidMetadataPutObject()
+ testStorageClassMetadataCopyObject()
+
+ // SSE-C tests will only work over TLS connection.
+ if tls {
+ testEncryptedCopyObjectV2()
+ testEncryptedCopyObject()
+ }
} else {
testFunctional()
testFunctionalV2()
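isFullMode replaces the old !isQuickMode() gate; a plausible sketch, assuming the mint harness selects the suite via a MINT_MODE environment variable (not shown in this hunk):

    func isFullMode() bool {
        // Assumed: run the full suite only when explicitly requested.
        return os.Getenv("MINT_MODE") == "full"
    }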
diff --git a/vendor/github.com/minio/minio-go/request-headers_test.go b/vendor/github.com/minio/minio-go/get-options_test.go
index f026cd0a2..c5344a0c6 100644
--- a/vendor/github.com/minio/minio-go/request-headers_test.go
+++ b/vendor/github.com/minio/minio-go/get-options_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -40,17 +41,17 @@ func TestSetHeader(t *testing.T) {
{1, -5, fmt.Errorf("Invalid range specified: start=1 end=-5"), ""},
}
for i, testCase := range testCases {
- rh := NewGetReqHeaders()
- err := rh.SetRange(testCase.start, testCase.end)
+ opts := GetObjectOptions{}
+ err := opts.SetRange(testCase.start, testCase.end)
if err == nil && testCase.errVal != nil {
t.Errorf("Test %d: Expected to fail with '%v' but it passed",
i+1, testCase.errVal)
} else if err != nil && testCase.errVal.Error() != err.Error() {
t.Errorf("Test %d: Expected error '%v' but got error '%v'",
i+1, testCase.errVal, err)
- } else if err == nil && rh.Get("Range") != testCase.expected {
+ } else if err == nil && opts.headers["Range"] != testCase.expected {
t.Errorf("Test %d: Expected range header '%s', but got '%s'",
- i+1, testCase.expected, rh.Get("Range"))
+ i+1, testCase.expected, opts.headers["Range"])
}
}
}
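At call sites, the move from NewGetReqHeaders to GetObjectOptions looks roughly like this (a sketch based on the test above; the client c is a placeholder):

    // Fetch only the first 1 KiB using the new options type.
    opts := minio.GetObjectOptions{}
    if err := opts.SetRange(0, 1023); err != nil { // populates the "Range" header
        return err
    }
    r, err := c.GetObject("my-bucket", "my-object", opts)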
diff --git a/vendor/github.com/minio/minio-go/hook-reader.go b/vendor/github.com/minio/minio-go/hook-reader.go
index bc9ece049..8f32291d4 100644
--- a/vendor/github.com/minio/minio-go/hook-reader.go
+++ b/vendor/github.com/minio/minio-go/hook-reader.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go
index 6b0e57440..e29826f48 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,8 +17,6 @@
package credentials
-import "fmt"
-
// A Chain will search for a provider which returns credentials
// and cache that provider until Retrieve is called again.
//
@@ -27,11 +25,11 @@ import "fmt"
// Providers in the list.
//
// If none of the Providers retrieve valid credentials Value, ChainProvider's
-// Retrieve() will return the error, collecting all errors from all providers.
+// Retrieve() will return the no-credentials (anonymous) value.
//
// If a Provider is found which returns valid credentials Value ChainProvider
// will cache that Provider for all calls to IsExpired(), until Retrieve is
-// called again.
+// called again after IsExpired() is true.
//
// creds := credentials.NewChainCredentials(
// []credentials.Provider{
@@ -58,28 +56,30 @@ func NewChainCredentials(providers []Provider) *Credentials {
})
}
-// Retrieve returns the credentials value or error if no provider returned
-// without error.
+// Retrieve returns the credentials value, or the no-credentials (anonymous)
+// value if no credentials provider returned any value.
//
-// If a provider is found it will be cached and any calls to IsExpired()
-// will return the expired state of the cached provider.
+// If a provider is found with credentials, it will be cached and any calls
+// to IsExpired() will return the expired state of the cached provider.
func (c *Chain) Retrieve() (Value, error) {
- var errs []error
for _, p := range c.Providers {
- creds, err := p.Retrieve()
- if err != nil {
- errs = append(errs, err)
+ creds, _ := p.Retrieve()
+ // Always prioritize non-anonymous providers, if any.
+ if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
continue
- } // Success.
+ }
c.curr = p
return creds, nil
}
- c.curr = nil
- return Value{}, fmt.Errorf("No valid providers found %v", errs)
+ // At this point we have exhausted all the providers and
+ // are left without any credentials; return anonymous.
+ return Value{
+ SignerType: SignatureAnonymous,
+ }, nil
}
// IsExpired will return the expired state of the currently cached provider
-// if there is one. If there is no current provider, true will be returned.
+// if there is one. If there is no current provider, true will be returned.
func (c *Chain) IsExpired() bool {
if c.curr != nil {
return c.curr.IsExpired()
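The net effect of the chain change, as a sketch; EnvAWS and EnvMinio are provider types from this credentials package, and the anonymous fallback is what the new Retrieve above guarantees:

    // Retrieve/Get no longer error out when every provider comes back empty;
    // the chain degrades to anonymous (unsigned) access instead.
    creds := credentials.NewChainCredentials([]credentials.Provider{
        &credentials.EnvAWS{},
        &credentials.EnvMinio{},
    })
    v, err := creds.Get() // err stays nil even with no usable provider
    if err == nil && v.SignerType == credentials.SignatureAnonymous {
        // proceed without signing requests
    }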
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go
index cb5a6dda5..d26e376ff 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -76,7 +76,14 @@ func TestChainGet(t *testing.T) {
}
func TestChainIsExpired(t *testing.T) {
- credProvider := &credProvider{expired: true}
+ credProvider := &credProvider{
+ creds: Value{
+ AccessKeyID: "UXHW",
+ SecretAccessKey: "MYSECRET",
+ SessionToken: "",
+ },
+ expired: true,
+ }
p := &Chain{
Providers: []Provider{
credProvider,
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go
index cc3000532..4bfdad413 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go
index cbfb673b7..92c77c4cb 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go
index fa1908aeb..c48784ba8 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go
@@ -1,3 +1,20 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
// Package credentials provides credential retrieval and management
// for S3 compatible object storage.
//
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
index 11934433c..f9b2cc33a 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
index 791087ef5..d72e77185 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_test.go
index 2f72bea40..09cd77f7a 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/env_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
index 1be621385..5ad68303a 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ import (
"path/filepath"
"github.com/go-ini/ini"
- homedir "github.com/minio/go-homedir"
+ homedir "github.com/mitchellh/go-homedir"
)
// A FileAWSCredentials retrieves credentials from the current user's home
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go
index 9e26dd302..c282c2a2c 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import (
"path/filepath"
"runtime"
- homedir "github.com/minio/go-homedir"
+ homedir "github.com/mitchellh/go-homedir"
)
// A FileMinioClient retrieves credentials from the current user's home
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_test.go
index c62c53365..c85c10494 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/file_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go
index b862cf538..637df7466 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -46,18 +46,6 @@ type IAM struct {
endpoint string
}
-// redirectHeaders copies all headers when following a redirect URL.
-// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
-func redirectHeaders(req *http.Request, via []*http.Request) error {
- if len(via) == 0 {
- return nil
- }
- for key, val := range via[0].Header {
- req.Header[key] = val
- }
- return nil
-}
-
// IAM Roles for Amazon EC2
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
const (
@@ -74,8 +62,7 @@ func NewIAM(endpoint string) *Credentials {
}
p := &IAM{
Client: &http.Client{
- Transport: http.DefaultTransport,
- CheckRedirect: redirectHeaders,
+ Transport: http.DefaultTransport,
},
endpoint: endpoint,
}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go
index 3e5ad3ec0..86ea66bf6 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go
@@ -1,3 +1,20 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package credentials
import (
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go
index c64ad6c23..1b768e8c3 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/pkg/credentials/static.go
index 25aff5696..8b0ba711c 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/static.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/static.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/static_test.go
index 491b1554b..f1d2d856c 100644
--- a/vendor/github.com/minio/minio-go/pkg/credentials/static_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/static_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
index be45e52f4..b0f2d6e08 100644
--- a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
+++ b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go
index 8b8554336..482922ab7 100644
--- a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go
+++ b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go
index 8814845e3..0ed95f5ff 100644
--- a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go
+++ b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
index 078bcd1db..737b810ac 100644
--- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go
index 419868f38..9e4aa8fb6 100644
--- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
index b2d46e178..9dda99efc 100644
--- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go
index b1862c639..1e5196f7c 100644
--- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go
index d831436cd..156a6d63a 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,7 +33,6 @@ import (
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
const (
streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
- streamingEncoding = "aws-chunked"
streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD"
emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
payloadChunkSize = 64 * 1024
@@ -99,9 +99,8 @@ func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int
if sessionToken != "" {
req.Header.Set("X-Amz-Security-Token", sessionToken)
}
- req.Header.Add("Content-Encoding", streamingEncoding)
- req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
+ req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
// Set content length with streaming signature for each chunk included.
req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize))
req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go
index 1f49f2234..297ab97be 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,7 +33,7 @@ func TestGetSeedSignature(t *testing.T) {
req := NewRequest("PUT", "/examplebucket/chunkObject.txt", body)
req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
- req.URL.Host = "s3.amazonaws.com"
+ req.Host = "s3.amazonaws.com"
reqTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
if err != nil {
@@ -42,7 +43,7 @@ func TestGetSeedSignature(t *testing.T) {
req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "", "us-east-1", int64(dataLen), reqTime)
actualSeedSignature := req.Body.(*StreamingReader).seedSignature
- expectedSeedSignature := "007480502de61457e955731b0f5d191f7e6f54a8a0f6cc7974a5ebd887965686"
+ expectedSeedSignature := "38cab3af09aa15ddf29e26e36236f60fb6bfb6243a20797ae9a8183674526079"
if actualSeedSignature != expectedSeedSignature {
t.Errorf("Expected %s but received %s", expectedSeedSignature, actualSeedSignature)
}
@@ -68,13 +69,14 @@ func TestSetStreamingAuthorization(t *testing.T) {
req := NewRequest("PUT", "/examplebucket/chunkObject.txt", nil)
req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
+ req.Host = ""
req.URL.Host = "s3.amazonaws.com"
dataLen := int64(65 * 1024)
reqTime, _ := time.Parse(iso8601DateFormat, "20130524T000000Z")
req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "", location, dataLen, reqTime)
- expectedAuthorization := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=content-encoding;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-storage-class,Signature=007480502de61457e955731b0f5d191f7e6f54a8a0f6cc7974a5ebd887965686"
+ expectedAuthorization := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-storage-class,Signature=38cab3af09aa15ddf29e26e36236f60fb6bfb6243a20797ae9a8183674526079"
actualAuthorization := req.Header.Get("Authorization")
if actualAuthorization != expectedAuthorization {
@@ -92,6 +94,7 @@ func TestStreamingReader(t *testing.T) {
req := NewRequest("PUT", "/examplebucket/chunkObject.txt", nil)
req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
req.ContentLength = 65 * 1024
+ req.Host = ""
req.URL.Host = "s3.amazonaws.com"
baseReader := ioutil.NopCloser(bytes.NewReader(bytes.Repeat([]byte("a"), 65*1024)))
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
index 39c4e0187..0b90c41f6 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,22 +40,23 @@ const (
)
// Encode input URL path to URL encoded path.
-func encodeURL2Path(u *url.URL) (path string) {
+func encodeURL2Path(req *http.Request) (path string) {
+ reqHost := getHostAddr(req)
// Encode URL path.
- if isS3, _ := filepath.Match("*.s3*.amazonaws.com", u.Host); isS3 {
- bucketName := u.Host[:strings.LastIndex(u.Host, ".s3")]
+ if isS3, _ := filepath.Match("*.s3*.amazonaws.com", reqHost); isS3 {
+ bucketName := reqHost[:strings.LastIndex(reqHost, ".s3")]
path = "/" + bucketName
- path += u.Path
+ path += req.URL.Path
path = s3utils.EncodePath(path)
return
}
- if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
- path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
- path += u.Path
+ if strings.HasSuffix(reqHost, ".storage.googleapis.com") {
+ path = "/" + strings.TrimSuffix(reqHost, ".storage.googleapis.com")
+ path += req.URL.Path
path = s3utils.EncodePath(path)
return
}
- path = s3utils.EncodePath(u.Path)
+ path = s3utils.EncodePath(req.URL.Path)
return
}
@@ -76,7 +78,7 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
}
// Get presigned string to sign.
- stringToSign := preStringifyHTTPReq(req)
+ stringToSign := preStringToSignV2(req)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
@@ -85,7 +87,7 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
query := req.URL.Query()
// Handle specially for Google Cloud Storage.
- if strings.Contains(req.URL.Host, ".storage.googleapis.com") {
+ if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
query.Set("GoogleAccessId", accessKeyID)
} else {
query.Set("AWSAccessKeyId", accessKeyID)
@@ -145,7 +147,7 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
}
// Calculate HMAC for secretAccessKey.
- stringToSign := stringifyHTTPReq(req)
+ stringToSign := stringToSignV2(req)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
@@ -170,15 +172,14 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
// Expires + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
-func preStringifyHTTPReq(req http.Request) string {
+func preStringToSignV2(req http.Request) string {
buf := new(bytes.Buffer)
// Write standard headers.
writePreSignV2Headers(buf, req)
// Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any.
- isPreSign := true
- writeCanonicalizedResource(buf, req, isPreSign)
+ writeCanonicalizedResource(buf, req)
return buf.String()
}
@@ -198,15 +199,14 @@ func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
-func stringifyHTTPReq(req http.Request) string {
+func stringToSignV2(req http.Request) string {
buf := new(bytes.Buffer)
// Write standard headers.
writeSignV2Headers(buf, req)
// Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any.
- isPreSign := false
- writeCanonicalizedResource(buf, req, isPreSign)
+ writeCanonicalizedResource(buf, req)
return buf.String()
}
@@ -253,17 +253,27 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
}
}
-// The following list is already sorted and should always be, otherwise we could
-// have signature-related issues
+// AWS S3 Signature V2 calculation rule is given here:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
+
+// Whitelist of resources that are considered in the query string for signature-V2 calculation.
+// The list must be kept alphabetically sorted.
var resourceList = []string{
"acl",
"delete",
+ "lifecycle",
"location",
"logging",
"notification",
"partNumber",
"policy",
"requestPayment",
+ "response-cache-control",
+ "response-content-disposition",
+ "response-content-encoding",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
"torrent",
"uploadId",
"uploads",
@@ -278,22 +288,11 @@ var resourceList = []string{
// CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> +
// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
-func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign bool) {
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) {
// Save request URL.
requestURL := req.URL
// Get encoded URL path.
- path := encodeURL2Path(requestURL)
- if isPreSign {
- // Get encoded URL path.
- if len(requestURL.Query()) > 0 {
- // Keep the usual queries unescaped for string to sign.
- query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query()))
- path = path + "?" + query
- }
- buf.WriteString(path)
- return
- }
- buf.WriteString(path)
+ buf.WriteString(encodeURL2Path(&req))
if requestURL.RawQuery != "" {
var n int
vals, _ := url.ParseQuery(requestURL.RawQuery)
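For illustration only (not the library's exact code, and assuming net/url and strings are imported): subresources present in the query string are appended in resourceList order, which is why the whitelist must stay sorted.

    func canonicalSubresources(query url.Values) string {
        var parts []string
        for _, res := range resourceList { // sorted, so output is sorted too
            if vals, ok := query[res]; ok {
                if len(vals) > 0 && vals[0] != "" {
                    parts = append(parts, res+"="+vals[0])
                } else {
                    parts = append(parts, res)
                }
            }
        }
        if len(parts) == 0 {
            return ""
        }
        return "?" + strings.Join(parts, "&")
    }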
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go
index 3c0e0ecea..042b6e65c 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
index 0d75dc162..daf02fedf 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -143,7 +144,7 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin
buf.WriteByte(':')
switch {
case k == "host":
- buf.WriteString(req.URL.Host)
+ buf.WriteString(getHostAddr(&req))
fallthrough
default:
for idx, v := range vals[k] {
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4_test.go
new file mode 100644
index 000000000..a109a4f2a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4_test.go
@@ -0,0 +1,50 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "io"
+ "net/http"
+ "strings"
+ "testing"
+)
+
+func TestRequestHost(t *testing.T) {
+ req, _ := buildRequest("dynamodb", "us-east-1", "{}")
+ req.URL.RawQuery = "Foo=z&Foo=o&Foo=m&Foo=a"
+ req.Host = "myhost"
+ canonicalHeaders := getCanonicalHeaders(*req, v4IgnoredHeaders)
+
+ if !strings.Contains(canonicalHeaders, "host:"+req.Host) {
+ t.Errorf("canonical host header invalid")
+ }
+}
+
+func buildRequest(serviceName, region, body string) (*http.Request, io.ReadSeeker) {
+ endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
+ reader := strings.NewReader(body)
+ req, _ := http.NewRequest("POST", endpoint, reader)
+ req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
+ req.Header.Add("X-Amz-Target", "prefix.Operation")
+ req.Header.Add("Content-Type", "application/x-amz-json-1.0")
+ req.Header.Add("Content-Length", strconv.Itoa(len(body))) // strconv.Itoa, not string(int), which would yield a rune
+ req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
+ req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
+ req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
+ return req, reader
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go
index 85ff063df..d53483e4e 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/test-utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/test-utils_test.go
index 049e5813d..cf96d66c8 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/test-utils_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/test-utils_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
index 0619b3082..33b175208 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,6 +20,7 @@ package s3signer
import (
"crypto/hmac"
"crypto/sha256"
+ "net/http"
)
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
@@ -37,3 +39,11 @@ func sumHMAC(key []byte, data []byte) []byte {
hash.Write(data)
return hash.Sum(nil)
}
+
+// getHostAddr returns the Host header if set, otherwise the host from the URL.
+func getHostAddr(req *http.Request) string {
+ if req.Host != "" {
+ return req.Host
+ }
+ return req.URL.Host
+}
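
A quick sketch of the helper's behavior, using only the standard library (the logic is copied from getHostAddr above; the sample URL is made up):

```go
package main

import (
	"fmt"
	"net/http"
)

// hostAddr mirrors getHostAddr: prefer the explicit Host header,
// fall back to the URL's host.
func hostAddr(req *http.Request) string {
	if req.Host != "" {
		return req.Host
	}
	return req.URL.Host
}

func main() {
	req, _ := http.NewRequest("GET", "https://s3.amazonaws.com/bucket", nil)
	fmt.Println(hostAddr(req)) // s3.amazonaws.com
	req.Host = "myhost"        // an explicit Host header wins
	fmt.Println(hostAddr(req)) // myhost
}
```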
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go
index 26f609013..407eddab3 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,6 +19,7 @@ package s3signer
import (
"fmt"
+ "net/http"
"net/url"
"testing"
)
@@ -65,7 +67,7 @@ func TestEncodeURL2Path(t *testing.T) {
t.Fatal("Error:", err)
}
urlPath := "/" + bucketName + "/" + o.encodedObjName
- if urlPath != encodeURL2Path(u) {
+ if urlPath != encodeURL2Path(&http.Request{URL: u}) {
t.Fatal("Error")
}
}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
index bdc8d4e91..bfeb73e41 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -80,18 +81,56 @@ func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
}
-// AmazonS3Host - regular expression used to determine if an arg is s3 host.
-var AmazonS3Host = regexp.MustCompile("^s3[.-]?(.*?)\\.amazonaws\\.com$")
+// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
-// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
-func IsAmazonEndpoint(endpointURL url.URL) bool {
- if IsAmazonChinaEndpoint(endpointURL) {
- return true
+// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
+var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?)\.amazonaws\.com$`)
+
+// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
+var amazonS3HostDualStack = regexp.MustCompile(`^s3\.dualstack\.(.*?)\.amazonaws\.com$`)
+
+// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
+var amazonS3HostDot = regexp.MustCompile(`^s3\.(.*?)\.amazonaws\.com$`)
+
+// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
+var amazonS3ChinaHost = regexp.MustCompile(`^s3\.(cn.*?)\.amazonaws\.com\.cn$`)
+
+// GetRegionFromURL - returns a region from url host.
+func GetRegionFromURL(endpointURL url.URL) string {
+ if endpointURL == sentinelURL {
+ return ""
+ }
+ if endpointURL.Host == "s3-external-1.amazonaws.com" {
+ return ""
}
if IsAmazonGovCloudEndpoint(endpointURL) {
+ return "us-gov-west-1"
+ }
+ parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
+func IsAmazonEndpoint(endpointURL url.URL) bool {
+ if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
return true
}
- return AmazonS3Host.MatchString(endpointURL.Host)
+ return GetRegionFromURL(endpointURL) != ""
}
// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
@@ -111,19 +150,6 @@ func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com"
}
-// IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint.
-// Customers who wish to use the new Beijing Region are required
-// to sign up for a separate set of account credentials unique to
-// the China (Beijing) Region. Customers with existing AWS credentials
-// will not be able to access resources in the new Region, and vice versa.
-// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
-func IsAmazonChinaEndpoint(endpointURL url.URL) bool {
- if endpointURL == sentinelURL {
- return false
- }
- return endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn"
-}
-
// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
func IsGoogleEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
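
A hedged usage sketch for the new GetRegionFromURL, assuming the vendored import path:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/pkg/s3utils"
)

func main() {
	u, _ := url.Parse("https://s3.dualstack.eu-west-1.amazonaws.com")
	fmt.Println(s3utils.GetRegionFromURL(*u)) // eu-west-1

	u, _ = url.Parse("https://s3.amazonaws.com")
	fmt.Println(s3utils.GetRegionFromURL(*u)) // "" — callers default to us-east-1
}
```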
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go
index d3b4d4331..55eaaeacf 100644
--- a/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,6 +23,66 @@ import (
"testing"
)
+// Tests get region from host URL.
+func TestGetRegionFromURL(t *testing.T) {
+ testCases := []struct {
+ u url.URL
+ expectedRegion string
+ }{
+ {
+ u: url.URL{Host: "storage.googleapis.com"},
+ expectedRegion: "",
+ },
+ {
+ u: url.URL{Host: "s3.cn-north-1.amazonaws.com.cn"},
+ expectedRegion: "cn-north-1",
+ },
+ {
+ u: url.URL{Host: "s3.cn-northwest-1.amazonaws.com.cn"},
+ expectedRegion: "cn-northwest-1",
+ },
+ {
+ u: url.URL{Host: "s3-fips-us-gov-west-1.amazonaws.com"},
+ expectedRegion: "us-gov-west-1",
+ },
+ {
+ u: url.URL{Host: "s3-us-gov-west-1.amazonaws.com"},
+ expectedRegion: "us-gov-west-1",
+ },
+ {
+ u: url.URL{Host: "192.168.1.1"},
+ expectedRegion: "",
+ },
+ {
+ u: url.URL{Host: "s3-eu-west-1.amazonaws.com"},
+ expectedRegion: "eu-west-1",
+ },
+ {
+ u: url.URL{Host: "s3.eu-west-1.amazonaws.com"},
+ expectedRegion: "eu-west-1",
+ },
+ {
+ u: url.URL{Host: "s3.dualstack.eu-west-1.amazonaws.com"},
+ expectedRegion: "eu-west-1",
+ },
+ {
+ u: url.URL{Host: "s3.amazonaws.com"},
+ expectedRegion: "",
+ },
+ {
+ u: url.URL{Host: "s3-external-1.amazonaws.com"},
+ expectedRegion: "",
+ },
+ }
+
+ for i, testCase := range testCases {
+ region := GetRegionFromURL(testCase.u)
+ if testCase.expectedRegion != region {
+ t.Errorf("Test %d: Expected region %s, got %s", i+1, testCase.expectedRegion, region)
+ }
+ }
+}
+
// Tests for 'isValidDomain(host string) bool'.
func TestIsValidDomain(t *testing.T) {
testCases := []struct {
@@ -32,6 +93,7 @@ func TestIsValidDomain(t *testing.T) {
}{
{"s3.amazonaws.com", true},
{"s3.cn-north-1.amazonaws.com.cn", true},
+ {"s3.cn-northwest-1.amazonaws.com.cn", true},
{"s3.amazonaws.com_", false},
{"%$$$", false},
{"s3.amz.test.com", true},
@@ -119,9 +181,17 @@ func TestIsAmazonEndpoint(t *testing.T) {
{"https://amazons3.amazonaws.com", false},
{"-192.168.1.1", false},
{"260.192.1.1", false},
+ {"https://s3-.amazonaws.com", false},
+ {"https://s3..amazonaws.com", false},
+ {"https://s3.dualstack.us-west-1.amazonaws.com.cn", false},
+ {"https://s3..us-west-1.amazonaws.com.cn", false},
// valid inputs.
{"https://s3.amazonaws.com", true},
+ {"https://s3-external-1.amazonaws.com", true},
{"https://s3.cn-north-1.amazonaws.com.cn", true},
+ {"https://s3-us-west-1.amazonaws.com", true},
+ {"https://s3.us-west-1.amazonaws.com", true},
+ {"https://s3.dualstack.us-west-1.amazonaws.com", true},
}
for i, testCase := range testCases {
@@ -137,41 +207,6 @@ func TestIsAmazonEndpoint(t *testing.T) {
}
-// Tests validate Amazon S3 China endpoint validator.
-func TestIsAmazonChinaEndpoint(t *testing.T) {
- testCases := []struct {
- url string
- // Expected result.
- result bool
- }{
- {"https://192.168.1.1", false},
- {"192.168.1.1", false},
- {"http://storage.googleapis.com", false},
- {"https://storage.googleapis.com", false},
- {"storage.googleapis.com", false},
- {"s3.amazonaws.com", false},
- {"https://amazons3.amazonaws.com", false},
- {"-192.168.1.1", false},
- {"260.192.1.1", false},
- // s3.amazonaws.com is not a valid Amazon S3 China end point.
- {"https://s3.amazonaws.com", false},
- // valid input.
- {"https://s3.cn-north-1.amazonaws.com.cn", true},
- }
-
- for i, testCase := range testCases {
- u, err := url.Parse(testCase.url)
- if err != nil {
- t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
- }
- result := IsAmazonChinaEndpoint(*u)
- if testCase.result != result {
- t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
- }
- }
-
-}
-
// Tests validate Google Cloud end point validator.
func TestIsGoogleEndpoint(t *testing.T) {
testCases := []struct {
diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/pkg/set/stringset.go
index 9f33488e0..efd02629b 100644
--- a/vendor/github.com/minio/minio-go/pkg/set/stringset.go
+++ b/vendor/github.com/minio/minio-go/pkg/set/stringset.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go b/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go
index e276fec5a..d7e6aa799 100644
--- a/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go
+++ b/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go
index 5e716124a..b3ae7050a 100644
--- a/vendor/github.com/minio/minio-go/post-policy.go
+++ b/vendor/github.com/minio/minio-go/post-policy.go
@@ -1,3 +1,20 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package minio
import (
@@ -167,6 +184,28 @@ func (p *PostPolicy) SetSuccessStatusAction(status string) error {
return nil
}
+// SetUserMetadata - Set user metadata as a key/value couple.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadata(key string, value string) error {
+ if strings.TrimSpace(key) == "" {
+ return ErrInvalidArgument("Key is empty")
+ }
+ if strings.TrimSpace(value) == "" {
+ return ErrInvalidArgument("Value is empty")
+ }
+ headerName := fmt.Sprintf("x-amz-meta-%s", key)
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: fmt.Sprintf("$%s", headerName),
+ value: value,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData[headerName] = value
+ return nil
+}
+
// addNewPolicy - internal helper to validate adding new policies.
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
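
A usage sketch for the new SetUserMetadata (the bucket, key, and expiry values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	policy := minio.NewPostPolicy()
	policy.SetBucket("mybucket")
	policy.SetKey("myobject")
	policy.SetExpires(time.Now().UTC().Add(10 * time.Minute))

	// New in this upgrade: pin a user metadata key/value in the POST policy.
	if err := policy.SetUserMetadata("uuid", "1234"); err != nil {
		fmt.Println(err)
	}
}
```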
diff --git a/vendor/github.com/minio/minio-go/retry-continous.go b/vendor/github.com/minio/minio-go/retry-continous.go
index e300af69c..f31dfa6f2 100644
--- a/vendor/github.com/minio/minio-go/retry-continous.go
+++ b/vendor/github.com/minio/minio-go/retry-continous.go
@@ -1,3 +1,20 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package minio
import "time"
diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go
index 1de5107e4..c21a76d79 100644
--- a/vendor/github.com/minio/minio-go/retry.go
+++ b/vendor/github.com/minio/minio-go/retry.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,7 +26,7 @@ import (
)
// MaxRetry is the maximum number of retries before stopping.
-var MaxRetry = 5
+var MaxRetry = 10
// MaxJitter will randomize over the full exponential backoff time
const MaxJitter = 1.0
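
The doubled MaxRetry pairs with MaxJitter above; for intuition, a self-contained sketch of capped exponential backoff with full jitter (the constants are illustrative, not the library's):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

const (
	maxRetry = 10                     // mirrors the new MaxRetry default
	unit     = 200 * time.Millisecond // illustrative base delay
	maxWait  = 30 * time.Second       // illustrative cap
)

// backoff returns a full-jitter exponential delay for the given attempt.
func backoff(attempt uint) time.Duration {
	d := unit << attempt // exponential growth
	if d > maxWait {
		d = maxWait
	}
	return time.Duration(rand.Int63n(int64(d))) // full jitter
}

func main() {
	for attempt := uint(0); attempt < maxRetry; attempt++ {
		fmt.Println(backoff(attempt))
	}
}
```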
diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go
index c02f3f1fa..058929501 100644
--- a/vendor/github.com/minio/minio-go/s3-endpoints.go
+++ b/vendor/github.com/minio/minio-go/s3-endpoints.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,15 +18,15 @@
package minio
// awsS3EndpointMap Amazon S3 endpoint map.
-// "cn-north-1" adds support for AWS China.
var awsS3EndpointMap = map[string]string{
"us-east-1": "s3.amazonaws.com",
"us-east-2": "s3-us-east-2.amazonaws.com",
"us-west-2": "s3-us-west-2.amazonaws.com",
"us-west-1": "s3-us-west-1.amazonaws.com",
- "ca-central-1": "s3.ca-central-1.amazonaws.com",
+ "ca-central-1": "s3-ca-central-1.amazonaws.com",
"eu-west-1": "s3-eu-west-1.amazonaws.com",
"eu-west-2": "s3-eu-west-2.amazonaws.com",
+ "eu-west-3": "s3-eu-west-3.amazonaws.com",
"eu-central-1": "s3-eu-central-1.amazonaws.com",
"ap-south-1": "s3-ap-south-1.amazonaws.com",
"ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
@@ -35,6 +36,7 @@ var awsS3EndpointMap = map[string]string{
"sa-east-1": "s3-sa-east-1.amazonaws.com",
"us-gov-west-1": "s3-us-gov-west-1.amazonaws.com",
"cn-north-1": "s3.cn-north-1.amazonaws.com.cn",
+ "cn-northwest-1": "s3.cn-northwest-1.amazonaws.com.cn",
}
// getS3Endpoint gets the Amazon S3 endpoint based on the bucket location.
diff --git a/vendor/github.com/minio/minio-go/s3-error.go b/vendor/github.com/minio/minio-go/s3-error.go
index c5aff9bbc..f9e82334a 100644
--- a/vendor/github.com/minio/minio-go/s3-error.go
+++ b/vendor/github.com/minio/minio-go/s3-error.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/test-utils_test.go b/vendor/github.com/minio/minio-go/test-utils_test.go
index b109dfaf7..6f6443ccf 100644
--- a/vendor/github.com/minio/minio-go/test-utils_test.go
+++ b/vendor/github.com/minio/minio-go/test-utils_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/transport.go b/vendor/github.com/minio/minio-go/transport.go
index d286bd7ae..e2dafe172 100644
--- a/vendor/github.com/minio/minio-go/transport.go
+++ b/vendor/github.com/minio/minio-go/transport.go
@@ -2,7 +2,7 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/minio/minio-go/transport_1_6.go b/vendor/github.com/minio/minio-go/transport_1_6.go
deleted file mode 100644
index 77e7d76fc..000000000
--- a/vendor/github.com/minio/minio-go/transport_1_6.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// +build go1.6,!go1.7,!go1.8
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "net/http"
- "time"
-)
-
-// This default transport is similar to http.DefaultTransport
-// but with additional DisableCompression:
-var defaultMinioTransport http.RoundTripper = &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- // Set this value so that the underlying transport round-tripper
- // doesn't try to auto decode the body of objects with
- // content-encoding set to `gzip`.
- //
- // Refer:
- // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
- DisableCompression: true,
-}
diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go
index 6f54639e0..0f92546d3 100644
--- a/vendor/github.com/minio/minio-go/utils.go
+++ b/vendor/github.com/minio/minio-go/utils.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,6 +20,8 @@ package minio
import (
"crypto/md5"
"crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
"encoding/xml"
"io"
"io/ioutil"
@@ -38,18 +41,18 @@ func xmlDecoder(body io.Reader, v interface{}) error {
return d.Decode(v)
}
-// sum256 calculate sha256 sum for an input byte array.
-func sum256(data []byte) []byte {
+// sum256Hex calculates the sha256 sum of an input byte array and returns the hex encoded string.
+func sum256Hex(data []byte) string {
hash := sha256.New()
hash.Write(data)
- return hash.Sum(nil)
+ return hex.EncodeToString(hash.Sum(nil))
}
-// sumMD5 calculate sumMD5 sum for an input byte array.
-func sumMD5(data []byte) []byte {
+// sumMD5Base64 calculates the md5 sum of an input byte array and returns the base64 encoded string.
+func sumMD5Base64(data []byte) string {
hash := md5.New()
hash.Write(data)
- return hash.Sum(nil)
+ return base64.StdEncoding.EncodeToString(hash.Sum(nil))
}
// getEndpointURL - construct a new endpoint.
@@ -109,10 +112,13 @@ func closeResponse(resp *http.Response) {
}
}
-var emptySHA256 = sum256(nil)
+var (
+ // Hex encoded SHA256 sum of empty (nil) input.
+ emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
-// Sentinel URL is the default url value which is invalid.
-var sentinelURL = url.URL{}
+ // Sentinel URL is the default url value which is invalid.
+ sentinelURL = url.URL{}
+)
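
Both encodings can be reproduced with the standard library alone; a standalone sketch (helper names mirror the diff):

```go
package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func sum256Hex(data []byte) string {
	h := sha256.Sum256(data)
	return hex.EncodeToString(h[:])
}

func sumMD5Base64(data []byte) string {
	h := md5.Sum(data)
	return base64.StdEncoding.EncodeToString(h[:])
}

func main() {
	// sum256Hex(nil) matches the emptySHA256Hex value above.
	fmt.Println(sum256Hex(nil))
	fmt.Println(sumMD5Base64([]byte("hello")))
}
```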
// Verify if input endpoint URL is valid.
func isValidEndpointURL(endpointURL url.URL) error {
@@ -203,12 +209,81 @@ func getDefaultLocation(u url.URL, regionOverride string) (location string) {
if regionOverride != "" {
return regionOverride
}
- if s3utils.IsAmazonChinaEndpoint(u) {
- return "cn-north-1"
+ region := s3utils.GetRegionFromURL(u)
+ if region == "" {
+ region = "us-east-1"
}
- if s3utils.IsAmazonGovCloudEndpoint(u) {
- return "us-gov-west-1"
+ return region
+}
+
+var supportedHeaders = []string{
+ "content-type",
+ "cache-control",
+ "content-encoding",
+ "content-disposition",
+ // Add more supported headers here.
+}
+
+// cseHeaders is list of client side encryption headers
+var cseHeaders = []string{
+ "X-Amz-Iv",
+ "X-Amz-Key",
+ "X-Amz-Matdesc",
+}
+
+// isStorageClassHeader returns true if the header is a supported storage class header
+func isStorageClassHeader(headerKey string) bool {
+ return strings.EqualFold(amzStorageClass, headerKey)
+}
+
+// isStandardHeader returns true if header is a supported header and not a custom header
+func isStandardHeader(headerKey string) bool {
+ key := strings.ToLower(headerKey)
+ for _, header := range supportedHeaders {
+ if strings.ToLower(header) == key {
+ return true
+ }
}
- // Default to location to 'us-east-1'.
- return "us-east-1"
+ return false
+}
+
+// isCSEHeader returns true if header is a client side encryption header.
+func isCSEHeader(headerKey string) bool {
+ key := strings.ToLower(headerKey)
+ for _, h := range cseHeaders {
+ header := strings.ToLower(h)
+ if (header == key) ||
+ (("x-amz-meta-" + header) == key) {
+ return true
+ }
+ }
+ return false
+}
+
+// sseHeaders is list of server side encryption headers
+var sseHeaders = []string{
+ "x-amz-server-side-encryption",
+ "x-amz-server-side-encryption-aws-kms-key-id",
+ "x-amz-server-side-encryption-context",
+ "x-amz-server-side-encryption-customer-algorithm",
+ "x-amz-server-side-encryption-customer-key",
+ "x-amz-server-side-encryption-customer-key-MD5",
+}
+
+// isSSEHeader returns true if header is a server side encryption header.
+func isSSEHeader(headerKey string) bool {
+ key := strings.ToLower(headerKey)
+ for _, h := range sseHeaders {
+ if strings.ToLower(h) == key {
+ return true
+ }
+ }
+ return false
+}
+
+// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
+func isAmzHeader(headerKey string) bool {
+ key := strings.ToLower(headerKey)
+
+ return strings.HasPrefix(key, "x-amz-meta-") || key == "x-amz-acl"
}
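
For intuition, an illustrative sketch of how such classifiers compose when deciding whether to forward a caller-supplied header (simplified stand-ins, not the library's actual call site):

```go
package main

import (
	"fmt"
	"strings"
)

// Simplified stand-ins for the helpers above.
func isAmzHeader(k string) bool {
	k = strings.ToLower(k)
	return strings.HasPrefix(k, "x-amz-meta-") || k == "x-amz-acl"
}

func isStandardHeader(k string) bool {
	switch strings.ToLower(k) {
	case "content-type", "cache-control", "content-encoding", "content-disposition":
		return true
	}
	return false
}

func main() {
	for _, h := range []string{"Content-Type", "X-Amz-Meta-Foo", "Random-Header"} {
		fmt.Printf("%s forwarded: %v\n", h, isAmzHeader(h) || isStandardHeader(h))
	}
}
```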
diff --git a/vendor/github.com/minio/minio-go/utils_test.go b/vendor/github.com/minio/minio-go/utils_test.go
index ba297112e..5411cc91a 100644
--- a/vendor/github.com/minio/minio-go/utils_test.go
+++ b/vendor/github.com/minio/minio-go/utils_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,6 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package minio
import (
@@ -80,8 +82,10 @@ func TestGetEndpointURL(t *testing.T) {
}{
{"s3.amazonaws.com", true, "https://s3.amazonaws.com", nil, true},
{"s3.cn-north-1.amazonaws.com.cn", true, "https://s3.cn-north-1.amazonaws.com.cn", nil, true},
+ {"s3.cn-northwest-1.amazonaws.com.cn", true, "https://s3.cn-northwest-1.amazonaws.com.cn", nil, true},
{"s3.amazonaws.com", false, "http://s3.amazonaws.com", nil, true},
{"s3.cn-north-1.amazonaws.com.cn", false, "http://s3.cn-north-1.amazonaws.com.cn", nil, true},
+ {"s3.cn-northwest-1.amazonaws.com.cn", false, "http://s3.cn-northwest-1.amazonaws.com.cn", nil, true},
{"192.168.1.1:9000", false, "http://192.168.1.1:9000", nil, true},
{"192.168.1.1:9000", true, "https://192.168.1.1:9000", nil, true},
{"s3.amazonaws.com:443", true, "https://s3.amazonaws.com:443", nil, true},
@@ -198,7 +202,13 @@ func TestDefaultBucketLocation(t *testing.T) {
regionOverride: "",
expectedLocation: "cn-north-1",
},
- // No region provided, no standard region strings provided as well. - Test 5.
+ // China region should be honored, region override not provided. - Test 5.
+ {
+ endpointURL: url.URL{Host: "s3.cn-northwest-1.amazonaws.com.cn"},
+ regionOverride: "",
+ expectedLocation: "cn-northwest-1",
+ },
+ // No region provided, no standard region strings provided as well. - Test 6.
{
endpointURL: url.URL{Host: "s3.amazonaws.com"},
regionOverride: "",
@@ -289,3 +299,105 @@ func TestIsValidBucketName(t *testing.T) {
}
}
+
+// Tests if header is standard supported header
+func TestIsStandardHeader(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ header string
+ // Expected result.
+ expectedValue bool
+ }{
+ {"content-encoding", true},
+ {"content-type", true},
+ {"cache-control", true},
+ {"content-disposition", true},
+ {"random-header", false},
+ }
+
+ for i, testCase := range testCases {
+ actual := isStandardHeader(testCase.header)
+ if actual != testCase.expectedValue {
+ t.Errorf("Test %d: Expected to pass, but failed", i+1)
+ }
+ }
+
+}
+
+// Tests if header is server encryption header
+func TestIsSSEHeader(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ header string
+ // Expected result.
+ expectedValue bool
+ }{
+ {"x-amz-server-side-encryption", true},
+ {"x-amz-server-side-encryption-aws-kms-key-id", true},
+ {"x-amz-server-side-encryption-context", true},
+ {"x-amz-server-side-encryption-customer-algorithm", true},
+ {"x-amz-server-side-encryption-customer-key", true},
+ {"x-amz-server-side-encryption-customer-key-MD5", true},
+ {"random-header", false},
+ }
+
+ for i, testCase := range testCases {
+ actual := isSSEHeader(testCase.header)
+ if actual != testCase.expectedValue {
+ t.Errorf("Test %d: Expected to pass, but failed", i+1)
+ }
+ }
+}
+
+// Tests if header is client encryption header
+func TestIsCSEHeader(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ header string
+ // Expected result.
+ expectedValue bool
+ }{
+ {"x-amz-iv", true},
+ {"x-amz-key", true},
+ {"x-amz-matdesc", true},
+ {"x-amz-meta-x-amz-iv", true},
+ {"x-amz-meta-x-amz-key", true},
+ {"x-amz-meta-x-amz-matdesc", true},
+ {"random-header", false},
+ }
+
+ for i, testCase := range testCases {
+ actual := isCSEHeader(testCase.header)
+ if actual != testCase.expectedValue {
+ t.Errorf("Test %d: Expected to pass, but failed", i+1)
+ }
+ }
+
+}
+
+// Tests if header is x-amz-meta or x-amz-acl
+func TestIsAmzHeader(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ header string
+ // Expected result.
+ expectedValue bool
+ }{
+ {"x-amz-iv", false},
+ {"x-amz-key", false},
+ {"x-amz-matdesc", false},
+ {"x-amz-meta-x-amz-iv", true},
+ {"x-amz-meta-x-amz-key", true},
+ {"x-amz-meta-x-amz-matdesc", true},
+ {"x-amz-acl", true},
+ {"random-header", false},
+ }
+
+ for i, testCase := range testCases {
+ actual := isAmzHeader(testCase.header)
+ if actual != testCase.expectedValue {
+ t.Errorf("Test %d: Expected to pass, but failed", i+1)
+ }
+ }
+
+}
diff --git a/vendor/github.com/minio/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
index f9c841a51..f9c841a51 100644
--- a/vendor/github.com/minio/go-homedir/LICENSE
+++ b/vendor/github.com/mitchellh/go-homedir/LICENSE
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
new file mode 100644
index 000000000..d70706d5b
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/README.md
@@ -0,0 +1,14 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is incredibly simple: call `homedir.Dir()` to get the home directory
+for a user, and `homedir.Expand()` to expand the `~` in a path to the home
+directory.
+
+**Why not just use `os/user`?** The built-in `os/user` package requires
+cgo on Darwin systems. This means that any Go code that uses that package
+cannot cross compile. But 99% of the time the use for `os/user` is just to
+retrieve the home directory, which we can do for the current user without
+cgo. This library does that, enabling cross-compilation.
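
A short usage sketch of the two calls described above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/go-homedir"
)

func main() {
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home:", home)

	// Expand replaces a leading ~ with the home directory.
	p, err := homedir.Expand("~/.config/app.toml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("config:", p)
}
```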
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100644
index 000000000..47e1f9ef8
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -0,0 +1,137 @@
+package homedir
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.RWMutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+ if !DisableCache {
+ cacheLock.RLock()
+ cached := homedirCache
+ cacheLock.RUnlock()
+ if cached != "" {
+ return cached, nil
+ }
+ }
+
+ cacheLock.Lock()
+ defer cacheLock.Unlock()
+
+ var result string
+ var err error
+ if runtime.GOOS == "windows" {
+ result, err = dirWindows()
+ } else {
+ // Unix-like system, so just assume Unix
+ result, err = dirUnix()
+ }
+
+ if err != nil {
+ return "", err
+ }
+ homedirCache = result
+ return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
+func Expand(path string) (string, error) {
+ if len(path) == 0 {
+ return path, nil
+ }
+
+ if path[0] != '~' {
+ return path, nil
+ }
+
+ if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+ return "", errors.New("cannot expand user-specific home dir")
+ }
+
+ dir, err := Dir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(dir, path[1:]), nil
+}
+
+func dirUnix() (string, error) {
+ // First prefer the HOME environmental variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ // If that fails, try getent
+ var stdout bytes.Buffer
+ cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ // If the error is ErrNotFound, we ignore it. Otherwise, return it.
+ if err != exec.ErrNotFound {
+ return "", err
+ }
+ } else {
+ if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+ // username:password:uid:gid:gecos:home:shell
+ passwdParts := strings.SplitN(passwd, ":", 7)
+ if len(passwdParts) > 5 {
+ return passwdParts[5], nil
+ }
+ }
+ }
+
+ // If all else fails, try the shell
+ stdout.Reset()
+ cmd = exec.Command("sh", "-c", "cd && pwd")
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ return "", err
+ }
+
+ result := strings.TrimSpace(stdout.String())
+ if result == "" {
+ return "", errors.New("blank output when reading home directory")
+ }
+
+ return result, nil
+}
+
+func dirWindows() (string, error) {
+ // First prefer the HOME environmental variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ drive := os.Getenv("HOMEDRIVE")
+ path := os.Getenv("HOMEPATH")
+ home := drive + path
+ if drive == "" || path == "" {
+ home = os.Getenv("USERPROFILE")
+ }
+ if home == "" {
+ return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
+ }
+
+ return home, nil
+}
diff --git a/vendor/github.com/minio/go-homedir/homedir_test.go b/vendor/github.com/mitchellh/go-homedir/homedir_test.go
index a45121ff1..e4054e72a 100644
--- a/vendor/github.com/minio/go-homedir/homedir_test.go
+++ b/vendor/github.com/mitchellh/go-homedir/homedir_test.go
@@ -30,8 +30,6 @@ func BenchmarkDir(b *testing.B) {
}
func TestDir(t *testing.T) {
- // NOTE: This test is not portable. If user.Current() worked
- // everywhere, we wouldn't need our package in the first place.
u, err := user.Current()
if err != nil {
t.Fatalf("err: %s", err)
diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml
index 5c14c1339..d9deadb86 100644
--- a/vendor/github.com/mitchellh/mapstructure/.travis.yml
+++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml
@@ -1,7 +1,8 @@
-language: go
+language: go
+
+go:
+ - 1.9.x
+ - tip
-go:
- - 1.8.1
-
script:
- - go test
+ - go test
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
index 659d6885f..7ecc785e4 100644
--- a/vendor/github.com/mitchellh/mapstructure/README.md
+++ b/vendor/github.com/mitchellh/mapstructure/README.md
@@ -1,4 +1,4 @@
-# mapstructure
+# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
mapstructure is a Go library for decoding generic map values to structures
and vice versa, while providing helpful error handling.
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
index 30a9957c6..39ec1e943 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -237,6 +237,8 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error
err = d.decodePtr(name, data, val)
case reflect.Slice:
err = d.decodeSlice(name, data, val)
+ case reflect.Array:
+ err = d.decodeArray(name, data, val)
case reflect.Func:
err = d.decodeFunc(name, data, val)
default:
@@ -292,12 +294,22 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value)
val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
- case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
+ case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
+ dataKind == reflect.Array && d.config.WeaklyTypedInput:
dataType := dataVal.Type()
elemKind := dataType.Elem().Kind()
- switch {
- case elemKind == reflect.Uint8:
- val.SetString(string(dataVal.Interface().([]uint8)))
+ switch elemKind {
+ case reflect.Uint8:
+ var uints []uint8
+ if dataKind == reflect.Array {
+ uints = make([]uint8, dataVal.Len())
+ for i := range uints {
+ uints[i] = dataVal.Index(i).Interface().(uint8)
+ }
+ } else {
+ uints = dataVal.Interface().([]uint8)
+ }
+ val.SetString(string(uints))
default:
converted = false
}
@@ -647,6 +659,73 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
return nil
}
+func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataValKind := dataVal.Kind()
+ valType := val.Type()
+ valElemType := valType.Elem()
+ arrayType := reflect.ArrayOf(valType.Len(), valElemType)
+
+ valArray := val
+
+ if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
+ // Check input type
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+ if d.config.WeaklyTypedInput {
+ switch {
+ // Empty maps turn into empty arrays
+ case dataValKind == reflect.Map:
+ if dataVal.Len() == 0 {
+ val.Set(reflect.Zero(arrayType))
+ return nil
+ }
+
+ // All other types we try to convert to the array type
+ // and "lift" it into it. i.e. a string becomes a string array.
+ default:
+ // Just re-try this function with data as a slice.
+ return d.decodeArray(name, []interface{}{data}, val)
+ }
+ }
+
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+ }
+ if dataVal.Len() > arrayType.Len() {
+ return fmt.Errorf(
+ "'%s': expected source data to have length less than or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
+ }
+
+ // Make a new array to hold our result, same size as the original data.
+ valArray = reflect.New(arrayType).Elem()
+ }
+
+ // Accumulate any errors
+ errors := make([]string, 0)
+
+ for i := 0; i < dataVal.Len(); i++ {
+ currentData := dataVal.Index(i).Interface()
+ currentField := valArray.Index(i)
+
+ fieldName := fmt.Sprintf("%s[%d]", name, i)
+ if err := d.decode(fieldName, currentData, currentField); err != nil {
+ errors = appendErrors(errors, err)
+ }
+ }
+
+ // Finally, set the value to the array we built up
+ val.Set(valArray)
+
+ // If there were errors, we return those
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ return nil
+}
+
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
@@ -716,7 +795,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
errors = appendErrors(errors,
fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
} else {
- structs = append(structs, val.FieldByName(fieldType.Name))
+ structs = append(structs, structVal.FieldByName(fieldType.Name))
}
continue
}
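
The new reflect.Array branch enables decodes like the following; a minimal sketch against the public API (the struct and values are made up):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Server struct {
	Name  string
	Ports [2]int // fixed-size array, handled by the new decodeArray
}

func main() {
	input := map[string]interface{}{
		"name":  "web-1",
		"ports": []int{80, 443}, // slices decode into arrays of equal or greater length
	}

	var s Server
	if err := mapstructure.Decode(input, &s); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", s) // {Name:web-1 Ports:[80 443]}
}
```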
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go
index 08e495664..ecfb76987 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go
@@ -258,3 +258,21 @@ func TestDecodeSliceToEmptySliceWOZeroing(t *testing.T) {
}
}
}
+
+// #70
+func TestNextSquashMapstructure(t *testing.T) {
+ data := &struct {
+ Level1 struct {
+ Level2 struct {
+ Foo string
+ } `mapstructure:",squash"`
+ } `mapstructure:",squash"`
+ }{}
+ err := Decode(map[interface{}]interface{}{"foo": "baz"}, &data)
+ if err != nil {
+ t.Fatalf("should not error: %s", err)
+ }
+ if data.Level1.Level2.Foo != "baz" {
+ t.Fatal("value should be baz")
+ }
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go
index 547af7331..89861edda 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go
@@ -49,6 +49,13 @@ type EmbeddedSlice struct {
Vunique string
}
+type ArrayAlias [2]string
+
+type EmbeddedArray struct {
+ ArrayAlias `mapstructure:"array_alias"`
+ Vunique string
+}
+
type SquashOnNonStructType struct {
InvalidSquashType int `mapstructure:",squash"`
}
@@ -85,6 +92,15 @@ type SliceOfStruct struct {
Value []Basic
}
+type Array struct {
+ Vfoo string
+ Vbar [2]string
+}
+
+type ArrayOfStruct struct {
+ Value [2]Basic
+}
+
type Func struct {
Foo func() string
}
@@ -112,14 +128,19 @@ type TypeConversionResult struct {
FloatToBool bool
FloatToString string
SliceUint8ToString string
+ ArrayUint8ToString string
StringToInt int
StringToUint uint
StringToBool bool
StringToFloat float32
StringToStrSlice []string
StringToIntSlice []int
+ StringToStrArray [1]string
+ StringToIntArray [1]int
SliceToMap map[string]interface{}
MapToSlice []interface{}
+ ArrayToMap map[string]interface{}
+ MapToArray [1]interface{}
}
func TestBasicTypes(t *testing.T) {
@@ -322,6 +343,29 @@ func TestDecode_EmbeddedSlice(t *testing.T) {
}
}
+func TestDecode_EmbeddedArray(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "array_alias": [2]string{"foo", "bar"},
+ "vunique": "bar",
+ }
+
+ var result EmbeddedArray
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if !reflect.DeepEqual(result.ArrayAlias, ArrayAlias([2]string{"foo", "bar"})) {
+ t.Errorf("array value: %#v", result.ArrayAlias)
+ }
+
+ if result.Vunique != "bar" {
+ t.Errorf("vunique value should be 'bar': %#v", result.Vunique)
+ }
+}
+
func TestDecode_EmbeddedSquash(t *testing.T) {
t.Parallel()
@@ -582,14 +626,19 @@ func TestDecode_TypeConversion(t *testing.T) {
"FloatToBool": 42.42,
"FloatToString": 42.42,
"SliceUint8ToString": []uint8("foo"),
+ "ArrayUint8ToString": [3]uint8{'f', 'o', 'o'},
"StringToInt": "42",
"StringToUint": "42",
"StringToBool": "1",
"StringToFloat": "42.42",
"StringToStrSlice": "A",
"StringToIntSlice": "42",
+ "StringToStrArray": "A",
+ "StringToIntArray": "42",
"SliceToMap": []interface{}{},
"MapToSlice": map[string]interface{}{},
+ "ArrayToMap": []interface{}{},
+ "MapToArray": map[string]interface{}{},
}
expectedResultStrict := TypeConversionResult{
@@ -622,14 +671,19 @@ func TestDecode_TypeConversion(t *testing.T) {
FloatToBool: true,
FloatToString: "42.42",
SliceUint8ToString: "foo",
+ ArrayUint8ToString: "foo",
StringToInt: 42,
StringToUint: 42,
StringToBool: true,
StringToFloat: 42.42,
StringToStrSlice: []string{"A"},
StringToIntSlice: []int{42},
+ StringToStrArray: [1]string{"A"},
+ StringToIntArray: [1]int{42},
SliceToMap: map[string]interface{}{},
MapToSlice: []interface{}{},
+ ArrayToMap: map[string]interface{}{},
+ MapToArray: [1]interface{}{},
}
// Test strict type conversion
@@ -965,6 +1019,99 @@ func TestSliceToMap(t *testing.T) {
}
}
+func TestArray(t *testing.T) {
+ t.Parallel()
+
+ inputStringArray := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": [2]string{"foo", "bar"},
+ }
+
+ inputStringArrayPointer := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": &[2]string{"foo", "bar"},
+ }
+
+ outputStringArray := &Array{
+ "foo",
+ [2]string{"foo", "bar"},
+ }
+
+ testArrayInput(t, inputStringArray, outputStringArray)
+ testArrayInput(t, inputStringArrayPointer, outputStringArray)
+}
+
+func TestInvalidArray(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": 42,
+ }
+
+ result := Array{}
+ err := Decode(input, &result)
+ if err == nil {
+ t.Errorf("expected failure")
+ }
+}
+
+func TestArrayOfStruct(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "value": []map[string]interface{}{
+ {"vstring": "one"},
+ {"vstring": "two"},
+ },
+ }
+
+ var result ArrayOfStruct
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got unexpected error: %s", err)
+ }
+
+ if len(result.Value) != 2 {
+ t.Fatalf("expected two values, got %d", len(result.Value))
+ }
+
+ if result.Value[0].Vstring != "one" {
+ t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring)
+ }
+
+ if result.Value[1].Vstring != "two" {
+ t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring)
+ }
+}
+
+func TestArrayToMap(t *testing.T) {
+ t.Parallel()
+
+ input := []map[string]interface{}{
+ {
+ "foo": "bar",
+ },
+ {
+ "bar": "baz",
+ },
+ }
+
+ var result map[string]interface{}
+ err := WeakDecode(input, &result)
+ if err != nil {
+ t.Fatalf("got an error: %s", err)
+ }
+
+ expected := map[string]interface{}{
+ "foo": "bar",
+ "bar": "baz",
+ }
+ if !reflect.DeepEqual(result, expected) {
+ t.Errorf("bad: %#v", result)
+ }
+}
+
func TestInvalidType(t *testing.T) {
t.Parallel()
@@ -1191,3 +1338,31 @@ func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice)
}
}
}
+
+func testArrayInput(t *testing.T, input map[string]interface{}, expected *Array) {
+ var result Array
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got error: %s", err)
+ }
+
+ if result.Vfoo != expected.Vfoo {
+ t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo)
+ }
+
+ if result.Vbar == [2]string{} {
+ t.Fatalf("Vbar should not be the zero array, got '%#v'", result.Vbar)
+ }
+
+ if len(result.Vbar) != len(expected.Vbar) {
+ t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar))
+ }
+
+ for i, v := range result.Vbar {
+ if v != expected.Vbar[i] {
+ t.Errorf(
+ "Vbar[%d] should be '%#v', got '%#v'",
+ i, expected.Vbar[i], v)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/.gitignore b/vendor/github.com/olivere/elastic/.gitignore
new file mode 100644
index 000000000..306ffbd83
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/.gitignore
@@ -0,0 +1,33 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+/.vscode/
+/debug.test
+/generator
+/cluster-test/cluster-test
+/cluster-test/*.log
+/cluster-test/es-chaos-monkey
+/spec
+/tmp
+/CHANGELOG-3.0.html
+
diff --git a/vendor/github.com/olivere/elastic/.travis.yml b/vendor/github.com/olivere/elastic/.travis.yml
new file mode 100644
index 000000000..b4322c13c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/.travis.yml
@@ -0,0 +1,15 @@
+sudo: required
+language: go
+script: go test -race -v . ./config
+go:
+ - 1.8.x
+ - 1.9.x
+ # - tip
+matrix:
+ allow_failures:
+ - go: tip
+services:
+ - docker
+before_install:
+ - sudo sysctl -w vm.max_map_count=262144
+ - docker run -d --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:6.1.2 elasticsearch -Expack.security.enabled=false -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_
diff --git a/vendor/github.com/olivere/elastic/CHANGELOG-3.0.md b/vendor/github.com/olivere/elastic/CHANGELOG-3.0.md
new file mode 100644
index 000000000..07f3e66bf
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/CHANGELOG-3.0.md
@@ -0,0 +1,363 @@
+# Elastic 3.0
+
+Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes.
+
+We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This will introduce both changes due to the Elasticsearch 2.0 update as well as changes that make Elastic cleaner by removing some old cruft.
+
+So, to summarize:
+
+1. Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained.
+2. Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch.
+
+The rest of the document is a list of all changes in Elastic 3.0.
+
+## Pointer types
+
+All types have changed to be pointer types, not value types. This not only is cleaner but also simplifies the API as illustrated by the following example:
+
+Example for Elastic 2.0 (old):
+
+```go
+q := elastic.NewMatchAllQuery()
+res, err := elastic.Search("one").Query(&q).Do() // notice the & here
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+q := elastic.NewMatchAllQuery()
+res, err := elastic.Search("one").Query(q).Do() // no more &
+// ... which can be simplified as:
+res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do()
+```
+
+It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046).
+
+## Query/filter merge
+
+One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`).
+
+The practical aspect of the merge is that you can now use queries where you once had to use filters. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay!
+
+Notice that some methods still carry "filter" in their names, e.g. `PostFilter`. However, they now accept a `Query` where they used to accept a `Filter`.
+
+Example for Elastic 2.0 (old):
+
+```go
+q := elastic.NewMatchAllQuery()
+f := elastic.NewTermFilter("tag", "important")
+res, err := elastic.Search().Index("one").Query(&q).PostFilter(f)
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+q := elastic.NewMatchAllQuery()
+f := elastic.NewTermQuery("tag", "important") // it's a query now!
+res, err := elastic.Search().Index("one").Query(q).PostFilter(f)
+```
+
+## Facets are removed
+
+[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now.
+
+## Errors
+
+Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible by the consumer.
+
+Errors and all its details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59).
+
+### HTTP Status 404 (Not Found)
+
+When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0.
+
+Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch to e.g. check for existence of indices or documents. All other responses will return an error.
+
+To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below).
+
+The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0.
+
+Example for Elastic 2.0 (old):
+
+```go
+res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
+if err != nil {
+ // Something else went wrong (but 404 is NOT an error in Elastic 2.0)
+}
+if !res.Found {
+ // Document has not been found
+}
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
+if err != nil {
+ if elastic.IsNotFound(err) {
+ // Document has not been found
+ } else {
+ // Something else went wrong
+ }
+}
+```
+
+### HTTP Status 408 (Timeouts)
+
+Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is exceeded. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API.
+
+To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper.
+
+Example for Elastic 2.0 (old):
+
+```go
+health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
+if err != nil {
+ // ...
+}
+if health.TimedOut {
+ // We have a timeout
+}
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
+if elastic.IsTimeout(err) {
+ // We have a timeout
+}
+```
+
+### Bulk Errors
+
+The error response of a bulk operation used to be a simple string in Elasticsearch 1.x.
+In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error.
+These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206).
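+
+A sketch of inspecting failed bulk items (the bulkable requests `req1` and `req2` are assumed to be built elsewhere):
+
+```go
+res, err := client.Bulk().Add(req1).Add(req2).Do()
+if err != nil {
+	// The bulk request as a whole failed
+	return
+}
+if res.Errors {
+	for _, item := range res.Failed() {
+		// item.Error is an *elastic.ErrorDetails with fields such as Type and Reason
+		fmt.Printf("document %s failed: %s\n", item.Id, item.Error.Reason)
+	}
+}
+```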
+
+### Removed specific Elastic errors
+
+The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message.
+
+## Numeric types
+
+Elastic 3.0 has settled on using `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`.
+
+## Pluralization
+
+Some services accept zero, one, or more indices or types to operate on.
+E.g. the `SearchService` accepts a list of zero, one, or more indices to
+search and therefore had a func called `Index(index string)` and a func
+called `Indices(indices ...string)`.
+
+Elastic 3.0 now only uses the singular form which, where applicable, accepts
+variadic arguments. E.g. in the case of the `SearchService`, you now only have
+one func with the following signature: `Index(indices ...string)`.
+
+Notice that this change is limited to `Index(...)` and `Type(...)`. Other
+services with variadic functions have not been changed.
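+
+A sketch of the new variadic form (index names are hypothetical):
+
+```go
+// Elastic 2.0 (old): Index("one") plus a separate Indices("one", "two")
+// Elastic 3.0 (new): a single variadic func covers both cases
+res, err := elastic.Search().Index("one", "two").Query(elastic.NewMatchAllQuery()).Do()
+```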
+
+## Multiple calls to variadic functions
+
+Some services with variadic functions cleared the underlying slice when
+called multiple times, while other services appended to the existing slice.
+This has now been normalized to always append to the underlying slice.
+
+Example for Elastic 2.0 (old):
+
+```go
+// Would only clear scroll id "two"
+// because ScrollId cleared the values when called multiple times
+client.ClearScroll().ScrollId("one").ScrollId("two").Do()
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+// Now (correctly) clears both scroll ids "one" and "two"
+// because ScrollId no longer clears the values when called multiple times
+client.ClearScroll().ScrollId("one").ScrollId("two").Do()
+```
+
+## Ping service requires URL
+
+The `Ping` service raised some issues because it is different from all
+other services. If not explicitly given a URL, it always pinged `127.0.0.1:9200`.
+
+Users expected to ping the cluster, but that is not possible as the cluster
+can be a set of many nodes: so which node should be pinged?
+
+To make this explicit, the `Ping` function on the client now requires users
+to set the URL of the node to ping.
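+
+A sketch, assuming a node listening on the default address:
+
+```go
+info, code, err := client.Ping("http://127.0.0.1:9200").Do()
+if err != nil {
+	log.Fatal(err)
+}
+fmt.Printf("Elasticsearch version %s (HTTP status %d)\n", info.Version.Number, code)
+```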
+
+## Meta fields
+
+Many of the meta fields e.g. `_parent` or `_routing` are now
+[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields)
+and are no longer returned as parts of the `fields` object. We had to change
+larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0.
+
+Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default).
+
+## HasParentQuery / HasChildQuery
+
+`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. It is now in line with the Java API.
+
+Example for Elastic 2.0 (old):
+
+```go
+allQ := elastic.NewMatchAllQuery()
+q := elastic.NewHasChildFilter("tweet").Query(&allQ)
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery())
+```
+
+## SetBasicAuth client option
+
+You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html).
+
+Example:
+
+```go
+client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret"))
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+## Delete-by-Query API
+
+The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
+
+Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you use `DeleteByQueryService` without installing the plugin, you will most probably get a 404.
+
+An older version of this document stated the following:
+
+> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed.
+>
+> Example for Elastic 3.0 (new):
+>
+> ```go
+> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do()
+> if err == elastic.ErrPluginNotFound {
+> // Delete By Query API is not available
+> }
+> ```
+
+I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check whether the plugin is missing whenever an error occurs. This is not only slow, but it also puts logic into a service that should really remain opaque and simply return the response of Elasticsearch.
+
+If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play.
+
+## HasPlugin and SetRequiredPlugins
+
+Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
+
+You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. You can test this now with the `HasPlugin(name string)` helper in the client.
+
+Example for Elastic 3.0 (new):
+
+```go
+found, err := client.HasPlugin("delete-by-query")
+if err == nil && found {
+ // ... Delete By Query API is available
+}
+```
+
+To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If a required plugin is not installed, the client will not be created in the first place.
+
+```go
+// Will raise an error if the "delete-by-query" plugin is NOT installed
+client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query"))
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+Notice that there also is a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file.
+
+## Common Query has been renamed to Common Terms Query
+
+The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring).
+
+## Remove `MoreLikeThis` and `MoreLikeThisField`
+
+The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`.
+
+## Remove Filtered Query
+
+With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite much of your application code, it's a good chance to get rid of deprecated things as well. So you can simply replace your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated).
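+
+A sketch of that rewrite (field names are hypothetical):
+
+```go
+// Elasticsearch 1.x: elastic.NewFilteredQuery(query).Filter(filter)
+// Elasticsearch 2.x: a bool query with a filter clause
+q := elastic.NewBoolQuery().
+	Must(elastic.NewMatchQuery("message", "elasticsearch")).
+	Filter(elastic.NewTermQuery("status", "published"))
+```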
+
+## Remove FuzzyLikeThis and FuzzyLikeThisField
+
+Both have been removed from Elasticsearch 2.0 as well.
+
+## Remove LimitFilter
+
+The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects.
+
+## Remove `_cache` and `_cache_key` from filters
+
+Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching).
+
+## Partial fields are gone
+
+Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html).
+
+## Scripting
+
+A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g. aggregations) where you could just add the script as a string, specify the scripting language, add parameters etc. With Elastic 3.0, you should now always use the `Script` type.
+
+Example for Elastic 2.0 (old):
+
+```go
+update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+ Script("ctx._source.retweets += num").
+ ScriptParams(map[string]interface{}{"num": 1}).
+ Upsert(map[string]interface{}{"retweets": 0}).
+ Do()
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+ Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)).
+ Upsert(map[string]interface{}{"retweets": 0}).
+ Do()
+```
+
+## Cluster State
+
+The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)`.
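+
+For example, a sketch (metric names as defined by the Elasticsearch Cluster State API):
+
+```go
+res, err := client.ClusterState().Metric("nodes", "routing_table").Do()
+```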
+
+## Unexported structs in response
+
+Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0, however, we changed this: most sub-structs are now unexported, meaning you can only pass around the whole response, not sub-structures of it. This makes it easier to restructure responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example.
+
+## Add offset to Histogram aggregation
+
+Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option.
+
+## Services
+
+### REST API specification
+
+As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure.
+
+Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process.
+
+This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes.
+
+At the same time, the file names of the services are renamed to match the REST API specification naming.
+
+### REST API Test Suite
+
+The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well.
+
+This process is not completed yet, though.
+
+
diff --git a/vendor/github.com/olivere/elastic/CHANGELOG-5.0.md b/vendor/github.com/olivere/elastic/CHANGELOG-5.0.md
new file mode 100644
index 000000000..161c6a1ce
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/CHANGELOG-5.0.md
@@ -0,0 +1,195 @@
+# Changes in Elastic 5.0
+
+## Enforce context.Context in PerformRequest and Do
+
+We enforce the usage of `context.Context` everywhere you execute a request.
+You need to change all your `Do()` calls to pass a context: `Do(ctx)`.
+This enables automatic request cancelation and many other patterns.
+
+If you don't need this, simply pass `context.TODO()` or `context.Background()`.
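+
+A sketch of the new calling convention (index name is hypothetical):
+
+```go
+ctx := context.Background()
+res, err := client.Search().
+	Index("tweets").
+	Query(elastic.NewMatchAllQuery()).
+	Do(ctx)
+```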
+
+## Warmers removed
+
+Warmers are no longer necessary and have been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_index_apis.html#_warmers).
+
+## Optimize removed
+
+Optimize was deprecated in ES 2.0 and has been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_rest_api_changes.html#_literal__optimize_literal_endpoint_removed).
+Use [Force Merge](https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html) instead.
+
+## Missing Query removed
+
+The `missing` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-exists-query.html#_literal_missing_literal_query).
+Use `exists` query with `must_not` in `bool` query instead.
+
+## And Query removed
+
+The `and` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use `must` clauses in a `bool` query instead.
+
+## Not Query removed
+
+The `not` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use `must_not` clauses in a `bool` query instead.
+
+## Or Query removed
+
+The `or` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use `should` clauses in a `bool` query instead.
+
+## Filtered Query removed
+
+The `filtered` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use `bool` query instead, which supports `filter` clauses too.
+
+## Limit Query removed
+
+The `limit` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use the `terminate_after` parameter instead.
+
+## Template Query removed
+
+The `template` query has been [deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-template-query.html). You should use
+Search Templates instead.
+
+We removed it from Elastic 5.0, as the 5.0 update is already a good opportunity
+to get rid of old stuff.
+
+## `_timestamp` and `_ttl` removed
+
+Both of these fields were deprecated and are now [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_mapping_changes.html#_literal__timestamp_literal_and_literal__ttl_literal).
+
+## Search template Put/Delete API returns `acknowledged` only
+
+The response type for Put/Delete search templates has changed.
+It only returns a single `acknowledged` flag now.
+
+## Fields has been renamed to Stored Fields
+
+The `fields` parameter has been renamed to `stored_fields`.
+See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fields_literal_parameter).
+
+## Fielddatafields has been renamed to Docvaluefields
+
+The `fielddata_fields` parameter [has been renamed](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fielddata_fields_literal_parameter)
+to `docvalue_fields`.
+
+## Type exists endpoint changed
+
+The endpoint for checking whether a type exists has been changed from
+`HEAD {index}/{type}` to `HEAD {index}/_mapping/{type}`.
+See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking_50_rest_api_changes.html#_literal_head_index_type_literal_replaced_with_literal_head_index__mapping_type_literal).
+
+## Refresh parameter changed
+
+The `?refresh` parameter could previously be a boolean value. It indicated
+whether changes made by a request (e.g. by the Bulk API) should be immediately
+visible in search or not. Using `refresh=true` had the positive effect of
+immediately seeing the changes when searching; the negative effect is that
+it is a rather big performance hit.
+
+With 5.0, you now have the choice between these three values:
+
+* `"true"` - Refresh immediately
+* `"false"` - Do not refresh (the default value)
+* `"wait_for"` - Wait until Elasticsearch has made the change visible in search
+
+See [?refresh](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-refresh.html) in the documentation.
+
+Notice that `true` and `false` (the boolean values) are no longer available
+in Elastic. You must use a string instead, with one of the above values.
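+
+A sketch using the Index API (index, type, and id are hypothetical):
+
+```go
+_, err := client.Index().
+	Index("tweets").
+	Type("tweet").
+	Id("1").
+	BodyJson(map[string]interface{}{"message": "Hello"}).
+	Refresh("wait_for").
+	Do(ctx)
+```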
+
+## ReindexerService removed
+
+The `ReindexerService` was a custom solution that was started in the ES 1.x era
+to automate reindexing data, from one index to another or even between clusters.
+
+ES 2.3 introduced its own [Reindex API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html),
+so we removed our custom solution and ask you to use the native reindexer.
+
+The `ReindexService` is available via `client.Reindex()` (which used to point
+to the custom reindexer).
+
+## Delete By Query back in core
+
+The [Delete By Query API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html)
+was moved into a plugin in 2.0. Now it's back in core with a complete rewrite based on the Bulk API.
+
+It has its own endpoint at `/_delete_by_query`.
+
+Delete By Query, Reindex, and Update By Query are very similar under the hood.
+
+## Reindex, Delete By Query, and Update By Query response changed
+
+The response from the above APIs changed a bit. E.g. the `retries` value
+used to be an `int64`; it now contains separate values for `bulk` and `search`:
+
+```
+// Old
+{
+ ...
+ "retries": 123,
+ ...
+}
+```
+
+```
+// New
+{
+ ...
+ "retries": {
+ "bulk": 123,
+ "search": 0
+ },
+ ...
+}
+```
+
+## ScanService removed
+
+The `ScanService` is removed. Use the (new) `ScrollService` instead.
+
+## New ScrollService
+
+There was confusion around `ScanService` and `ScrollService` doing basically
+the same thing. One returned slices and didn't support all query details; the
+other returned one document after another and wasn't safe for concurrent use.
+So we merged the two into a new `ScrollService` that
+removes all the problems of the older services.
+
+In other words:
+If you used `ScanService`, switch to `ScrollService`.
+If you used the old `ScrollService`, you might need to fix some things but
+overall it should just work.
+
+Changes:
+- We replaced `elastic.EOS` with `io.EOF` to indicate the "end of scroll".
+
+TODO Not implemented yet
+
+## Suggesters
+
+They have been [completely rewritten in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html).
+
+Some changes:
+- Suggesters no longer have an [output](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html#_simpler_completion_indexing).
+
+TODO Fix all structural changes in suggesters
+
+## Percolator
+
+Percolator has [changed considerably](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_percolator.html).
+
+Elastic 5.0 adds the new
+[Percolator Query](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html)
+which can be used in combination with the new
+[Percolator type](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/percolator.html).
+
+The Percolate service is removed from Elastic 5.0.
+
+## Remove Consistency, add WaitForActiveShards
+
+The `consistency` parameter has been removed in a lot of places, e.g. the Bulk,
+Index, Delete, Delete-by-Query, Reindex, Update, and Update-by-Query API.
+
+It has been replaced by a somewhat similar `wait_for_active_shards` parameter.
+See https://github.com/elastic/elasticsearch/pull/19454.
diff --git a/vendor/github.com/olivere/elastic/CHANGELOG-6.0.md b/vendor/github.com/olivere/elastic/CHANGELOG-6.0.md
new file mode 100644
index 000000000..277925929
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/CHANGELOG-6.0.md
@@ -0,0 +1,18 @@
+# Changes from 5.0 to 6.0
+
+See [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-6.0.html).
+
+## _all removed
+
+6.0 has removed support for the `_all` field.
+
+## Boolean values coerced
+
+Only use `true` or `false` for boolean values, not `0` or `1` or `on` or `off`.
+
+## Single Type Indices
+
+Notice that 6.0 and future versions will default to single-type indices, i.e. you may not use multiple types when e.g. creating an index with a mapping.
+
+See [here for details](https://www.elastic.co/guide/en/elasticsearch/reference/6.x/removal-of-types.html#_what_are_mapping_types).
+
diff --git a/vendor/github.com/olivere/elastic/CODE_OF_CONDUCT.md b/vendor/github.com/olivere/elastic/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..acefecee5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at oliver@eilhard.net. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/olivere/elastic/CONTRIBUTING.md b/vendor/github.com/olivere/elastic/CONTRIBUTING.md
new file mode 100644
index 000000000..4fbc79dd0
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/CONTRIBUTING.md
@@ -0,0 +1,40 @@
+# How to contribute
+
+Elastic is an open-source project and we are looking forward to each
+contribution.
+
+Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level
+overview of the features of Elasticsearch. Elastic, however, tries to resemble
+the Java API of Elasticsearch, which you can find [on GitHub](https://github.com/elastic/elasticsearch).
+
+This explains why you might think that some options are strange or missing
+in Elastic, while often they're just different. Please check the Java API first.
+
+Having said that: Elasticsearch is moving fast and it is very likely
+that we missed some features or changes. Feel free to change that.
+
+## Your Pull Request
+
+To make it easy to review and understand your changes, please keep the
+following things in mind before submitting your pull request:
+
+* You compared the existing implementation with the Java API, didn't you?
+* Please work on the latest possible state of `olivere/elastic`.
+ Use `release-branch.v2` for targeting Elasticsearch 1.x and
+ `release-branch.v3` for targeting 2.x.
+* Create a branch dedicated to your change.
+* If possible, write a test case which confirms your change.
+* Make sure your changes and your tests work with all recent versions of
+ Elasticsearch. We currently support Elasticsearch 1.7.x in the
+ release-branch.v2 and Elasticsearch 2.x in the release-branch.v3.
+* Test your changes before creating a pull request (`go test ./...`).
+* Don't mix several features or bug fixes in one pull request.
+* Create a meaningful commit message.
+* Explain your change, e.g. provide a link to the issue you are fixing and
+ probably a link to the Elasticsearch documentation and/or source code.
+* Format your source with `go fmt`.
+
+## Additional Resources
+
+* [GitHub documentation](http://help.github.com/)
+* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)
diff --git a/vendor/github.com/olivere/elastic/CONTRIBUTORS b/vendor/github.com/olivere/elastic/CONTRIBUTORS
new file mode 100644
index 000000000..d7f7f780f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/CONTRIBUTORS
@@ -0,0 +1,123 @@
+# This is a list of people who have contributed code
+# to the Elastic repository.
+#
+# It is just my small "thank you" to all those that helped
+# making Elastic what it is.
+#
+# Please keep this list sorted.
+
+0x6875790d0a [@huydx](https://github.com/huydx)
+Adam Alix [@adamalix](https://github.com/adamalix)
+Adam Weiner [@adamweiner](https://github.com/adamweiner)
+Adrian Lungu [@AdrianLungu](https://github.com/AdrianLungu)
+alehano [@alehano](https://github.com/alehano)
+Alex [@akotlar](https://github.com/akotlar)
+Alexander Sack [@asac](https://github.com/asac)
+Alexandre Olivier [@aliphen](https://github.com/aliphen)
+Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
+AndreKR [@AndreKR](https://github.com/AndreKR)
+André Bierlein [@ligustah](https://github.com/ligustah)
+Andrew Dunham [@andrew-d](https://github.com/andrew-d)
+Andrew Gaul [@andrewgaul](https://github.com/andrewgaul)
+Andy Walker [@alaska](https://github.com/alaska)
+Arquivei [@arquivei](https://github.com/arquivei)
+arthurgustin [@arthurgustin](https://github.com/arthurgustin)
+Benjamin Fernandes [@LotharSee](https://github.com/LotharSee)
+Benjamin Zarzycki [@kf6nux](https://github.com/kf6nux)
+Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va)
+Brady Love [@bradylove](https://github.com/bradylove)
+Bryan Conklin [@bmconklin](https://github.com/bmconklin)
+Bruce Zhou [@brucez-isell](https://github.com/brucez-isell)
+cforbes [@cforbes](https://github.com/cforbes)
+Chris M [@tebriel](https://github.com/tebriel)
+Chris Rice [@donutmonger](https://github.com/donutmonger)
+Claudiu Olteanu [@claudiuolteanu](https://github.com/claudiuolteanu)
+Christophe Courtaut [@kri5](https://github.com/kri5)
+Connor Peet [@connor4312](https://github.com/connor4312)
+Conrad Pankoff [@deoxxa](https://github.com/deoxxa)
+Corey Scott [@corsc](https://github.com/corsc)
+Daniel Barrett [@shendaras](https://github.com/shendaras)
+Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath)
+Daniel Imfeld [@dimfeld](https://github.com/dimfeld)
+Dwayne Schultz [@myshkin5](https://github.com/myshkin5)
+Ellison Leão [@ellisonleao](https://github.com/ellisonleao)
+Erwin [@eticzon](https://github.com/eticzon)
+Eugene Egorov [@EugeneEgorov](https://github.com/EugeneEgorov)
+Evan Shaw [@edsrzf](https://github.com/edsrzf)
+Fanfan [@wenpos](https://github.com/wenpos)
+Faolan C-P [@fcheslack](https://github.com/fcheslack)
+Filip Tepper [@filiptepper](https://github.com/filiptepper)
+Gaylord Aulke [@blafasel42](https://github.com/blafasel42)
+Gerhard Häring [@ghaering](https://github.com/ghaering)
+Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos)
+Guillaume J. Charmes [@creack](https://github.com/creack)
+Guiseppe [@gm42](https://github.com/gm42)
+Han Yu [@MoonighT](https://github.com/MoonighT)
+Harmen [@alicebob](https://github.com/alicebob)
+Harrison Wright [@wright8191](https://github.com/wright8191)
+Henry Clifford [@hcliff](https://github.com/hcliff)
+Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy)
+initialcontext [@initialcontext](https://github.com/initialcontext)
+Isaac Saldana [@isaldana](https://github.com/isaldana)
+Jack Lindamood [@cep21](https://github.com/cep21)
+Jacob [@jdelgad](https://github.com/jdelgad)
+Jayme Rotsaert [@jrots](https://github.com/jrots)
+Jeremy Canady [@jrmycanady](https://github.com/jrmycanady)
+Jim Berlage [@jimberlage](https://github.com/jimberlage)
+Joe Buck [@four2five](https://github.com/four2five)
+John Barker [@j16r](https://github.com/j16r)
+John Goodall [@jgoodall](https://github.com/jgoodall)
+John Stanford [@jxstanford](https://github.com/jxstanford)
+Josh Chorlton [@jchorl](https://github.com/jchorl)
+jun [@coseyo](https://github.com/coseyo)
+Junpei Tsuji [@jun06t](https://github.com/jun06t)
+Keith Hatton [@khatton-ft](https://github.com/khatton-ft)
+kel [@liketic](https://github.com/liketic)
+Kenta SUZUKI [@suzuken](https://github.com/suzuken)
+Kevin Mulvey [@kmulvey](https://github.com/kmulvey)
+Kyle Brandt [@kylebrandt](https://github.com/kylebrandt)
+Leandro Piccilli [@lpic10](https://github.com/lpic10)
+M. Zulfa Achsani [@misterciput](https://github.com/misterciput)
+Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh)
+Mara Kim [@autochthe](https://github.com/autochthe)
+Marcy Buccellato [@marcybuccellato](https://github.com/marcybuccellato)
+Mark Costello [@mcos](https://github.com/mcos)
+Martin Häger [@protomouse](https://github.com/protomouse)
+Medhi Bechina [@mdzor](https://github.com/mdzor)
+mnpritula [@mnpritula](https://github.com/mnpritula)
+mosa [@mosasiru](https://github.com/mosasiru)
+naimulhaider [@naimulhaider](https://github.com/naimulhaider)
+Naoya Yoshizawa [@azihsoyn](https://github.com/azihsoyn)
+navins [@ishare](https://github.com/ishare)
+Naoya Tsutsumi [@tutuming](https://github.com/tutuming)
+Nicholas Wolff [@nwolff](https://github.com/nwolff)
+Nick K [@utrack](https://github.com/utrack)
+Nick Whyte [@nickw444](https://github.com/nickw444)
+Nicolae Vartolomei [@nvartolomei](https://github.com/nvartolomei)
+Orne Brocaar [@brocaar](https://github.com/brocaar)
+Paul [@eyeamera](https://github.com/eyeamera)
+Pete C [@peteclark-ft](https://github.com/peteclark-ft)
+Radoslaw Wesolowski [r--w](https://github.com/r--w)
+Ryan Schmukler [@rschmukler](https://github.com/rschmukler)
+Sacheendra talluri [@sacheendra](https://github.com/sacheendra)
+Sean DuBois [@Sean-Der](https://github.com/Sean-Der)
+Shalin LK [@shalinlk](https://github.com/shalinlk)
+Stephen Kubovic [@stephenkubovic](https://github.com/stephenkubovic)
+Stuart Warren [@Woz](https://github.com/stuart-warren)
+Sulaiman [@salajlan](https://github.com/salajlan)
+Sundar [@sundarv85](https://github.com/sundarv85)
+Swarlston [@Swarlston](https://github.com/Swarlston)
+Take [ww24](https://github.com/ww24)
+Tetsuya Morimoto [@t2y](https://github.com/t2y)
+TimeEmit [@TimeEmit](https://github.com/timeemit)
+TusharM [@tusharm](https://github.com/tusharm)
+zhangxin [@visaxin](https://github.com/visaxin)
+wangtuo [@wangtuo](https://github.com/wangtuo)
+Wédney Yuri [@wedneyyuri](https://github.com/wedneyyuri)
+wolfkdy [@wolfkdy](https://github.com/wolfkdy)
+Wyndham Blanton [@wyndhblb](https://github.com/wyndhblb)
+Yarden Bar [@ayashjorden](https://github.com/ayashjorden)
+zakthomas [@zakthomas](https://github.com/zakthomas)
+singham [@zhaochenxiao90](https://github.com/zhaochenxiao90)
+@林 [@zplzpl](https://github.com/zplzpl)
+Roman Colohanin [@zuzmic](https://github.com/zuzmic)
diff --git a/vendor/github.com/olivere/elastic/ISSUE_TEMPLATE.md b/vendor/github.com/olivere/elastic/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..88d66cc83
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/ISSUE_TEMPLATE.md
@@ -0,0 +1,18 @@
+Please use the following questions as a guideline to help me answer
+your issue/question without further inquiry. Thank you.
+
+### Which version of Elastic are you using?
+
+[ ] elastic.v2 (for Elasticsearch 1.x)
+[ ] elastic.v3 (for Elasticsearch 2.x)
+[ ] elastic.v5 (for Elasticsearch 5.x)
+[ ] elastic.v6 (for Elasticsearch 6.x)
+
+### Please describe the expected behavior
+
+
+### Please describe the actual behavior
+
+
+### Any steps to reproduce the behavior?
+
diff --git a/vendor/github.com/cpanato/html2text/LICENSE b/vendor/github.com/olivere/elastic/LICENSE
index 1f2423ecb..8b22cdb60 100644
--- a/vendor/github.com/cpanato/html2text/LICENSE
+++ b/vendor/github.com/olivere/elastic/LICENSE
@@ -1,23 +1,20 @@
The MIT License (MIT)
-
-Copyright (c) 2015 Jay Taylor
-Modified work: Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
+Copyright © 2012-2015 Oliver Eilhard
Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
+of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/vendor/github.com/olivere/elastic/README.md b/vendor/github.com/olivere/elastic/README.md
new file mode 100644
index 000000000..f452b664d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/README.md
@@ -0,0 +1,391 @@
+# Elastic
+
+**This is a development branch that is actively being worked on. DO NOT USE IN PRODUCTION!**
+
+Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
+[Go](http://www.golang.org/) programming language.
+
+[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v6)](https://travis-ci.org/olivere/elastic)
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/github.com/olivere/elastic)
+[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
+
+
+## Releases
+
+**The release branches (e.g. [`release-branch.v6`](https://github.com/olivere/elastic/tree/release-branch.v6))
+are actively being worked on and can break at any time.
+If you want to use stable versions of Elastic, please use a dependency manager like [dep](https://github.com/golang/dep).**
+
+Here's the version matrix:
+
+Elasticsearch version | Elastic version | Package URL | Remarks |
+----------------------|------------------|-------------|---------|
+6.x | 6.0 | [`github.com/olivere/elastic`](https://github.com/olivere/elastic) ([source](https://github.com/olivere/elastic/tree/release-branch.v6) [doc](http://godoc.org/github.com/olivere/elastic)) | Use a dependency manager (see below).
+5.x | 5.0 | [`gopkg.in/olivere/elastic.v5`](https://gopkg.in/olivere/elastic.v5) ([source](https://github.com/olivere/elastic/tree/release-branch.v5) [doc](http://godoc.org/gopkg.in/olivere/elastic.v5)) | Actively maintained.
+2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3)) | Deprecated. Please update.
+1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2)) | Deprecated. Please update.
+0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1)) | Deprecated. Please update.
+
+**Example:**
+
+You have installed Elasticsearch 6.0.0 and want to use Elastic.
+As listed above, you should use Elastic 6.0.
+
+To use the required version of Elastic in your application, it is strongly
+advised to use a tool like
+[dep](https://github.com/golang/dep)
+or
+[Glide](https://glide.sh/)
+to manage that dependency. Make sure to use a version such as `^6.0.0`.
+
+To use Elastic, simply import:
+
+```go
+import "github.com/olivere/elastic"
+```
+
+### Elastic 6.0
+
+Elastic 6.0 targets Elasticsearch 6.x which was [released on 14th November 2017](https://www.elastic.co/blog/elasticsearch-6-0-0-released).
+
+Notice that there will be a lot of [breaking changes in Elasticsearch 6.0](https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-changes-6.0.html)
+and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v6/CHANGELOG-6.0.md)
+as we did in the transition from earlier versions of Elastic.
+
+### Elastic 5.0
+
+Elastic 5.0 targets Elasticsearch 5.0.0 and later. Elasticsearch 5.0.0 was
+[released on 26th October 2016](https://www.elastic.co/blog/elasticsearch-5-0-0-released).
+
+Notice that there will be a lot of [breaking changes in Elasticsearch 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking-changes-5.0.html)
+and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v5/CHANGELOG-5.0.md)
+as we did in the transition from Elastic 2.0 (for Elasticsearch 1.x) to Elastic 3.0 (for Elasticsearch 2.x).
+
+Furthermore, the jump in version numbers will give us a chance to be in sync with the Elastic Stack.
+
+### Elastic 3.0
+
+Elastic 3.0 targets Elasticsearch 2.x and is published via [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3).
+
+Elastic 3.0 will only get critical bug fixes. You should update to a recent version.
+
+### Elastic 2.0
+
+Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).
+
+Elastic 2.0 will only get critical bug fixes. You should update to a recent version.
+
+### Elastic 1.0
+
+Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
+to a recent version.
+
+However, if you cannot update for some reason, don't worry. Version 1.0 is
+still available. All you need to do is go-get it and change your import path
+as described above.
+
+
+## Status
+
+We have been using Elastic in production since 2012. Elastic is stable, but the API changes
+now and then. We strive for API compatibility.
+However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html)
+and we sometimes have to adapt.
+
+Having said that, there have been no big API changes that required you
+to rewrite large parts of your application. More often than not it's renaming APIs
+and adding/removing features so that Elastic stays in sync with Elasticsearch.
+
+Elastic has been used in production with the following Elasticsearch versions:
+0.90, 1.0-1.7, and 2.0-2.4.1. Furthermore, we use [Travis CI](https://travis-ci.org/)
+to test Elastic with the most recent versions of Elasticsearch and Go.
+See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
+file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
+for the results.
+
+Elasticsearch has quite a few features. Most of them are implemented
+by Elastic. I add features and APIs as required. It's straightforward
+to implement missing pieces. I'm accepting pull requests :-)
+
+Having said that, I hope you find the project useful.
+
+
+## Getting Started
+
+The first thing you do is create a [Client](https://github.com/olivere/elastic/blob/master/client.go).
+The client connects to Elasticsearch on `http://127.0.0.1:9200` by default.
+
+You typically create one client for your app. Here's a complete example of
+creating a client, creating an index, adding a document, executing a search etc.
+
+An example is available [here](https://olivere.github.io/elastic/).
+
+Here's a [link to a complete working example for v3](https://gist.github.com/olivere/114347ff9d9cfdca7bdc0ecea8b82263).
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
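+
+A minimal sketch, assuming a node at the default `http://127.0.0.1:9200` and a hypothetical `tweets` index:
+
+```go
+client, err := elastic.NewClient()
+if err != nil {
+	log.Fatal(err)
+}
+res, err := client.Search().
+	Index("tweets").
+	Query(elastic.NewMatchAllQuery()).
+	Do(context.Background())
+if err != nil {
+	log.Fatal(err)
+}
+fmt.Printf("found %d documents\n", res.TotalHits())
+```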
+
+
+## API Status
+
+### Document APIs
+
+- [x] Index API
+- [x] Get API
+- [x] Delete API
+- [x] Delete By Query API
+- [x] Update API
+- [x] Update By Query API
+- [x] Multi Get API
+- [x] Bulk API
+- [x] Reindex API
+- [x] Term Vectors
+- [x] Multi termvectors API
+
+### Search APIs
+
+- [x] Search
+- [x] Search Template
+- [ ] Multi Search Template
+- [ ] Search Shards API
+- [x] Suggesters
+ - [x] Term Suggester
+ - [x] Phrase Suggester
+ - [x] Completion Suggester
+ - [x] Context Suggester
+- [x] Multi Search API
+- [x] Count API
+- [ ] Validate API
+- [x] Explain API
+- [x] Profile API
+- [x] Field Capabilities API
+
+### Aggregations
+
+- Metrics Aggregations
+ - [x] Avg
+ - [x] Cardinality
+ - [x] Extended Stats
+ - [x] Geo Bounds
+ - [ ] Geo Centroid
+ - [x] Max
+ - [x] Min
+ - [x] Percentiles
+ - [x] Percentile Ranks
+ - [ ] Scripted Metric
+ - [x] Stats
+ - [x] Sum
+ - [x] Top Hits
+ - [x] Value Count
+- Bucket Aggregations
+ - [ ] Adjacency Matrix
+ - [x] Children
+ - [x] Date Histogram
+ - [x] Date Range
+ - [ ] Diversified Sampler
+ - [x] Filter
+ - [x] Filters
+ - [x] Geo Distance
+ - [ ] GeoHash Grid
+ - [x] Global
+ - [x] Histogram
+ - [x] IP Range
+ - [x] Missing
+ - [x] Nested
+ - [x] Range
+ - [x] Reverse Nested
+ - [x] Sampler
+ - [x] Significant Terms
+ - [x] Significant Text
+ - [x] Terms
+- Pipeline Aggregations
+ - [x] Avg Bucket
+ - [x] Derivative
+ - [x] Max Bucket
+ - [x] Min Bucket
+ - [x] Sum Bucket
+ - [x] Stats Bucket
+ - [ ] Extended Stats Bucket
+ - [x] Percentiles Bucket
+ - [x] Moving Average
+ - [x] Cumulative Sum
+ - [x] Bucket Script
+ - [x] Bucket Selector
+ - [x] Serial Differencing
+- [x] Matrix Aggregations
+ - [x] Matrix Stats
+- [x] Aggregation Metadata
+
+### Indices APIs
+
+- [x] Create Index
+- [x] Delete Index
+- [x] Get Index
+- [x] Indices Exists
+- [x] Open / Close Index
+- [x] Shrink Index
+- [x] Rollover Index
+- [x] Put Mapping
+- [x] Get Mapping
+- [x] Get Field Mapping
+- [x] Types Exists
+- [x] Index Aliases
+- [x] Update Indices Settings
+- [x] Get Settings
+- [x] Analyze
+- [x] Index Templates
+- [ ] Shadow Replica Indices
+- [x] Indices Stats
+- [x] Indices Segments
+- [ ] Indices Recovery
+- [ ] Indices Shard Stores
+- [ ] Clear Cache
+- [x] Flush
+- [x] Refresh
+- [x] Force Merge
+- [ ] Upgrade
+
+### cat APIs
+
+The cat APIs are not implemented as of now. We think they are better suited for operating with Elasticsearch on the command line.
+
+- [ ] cat aliases
+- [ ] cat allocation
+- [ ] cat count
+- [ ] cat fielddata
+- [ ] cat health
+- [ ] cat indices
+- [ ] cat master
+- [ ] cat nodeattrs
+- [ ] cat nodes
+- [ ] cat pending tasks
+- [ ] cat plugins
+- [ ] cat recovery
+- [ ] cat repositories
+- [ ] cat thread pool
+- [ ] cat shards
+- [ ] cat segments
+- [ ] cat snapshots
+
+### Cluster APIs
+
+- [x] Cluster Health
+- [x] Cluster State
+- [x] Cluster Stats
+- [ ] Pending Cluster Tasks
+- [ ] Cluster Reroute
+- [ ] Cluster Update Settings
+- [x] Nodes Stats
+- [x] Nodes Info
+- [x] Task Management API
+- [ ] Nodes hot_threads
+- [ ] Cluster Allocation Explain API
+
+### Query DSL
+
+- [x] Match All Query
+- [x] Inner hits
+- Full text queries
+ - [x] Match Query
+ - [x] Match Phrase Query
+ - [x] Match Phrase Prefix Query
+ - [x] Multi Match Query
+ - [x] Common Terms Query
+ - [x] Query String Query
+ - [x] Simple Query String Query
+- Term level queries
+ - [x] Term Query
+ - [x] Terms Query
+ - [x] Range Query
+ - [x] Exists Query
+ - [x] Prefix Query
+ - [x] Wildcard Query
+ - [x] Regexp Query
+ - [x] Fuzzy Query
+ - [x] Type Query
+ - [x] Ids Query
+- Compound queries
+ - [x] Constant Score Query
+ - [x] Bool Query
+ - [x] Dis Max Query
+ - [x] Function Score Query
+ - [x] Boosting Query
+ - [x] Indices Query
+- Joining queries
+ - [x] Nested Query
+ - [x] Has Child Query
+ - [x] Has Parent Query
+ - [x] Parent Id Query
+- Geo queries
+ - [ ] GeoShape Query
+ - [x] Geo Bounding Box Query
+ - [x] Geo Distance Query
+ - [ ] Geo Distance Range Query
+ - [x] Geo Polygon Query
+ - [ ] Geohash Cell Query
+- Specialized queries
+ - [x] More Like This Query
+ - [x] Template Query
+ - [x] Script Query
+ - [x] Percolate Query
+- Span queries
+ - [ ] Span Term Query
+ - [ ] Span Multi Term Query
+ - [ ] Span First Query
+ - [ ] Span Near Query
+ - [ ] Span Or Query
+ - [ ] Span Not Query
+ - [ ] Span Containing Query
+ - [ ] Span Within Query
+ - [ ] Span Field Masking Query
+- [ ] Minimum Should Match
+- [ ] Multi Term Query Rewrite
+
+### Modules
+
+- Snapshot and Restore
+ - [x] Repositories
+ - [ ] Snapshot
+ - [ ] Restore
+ - [ ] Snapshot status
+ - [ ] Monitoring snapshot/restore status
+ - [ ] Stopping currently running snapshot and restore
+
+### Sorting
+
+- [x] Sort by score
+- [x] Sort by field
+- [x] Sort by geo distance
+- [x] Sort by script
+- [x] Sort by doc
+
+### Scrolling
+
+Scrolling is supported via a `ScrollService`. It supports an iterator-like interface.
+The `ClearScroll` API is implemented as well.
+
+A pattern for [efficiently scrolling in parallel](https://github.com/olivere/elastic/wiki/ScrollParallel)
+is described in the [Wiki](https://github.com/olivere/elastic/wiki).
+
+## How to contribute
+
+Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
+
+## Credits
+
+Thanks a lot for the great folks working hard on
+[Elasticsearch](https://www.elastic.co/products/elasticsearch)
+and
+[Go](https://golang.org/).
+
+Elastic uses portions of the
+[uritemplates](https://github.com/jtacoma/uritemplates) library
+by Joshua Tacoma,
+[backoff](https://github.com/cenkalti/backoff) by Cenk Altı and
+[leaktest](https://github.com/fortytw2/leaktest) by Ian Chiles.
+
+## LICENSE
+
+MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
+or the LICENSE file provided in the repository for details.
diff --git a/vendor/github.com/olivere/elastic/acknowledged_response.go b/vendor/github.com/olivere/elastic/acknowledged_response.go
new file mode 100644
index 000000000..2045ab85e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/acknowledged_response.go
@@ -0,0 +1,13 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// AcknowledgedResponse is returned from various APIs. It simply indicates
+// whether the operation is ack'd or not.
+type AcknowledgedResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
diff --git a/vendor/github.com/olivere/elastic/backoff.go b/vendor/github.com/olivere/elastic/backoff.go
new file mode 100644
index 000000000..736959f6d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/backoff.go
@@ -0,0 +1,148 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "math"
+ "math/rand"
+ "sync"
+ "time"
+)
+
+// BackoffFunc specifies the signature of a function that returns the
+// time to wait before the next call to a resource. To stop retrying
+// return false in the 2nd return value.
+type BackoffFunc func(retry int) (time.Duration, bool)
+
+// Backoff allows callers to implement their own Backoff strategy.
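+//
+// A usage sketch (doRequest is a hypothetical caller-side function):
+//
+//	for retry := 0; ; retry++ {
+//		if err := doRequest(); err == nil {
+//			break
+//		}
+//		wait, ok := backoff.Next(retry)
+//		if !ok {
+//			break
+//		}
+//		time.Sleep(wait)
+//	}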
+type Backoff interface {
+ // Next implements a BackoffFunc.
+ Next(retry int) (time.Duration, bool)
+}
+
+// -- ZeroBackoff --
+
+// ZeroBackoff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting,
+// indefinitely.
+type ZeroBackoff struct{}
+
+// Next implements BackoffFunc for ZeroBackoff.
+func (b ZeroBackoff) Next(retry int) (time.Duration, bool) {
+ return 0, true
+}
+
+// -- StopBackoff --
+
+// StopBackoff is a fixed backoff policy that always returns false for
+// Next(), meaning that the operation should never be retried.
+type StopBackoff struct{}
+
+// Next implements BackoffFunc for StopBackoff.
+func (b StopBackoff) Next(retry int) (time.Duration, bool) {
+ return 0, false
+}
+
+// -- ConstantBackoff --
+
+// ConstantBackoff is a backoff policy that always returns the same delay.
+type ConstantBackoff struct {
+ interval time.Duration
+}
+
+// NewConstantBackoff returns a new ConstantBackoff.
+func NewConstantBackoff(interval time.Duration) *ConstantBackoff {
+ return &ConstantBackoff{interval: interval}
+}
+
+// Next implements BackoffFunc for ConstantBackoff.
+func (b *ConstantBackoff) Next(retry int) (time.Duration, bool) {
+ return b.interval, true
+}
+
+// -- Exponential --
+
+// ExponentialBackoff implements the simple exponential backoff described by
+// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html.
+type ExponentialBackoff struct {
+ t float64 // initial timeout (in msec)
+ f float64 // exponential factor (e.g. 2)
+ m float64 // maximum timeout (in msec)
+}
+
+// NewExponentialBackoff returns a ExponentialBackoff backoff policy.
+// Use initialTimeout to set the first/minimal interval
+// and maxTimeout to set the maximum wait interval.
+func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff {
+ return &ExponentialBackoff{
+ t: float64(int64(initialTimeout / time.Millisecond)),
+ f: 2.0,
+ m: float64(int64(maxTimeout / time.Millisecond)),
+ }
+}
+
+// Next implements BackoffFunc for ExponentialBackoff.
+func (b *ExponentialBackoff) Next(retry int) (time.Duration, bool) {
+ r := 1.0 + rand.Float64() // random number in [1.0, 2.0)
+ m := math.Min(r*b.t*math.Pow(b.f, float64(retry)), b.m)
+ if m >= b.m {
+ return 0, false
+ }
+ d := time.Duration(int64(m)) * time.Millisecond
+ return d, true
+}
+
+// -- Simple Backoff --
+
+// SimpleBackoff takes a list of fixed values for backoff intervals.
+// Each call to Next returns the next value from that fixed list.
+// After each value is returned, subsequent calls to Next will only return
+// the last element. The values are optionally "jittered" (off by default).
+type SimpleBackoff struct {
+ sync.Mutex
+ ticks []int
+ jitter bool
+}
+
+// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified
+// list of fixed intervals in milliseconds.
+func NewSimpleBackoff(ticks ...int) *SimpleBackoff {
+ return &SimpleBackoff{
+ ticks: ticks,
+ jitter: false,
+ }
+}
+
+// Jitter enables or disables jittering values.
+func (b *SimpleBackoff) Jitter(flag bool) *SimpleBackoff {
+ b.Lock()
+ b.jitter = flag
+ b.Unlock()
+ return b
+}
+
+// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis].
+func jitter(millis int) int {
+ if millis <= 0 {
+ return 0
+ }
+ return millis/2 + rand.Intn(millis)
+}
+
+// Next implements BackoffFunc for SimpleBackoff.
+func (b *SimpleBackoff) Next(retry int) (time.Duration, bool) {
+ b.Lock()
+ defer b.Unlock()
+
+ if retry >= len(b.ticks) {
+ return 0, false
+ }
+
+ ms := b.ticks[retry]
+ if b.jitter {
+ ms = jitter(ms)
+ }
+ return time.Duration(ms) * time.Millisecond, true
+}
diff --git a/vendor/github.com/olivere/elastic/backoff_test.go b/vendor/github.com/olivere/elastic/backoff_test.go
new file mode 100644
index 000000000..eae168a12
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/backoff_test.go
@@ -0,0 +1,140 @@
+package elastic
+
+import (
+ "math/rand"
+ "testing"
+ "time"
+)
+
+func TestZeroBackoff(t *testing.T) {
+ b := ZeroBackoff{}
+ _, ok := b.Next(0)
+ if !ok {
+ t.Fatalf("expected %v, got %v", true, ok)
+ }
+}
+
+func TestStopBackoff(t *testing.T) {
+ b := StopBackoff{}
+ _, ok := b.Next(0)
+ if ok {
+ t.Fatalf("expected %v, got %v", false, ok)
+ }
+}
+
+func TestConstantBackoff(t *testing.T) {
+ b := NewConstantBackoff(time.Second)
+ d, ok := b.Next(0)
+ if !ok {
+ t.Fatalf("expected %v, got %v", true, ok)
+ }
+ if d != time.Second {
+ t.Fatalf("expected %v, got %v", time.Second, d)
+ }
+}
+
+func TestSimpleBackoff(t *testing.T) {
+ var tests = []struct {
+ Duration time.Duration
+ Continue bool
+ }{
+ // #0
+ {
+ Duration: 1 * time.Millisecond,
+ Continue: true,
+ },
+ // #1
+ {
+ Duration: 2 * time.Millisecond,
+ Continue: true,
+ },
+ // #2
+ {
+ Duration: 7 * time.Millisecond,
+ Continue: true,
+ },
+ // #3
+ {
+ Duration: 0,
+ Continue: false,
+ },
+ // #4
+ {
+ Duration: 0,
+ Continue: false,
+ },
+ }
+
+ b := NewSimpleBackoff(1, 2, 7)
+
+ for i, tt := range tests {
+ d, ok := b.Next(i)
+ if got, want := ok, tt.Continue; got != want {
+ t.Fatalf("#%d: expected %v, got %v", i, want, got)
+ }
+ if got, want := d, tt.Duration; got != want {
+ t.Fatalf("#%d: expected %v, got %v", i, want, got)
+ }
+ }
+}
+
+func TestExponentialBackoff(t *testing.T) {
+ rand.Seed(time.Now().UnixNano())
+
+ min := time.Duration(8) * time.Millisecond
+ max := time.Duration(256) * time.Millisecond
+ b := NewExponentialBackoff(min, max)
+
+ between := func(value time.Duration, a, b int) bool {
+ x := int(value / time.Millisecond)
+ return a <= x && x <= b
+ }
+
+	for retry := 0; retry <= 4; retry++ {
+		got, ok := b.Next(retry)
+		if !ok {
+			t.Fatalf("#%d: expected %v, got %v", retry, true, ok)
+		}
+		if !between(got, 8, 256) {
+			t.Errorf("#%d: expected [%v..%v], got %v", retry, 8, 256, got)
+		}
+	}
+
+	if _, ok := b.Next(5); ok {
+		t.Fatalf("expected %v, got %v", false, ok)
+	}
+
+	if _, ok := b.Next(6); ok {
+		t.Fatalf("expected %v, got %v", false, ok)
+	}
+}
diff --git a/vendor/github.com/olivere/elastic/bulk.go b/vendor/github.com/olivere/elastic/bulk.go
new file mode 100644
index 000000000..f4228294f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk.go
@@ -0,0 +1,417 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// BulkService allows for batching bulk requests and sending them to
+// Elasticsearch in one roundtrip. Use the Add method with BulkIndexRequest,
+// BulkUpdateRequest, and BulkDeleteRequest to add bulk requests to a batch,
+// then use Do to send them to Elasticsearch.
+//
+// BulkService will be reset after each successful Do call. In other words,
+// you can reuse BulkService to send many batches. You do not have to create
+// a new BulkService for each batch.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
+// for more details.
+type BulkService struct {
+ client *Client
+ retrier Retrier
+
+ index string
+ typ string
+ requests []BulkableRequest
+ pipeline string
+ timeout string
+ refresh string
+ routing string
+ waitForActiveShards string
+ pretty bool
+
+ // estimated bulk size in bytes, up to the request index sizeInBytesCursor
+ sizeInBytes int64
+ sizeInBytesCursor int
+}
+
+// NewBulkService initializes a new BulkService.
+func NewBulkService(client *Client) *BulkService {
+ builder := &BulkService{
+ client: client,
+ }
+ return builder
+}
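+
+// A minimal end-to-end sketch (assumes an existing *Client named client;
+// the document payloads are illustrative):
+//
+//	svc := NewBulkService(client).Index("index1").Type("doc")
+//	svc.Add(NewBulkIndexRequest().Id("1").Doc(map[string]interface{}{"user": "olivere"}))
+//	svc.Add(NewBulkDeleteRequest().Id("2"))
+//	res, err := svc.Do(context.Background())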
+
+func (s *BulkService) reset() {
+ s.requests = make([]BulkableRequest, 0)
+ s.sizeInBytes = 0
+ s.sizeInBytesCursor = 0
+}
+
+// Retrier allows setting specific retry logic for this BulkService.
+// If not specified, the client's default retrier is used.
+func (s *BulkService) Retrier(retrier Retrier) *BulkService {
+ s.retrier = retrier
+ return s
+}
+
+// Index specifies the index to use for all batches. You may also leave
+// this blank and specify the index in the individual bulk requests.
+func (s *BulkService) Index(index string) *BulkService {
+ s.index = index
+ return s
+}
+
+// Type specifies the type to use for all batches. You may also leave
+// this blank and specify the type in the individual bulk requests.
+func (s *BulkService) Type(typ string) *BulkService {
+ s.typ = typ
+ return s
+}
+
+// Timeout is a global timeout for processing bulk requests. This is a
+// server-side timeout, i.e. it tells Elasticsearch the time after which
+// it should stop processing.
+func (s *BulkService) Timeout(timeout string) *BulkService {
+ s.timeout = timeout
+ return s
+}
+
+// Refresh controls when changes made by this request are made visible
+// to search. The allowed values are: "true" (refresh the relevant
+// primary and replica shards immediately), "wait_for" (wait for the
+// changes to be made visible by a refresh before applying), or "false"
+// (no refresh related actions).
+func (s *BulkService) Refresh(refresh string) *BulkService {
+ s.refresh = refresh
+ return s
+}
+
+// Routing specifies the routing value.
+func (s *BulkService) Routing(routing string) *BulkService {
+ s.routing = routing
+ return s
+}
+
+// Pipeline specifies the pipeline id to preprocess incoming documents with.
+func (s *BulkService) Pipeline(pipeline string) *BulkService {
+ s.pipeline = pipeline
+ return s
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active
+// before proceeding with the bulk operation. Defaults to 1, meaning the
+// primary shard only. Set to `all` for all shard copies, otherwise set to
+// any non-negative value less than or equal to the total number of copies
+// for the shard (number of replicas + 1).
+func (s *BulkService) WaitForActiveShards(waitForActiveShards string) *BulkService {
+ s.waitForActiveShards = waitForActiveShards
+ return s
+}
+
+// Pretty tells Elasticsearch whether to return a formatted JSON response.
+func (s *BulkService) Pretty(pretty bool) *BulkService {
+ s.pretty = pretty
+ return s
+}
+
+// Add adds bulkable requests, i.e. BulkIndexRequest, BulkUpdateRequest,
+// and/or BulkDeleteRequest.
+func (s *BulkService) Add(requests ...BulkableRequest) *BulkService {
+	s.requests = append(s.requests, requests...)
+ return s
+}
+
+// EstimatedSizeInBytes returns the estimated size of all bulkable
+// requests added via Add.
+func (s *BulkService) EstimatedSizeInBytes() int64 {
+ if s.sizeInBytesCursor == len(s.requests) {
+ return s.sizeInBytes
+ }
+ for _, r := range s.requests[s.sizeInBytesCursor:] {
+ s.sizeInBytes += s.estimateSizeInBytes(r)
+ s.sizeInBytesCursor++
+ }
+ return s.sizeInBytes
+}
+
+// estimateSizeInBytes returns the estimated size of the given
+// bulkable request, i.e. BulkIndexRequest, BulkUpdateRequest, and
+// BulkDeleteRequest.
+func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 {
+ lines, _ := r.Source()
+ size := 0
+ for _, line := range lines {
+ // +1 for the \n
+ size += len(line) + 1
+ }
+ return int64(size)
+}
+
+// NumberOfActions returns the number of bulkable requests that need to
+// be sent to Elasticsearch on the next batch.
+func (s *BulkService) NumberOfActions() int {
+ return len(s.requests)
+}
+
+func (s *BulkService) bodyAsString() (string, error) {
+ // Pre-allocate to reduce allocs
+ buf := bytes.NewBuffer(make([]byte, 0, s.EstimatedSizeInBytes()))
+
+ for _, req := range s.requests {
+ source, err := req.Source()
+ if err != nil {
+ return "", err
+ }
+ for _, line := range source {
+ buf.WriteString(line)
+ buf.WriteByte('\n')
+ }
+ }
+
+ return buf.String(), nil
+}
+
+// Do sends the batched requests to Elasticsearch. Note that, when successful,
+// you can reuse the BulkService for the next batch as the list of bulk
+// requests is cleared on success.
+func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error) {
+ // No actions?
+ if s.NumberOfActions() == 0 {
+ return nil, errors.New("elastic: No bulk actions to commit")
+ }
+
+ // Get body
+ body, err := s.bodyAsString()
+ if err != nil {
+ return nil, err
+ }
+
+ // Build url
+ path := "/"
+ if len(s.index) > 0 {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": s.index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ path += index + "/"
+ }
+ if len(s.typ) > 0 {
+ typ, err := uritemplates.Expand("{type}", map[string]string{
+ "type": s.typ,
+ })
+ if err != nil {
+ return nil, err
+ }
+ path += typ + "/"
+ }
+ path += "_bulk"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.pipeline != "" {
+ params.Set("pipeline", s.pipeline)
+ }
+ if s.refresh != "" {
+ params.Set("refresh", s.refresh)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.waitForActiveShards != "" {
+ params.Set("wait_for_active_shards", s.waitForActiveShards)
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ ContentType: "application/x-ndjson",
+ Retrier: s.retrier,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return results
+ ret := new(BulkResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+
+ // Reset so the request can be reused
+ s.reset()
+
+ return ret, nil
+}
+
+// BulkResponse is a response to a bulk execution.
+//
+// Example:
+// {
+// "took":3,
+// "errors":false,
+// "items":[{
+// "index":{
+// "_index":"index1",
+// "_type":"tweet",
+// "_id":"1",
+// "_version":3,
+// "status":201
+// }
+// },{
+// "index":{
+// "_index":"index2",
+// "_type":"tweet",
+// "_id":"2",
+// "_version":3,
+// "status":200
+// }
+// },{
+// "delete":{
+// "_index":"index1",
+// "_type":"tweet",
+// "_id":"1",
+// "_version":4,
+// "status":200,
+// "found":true
+// }
+// },{
+// "update":{
+// "_index":"index2",
+// "_type":"tweet",
+// "_id":"2",
+// "_version":4,
+// "status":200
+// }
+// }]
+// }
+type BulkResponse struct {
+ Took int `json:"took,omitempty"`
+ Errors bool `json:"errors,omitempty"`
+ Items []map[string]*BulkResponseItem `json:"items,omitempty"`
+}
+
+// BulkResponseItem is the result of a single bulk request.
+type BulkResponseItem struct {
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Version int64 `json:"_version,omitempty"`
+ Result string `json:"result,omitempty"`
+ Shards *shardsInfo `json:"_shards,omitempty"`
+ SeqNo int64 `json:"_seq_no,omitempty"`
+ PrimaryTerm int64 `json:"_primary_term,omitempty"`
+ Status int `json:"status,omitempty"`
+ ForcedRefresh bool `json:"forced_refresh,omitempty"`
+ Error *ErrorDetails `json:"error,omitempty"`
+ GetResult *GetResult `json:"get,omitempty"`
+}
+
+// Indexed returns all bulk request results of "index" actions.
+func (r *BulkResponse) Indexed() []*BulkResponseItem {
+ return r.ByAction("index")
+}
+
+// Created returns all bulk request results of "create" actions.
+func (r *BulkResponse) Created() []*BulkResponseItem {
+ return r.ByAction("create")
+}
+
+// Updated returns all bulk request results of "update" actions.
+func (r *BulkResponse) Updated() []*BulkResponseItem {
+ return r.ByAction("update")
+}
+
+// Deleted returns all bulk request results of "delete" actions.
+func (r *BulkResponse) Deleted() []*BulkResponseItem {
+ return r.ByAction("delete")
+}
+
+// ByAction returns all bulk request results of a certain action,
+// e.g. "index" or "delete".
+func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ var items []*BulkResponseItem
+ for _, item := range r.Items {
+ if result, found := item[action]; found {
+ items = append(items, result)
+ }
+ }
+ return items
+}
+
+// ById returns all bulk request results of a given document id,
+// regardless of the action ("index", "delete" etc.).
+func (r *BulkResponse) ById(id string) []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ var items []*BulkResponseItem
+ for _, item := range r.Items {
+ for _, result := range item {
+ if result.Id == id {
+ items = append(items, result)
+ }
+ }
+ }
+ return items
+}
+
+// Failed returns those items of a bulk response that have errors,
+// i.e. those that don't have a status code between 200 and 299.
+func (r *BulkResponse) Failed() []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ var errors []*BulkResponseItem
+ for _, item := range r.Items {
+ for _, result := range item {
+ if !(result.Status >= 200 && result.Status <= 299) {
+ errors = append(errors, result)
+ }
+ }
+ }
+ return errors
+}
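+
+// For example, to log each failed item after a bulk call (sketch; assumes
+// res is a *BulkResponse returned by Do):
+//
+//	for _, item := range res.Failed() {
+//		log.Printf("bulk item failed: id=%s status=%d", item.Id, item.Status)
+//	}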
+
+// Succeeded returns those items of a bulk response that have no errors,
+// i.e. those that have a status code between 200 and 299.
+func (r *BulkResponse) Succeeded() []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ var succeeded []*BulkResponseItem
+ for _, item := range r.Items {
+ for _, result := range item {
+ if result.Status >= 200 && result.Status <= 299 {
+ succeeded = append(succeeded, result)
+ }
+ }
+ }
+ return succeeded
+}
diff --git a/vendor/github.com/olivere/elastic/bulk_delete_request.go b/vendor/github.com/olivere/elastic/bulk_delete_request.go
new file mode 100644
index 000000000..e6c98c553
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_delete_request.go
@@ -0,0 +1,166 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+//go:generate easyjson bulk_delete_request.go
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// -- Bulk delete request --
+
+// BulkDeleteRequest is a request to remove a document from Elasticsearch.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
+// for details.
+type BulkDeleteRequest struct {
+ BulkableRequest
+ index string
+ typ string
+ id string
+ parent string
+ routing string
+ version int64 // default is MATCH_ANY
+ versionType string // default is "internal"
+
+ source []string
+
+ useEasyJSON bool
+}
+
+//easyjson:json
+type bulkDeleteRequestCommand map[string]bulkDeleteRequestCommandOp
+
+//easyjson:json
+type bulkDeleteRequestCommandOp struct {
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Parent string `json:"parent,omitempty"`
+ Routing string `json:"routing,omitempty"`
+ Version int64 `json:"version,omitempty"`
+ VersionType string `json:"version_type,omitempty"`
+}
+
+// NewBulkDeleteRequest returns a new BulkDeleteRequest.
+func NewBulkDeleteRequest() *BulkDeleteRequest {
+ return &BulkDeleteRequest{}
+}
+
+// UseEasyJSON is an experimental setting that enables serialization
+// with github.com/mailru/easyjson, which should result in faster
+// serialization and fewer allocations, but gives up compatibility with
+// encoding/json and makes use of unsafe, among other caveats. See
+// https://github.com/mailru/easyjson#issues-notes-and-limitations
+// for details. This setting is disabled by default.
+func (r *BulkDeleteRequest) UseEasyJSON(enable bool) *BulkDeleteRequest {
+ r.useEasyJSON = enable
+ return r
+}
+
+// Index specifies the Elasticsearch index to use for this delete request.
+// If unspecified, the index set on the BulkService will be used.
+func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
+ r.index = index
+ r.source = nil
+ return r
+}
+
+// Type specifies the Elasticsearch type to use for this delete request.
+// If unspecified, the type set on the BulkService will be used.
+func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
+ r.typ = typ
+ r.source = nil
+ return r
+}
+
+// Id specifies the identifier of the document to delete.
+func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
+ r.id = id
+ r.source = nil
+ return r
+}
+
+// Parent specifies the parent of the request, which is used in parent/child
+// mappings.
+func (r *BulkDeleteRequest) Parent(parent string) *BulkDeleteRequest {
+ r.parent = parent
+ r.source = nil
+ return r
+}
+
+// Routing specifies a routing value for the request.
+func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
+ r.routing = routing
+ r.source = nil
+ return r
+}
+
+// Version indicates the version to be deleted as part of an optimistic
+// concurrency model.
+func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
+ r.version = version
+ r.source = nil
+ return r
+}
+
+// VersionType can be "internal" (default), "external", "external_gte",
+// or "external_gt".
+func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
+ r.versionType = versionType
+ r.source = nil
+ return r
+}
+
+// String returns the on-wire representation of the delete request,
+// concatenated as a single string.
+func (r *BulkDeleteRequest) String() string {
+ lines, err := r.Source()
+ if err != nil {
+ return fmt.Sprintf("error: %v", err)
+ }
+ return strings.Join(lines, "\n")
+}
+
+// Source returns the on-wire representation of the delete request,
+// split into an action-and-meta-data line and an (optional) source line.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
+// for details.
+func (r *BulkDeleteRequest) Source() ([]string, error) {
+ if r.source != nil {
+ return r.source, nil
+ }
+ command := bulkDeleteRequestCommand{
+ "delete": bulkDeleteRequestCommandOp{
+ Index: r.index,
+ Type: r.typ,
+ Id: r.id,
+ Routing: r.routing,
+ Parent: r.parent,
+ Version: r.version,
+ VersionType: r.versionType,
+ },
+ }
+
+ var err error
+ var body []byte
+ if r.useEasyJSON {
+ // easyjson
+ body, err = command.MarshalJSON()
+ } else {
+ // encoding/json
+ body, err = json.Marshal(command)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ lines := []string{string(body)}
+ r.source = lines
+
+ return lines, nil
+}
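+
+// For example (mirroring the serialization tests in this package),
+//
+//	NewBulkDeleteRequest().Index("index1").Type("doc").Id("1").Source()
+//
+// yields the single action line:
+//
+//	{"delete":{"_index":"index1","_type":"doc","_id":"1"}}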
diff --git a/vendor/github.com/olivere/elastic/bulk_delete_request_easyjson.go b/vendor/github.com/olivere/elastic/bulk_delete_request_easyjson.go
new file mode 100644
index 000000000..df3452ce6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_delete_request_easyjson.go
@@ -0,0 +1,230 @@
+// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
+
+package elastic
+
+import (
+ json "encoding/json"
+ easyjson "github.com/mailru/easyjson"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+)
+
+// suppress unused package warning
+var (
+ _ *json.RawMessage
+ _ *jlexer.Lexer
+ _ *jwriter.Writer
+ _ easyjson.Marshaler
+)
+
+func easyjson8092efb6DecodeGithubComOlivereElastic(in *jlexer.Lexer, out *bulkDeleteRequestCommandOp) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeString()
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "_index":
+ out.Index = string(in.String())
+ case "_type":
+ out.Type = string(in.String())
+ case "_id":
+ out.Id = string(in.String())
+ case "parent":
+ out.Parent = string(in.String())
+ case "routing":
+ out.Routing = string(in.String())
+ case "version":
+ out.Version = int64(in.Int64())
+ case "version_type":
+ out.VersionType = string(in.String())
+ default:
+ in.SkipRecursive()
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson8092efb6EncodeGithubComOlivereElastic(out *jwriter.Writer, in bulkDeleteRequestCommandOp) {
+ out.RawByte('{')
+ first := true
+ _ = first
+ if in.Index != "" {
+ const prefix string = ",\"_index\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Index))
+ }
+ if in.Type != "" {
+ const prefix string = ",\"_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Type))
+ }
+ if in.Id != "" {
+ const prefix string = ",\"_id\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Id))
+ }
+ if in.Parent != "" {
+ const prefix string = ",\"parent\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Parent))
+ }
+ if in.Routing != "" {
+ const prefix string = ",\"routing\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Routing))
+ }
+ if in.Version != 0 {
+ const prefix string = ",\"version\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int64(int64(in.Version))
+ }
+ if in.VersionType != "" {
+ const prefix string = ",\"version_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.VersionType))
+ }
+ out.RawByte('}')
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkDeleteRequestCommandOp) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson8092efb6EncodeGithubComOlivereElastic(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkDeleteRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson8092efb6EncodeGithubComOlivereElastic(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkDeleteRequestCommandOp) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson8092efb6DecodeGithubComOlivereElastic(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkDeleteRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson8092efb6DecodeGithubComOlivereElastic(l, v)
+}
+func easyjson8092efb6DecodeGithubComOlivereElastic1(in *jlexer.Lexer, out *bulkDeleteRequestCommand) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ in.Skip()
+ } else {
+ in.Delim('{')
+ if !in.IsDelim('}') {
+ *out = make(bulkDeleteRequestCommand)
+ } else {
+ *out = nil
+ }
+ for !in.IsDelim('}') {
+ key := string(in.String())
+ in.WantColon()
+ var v1 bulkDeleteRequestCommandOp
+ (v1).UnmarshalEasyJSON(in)
+ (*out)[key] = v1
+ in.WantComma()
+ }
+ in.Delim('}')
+ }
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson8092efb6EncodeGithubComOlivereElastic1(out *jwriter.Writer, in bulkDeleteRequestCommand) {
+ if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 {
+ out.RawString(`null`)
+ } else {
+ out.RawByte('{')
+ v2First := true
+ for v2Name, v2Value := range in {
+ if v2First {
+ v2First = false
+ } else {
+ out.RawByte(',')
+ }
+ out.String(string(v2Name))
+ out.RawByte(':')
+ (v2Value).MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+ }
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkDeleteRequestCommand) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson8092efb6EncodeGithubComOlivereElastic1(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkDeleteRequestCommand) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson8092efb6EncodeGithubComOlivereElastic1(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkDeleteRequestCommand) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson8092efb6DecodeGithubComOlivereElastic1(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkDeleteRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson8092efb6DecodeGithubComOlivereElastic1(l, v)
+}
diff --git a/vendor/github.com/olivere/elastic/bulk_delete_request_test.go b/vendor/github.com/olivere/elastic/bulk_delete_request_test.go
new file mode 100644
index 000000000..8635e34d1
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_delete_request_test.go
@@ -0,0 +1,79 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestBulkDeleteRequestSerialization(t *testing.T) {
+ tests := []struct {
+ Request BulkableRequest
+ Expected []string
+ }{
+ // #0
+ {
+ Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1"),
+ Expected: []string{
+ `{"delete":{"_index":"index1","_type":"doc","_id":"1"}}`,
+ },
+ },
+ // #1
+ {
+ Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1").Parent("2"),
+ Expected: []string{
+ `{"delete":{"_index":"index1","_type":"doc","_id":"1","parent":"2"}}`,
+ },
+ },
+ // #2
+ {
+ Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1").Routing("3"),
+ Expected: []string{
+ `{"delete":{"_index":"index1","_type":"doc","_id":"1","routing":"3"}}`,
+ },
+ },
+ }
+
+ for i, test := range tests {
+ lines, err := test.Request.Source()
+ if err != nil {
+ t.Fatalf("case #%d: expected no error, got: %v", i, err)
+ }
+ if lines == nil {
+ t.Fatalf("case #%d: expected lines, got nil", i)
+ }
+ if len(lines) != len(test.Expected) {
+ t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+ }
+ for j, line := range lines {
+ if line != test.Expected[j] {
+ t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+ }
+ }
+ }
+}
+
+var bulkDeleteRequestSerializationResult string
+
+func BenchmarkBulkDeleteRequestSerialization(b *testing.B) {
+ b.Run("stdlib", func(b *testing.B) {
+ r := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
+ benchmarkBulkDeleteRequestSerialization(b, r.UseEasyJSON(false))
+ })
+ b.Run("easyjson", func(b *testing.B) {
+ r := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
+ benchmarkBulkDeleteRequestSerialization(b, r.UseEasyJSON(true))
+ })
+}
+
+func benchmarkBulkDeleteRequestSerialization(b *testing.B, r *BulkDeleteRequest) {
+ var s string
+ for n := 0; n < b.N; n++ {
+ s = r.String()
+ r.source = nil // Don't let caching spoil the benchmark
+ }
+ bulkDeleteRequestSerializationResult = s // ensure the compiler doesn't optimize
+ b.ReportAllocs()
+}
diff --git a/vendor/github.com/olivere/elastic/bulk_index_request.go b/vendor/github.com/olivere/elastic/bulk_index_request.go
new file mode 100644
index 000000000..321d2e25a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_index_request.go
@@ -0,0 +1,239 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+//go:generate easyjson bulk_index_request.go
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// BulkIndexRequest is a request to add a document to Elasticsearch.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
+// for details.
+type BulkIndexRequest struct {
+ BulkableRequest
+ index string
+ typ string
+ id string
+ opType string
+ routing string
+ parent string
+ version int64 // default is MATCH_ANY
+ versionType string // default is "internal"
+ doc interface{}
+ pipeline string
+ retryOnConflict *int
+
+ source []string
+
+ useEasyJSON bool
+}
+
+//easyjson:json
+type bulkIndexRequestCommand map[string]bulkIndexRequestCommandOp
+
+//easyjson:json
+type bulkIndexRequestCommandOp struct {
+ Index string `json:"_index,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Parent string `json:"parent,omitempty"`
+ // RetryOnConflict is "_retry_on_conflict" for 6.0 and "retry_on_conflict" for 6.1+.
+ RetryOnConflict *int `json:"retry_on_conflict,omitempty"`
+ Routing string `json:"routing,omitempty"`
+ Version int64 `json:"version,omitempty"`
+ VersionType string `json:"version_type,omitempty"`
+ Pipeline string `json:"pipeline,omitempty"`
+}
+
+// NewBulkIndexRequest returns a new BulkIndexRequest.
+// The operation type is "index" by default.
+func NewBulkIndexRequest() *BulkIndexRequest {
+ return &BulkIndexRequest{
+ opType: "index",
+ }
+}
+
+// UseEasyJSON is an experimental setting that enables serialization
+// with github.com/mailru/easyjson, which should result in faster
+// serialization and fewer allocations, but gives up compatibility with
+// encoding/json and makes use of unsafe, among other caveats. See
+// https://github.com/mailru/easyjson#issues-notes-and-limitations
+// for details. This setting is disabled by default.
+func (r *BulkIndexRequest) UseEasyJSON(enable bool) *BulkIndexRequest {
+ r.useEasyJSON = enable
+ return r
+}
+
+// Index specifies the Elasticsearch index to use for this index request.
+// If unspecified, the index set on the BulkService will be used.
+func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
+ r.index = index
+ r.source = nil
+ return r
+}
+
+// Type specifies the Elasticsearch type to use for this index request.
+// If unspecified, the type set on the BulkService will be used.
+func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest {
+ r.typ = typ
+ r.source = nil
+ return r
+}
+
+// Id specifies the identifier of the document to index.
+func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
+ r.id = id
+ r.source = nil
+ return r
+}
+
+// OpType specifies if this request should follow create-only or upsert
+// behavior. This follows the OpType of the standard document index API.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#operation-type
+// for details.
+func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
+ r.opType = opType
+ r.source = nil
+ return r
+}
+
+// Routing specifies a routing value for the request.
+func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest {
+ r.routing = routing
+ r.source = nil
+ return r
+}
+
+// Parent specifies the identifier of the parent document (if available).
+func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
+ r.parent = parent
+ r.source = nil
+ return r
+}
+
+// Version indicates the version of the document as part of an optimistic
+// concurrency model.
+func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
+ r.version = version
+ r.source = nil
+ return r
+}
+
+// VersionType specifies how versions are created. It can be e.g. internal,
+// external, external_gte, or force.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#index-versioning
+// for details.
+func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
+ r.versionType = versionType
+ r.source = nil
+ return r
+}
+
+// Doc specifies the document to index.
+func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
+ r.doc = doc
+ r.source = nil
+ return r
+}
+
+// RetryOnConflict specifies how often to retry in case of a version conflict.
+func (r *BulkIndexRequest) RetryOnConflict(retryOnConflict int) *BulkIndexRequest {
+ r.retryOnConflict = &retryOnConflict
+ r.source = nil
+ return r
+}
+
+// Pipeline to use while processing the request.
+func (r *BulkIndexRequest) Pipeline(pipeline string) *BulkIndexRequest {
+ r.pipeline = pipeline
+ r.source = nil
+ return r
+}
+
+// String returns the on-wire representation of the index request,
+// concatenated as a single string.
+func (r *BulkIndexRequest) String() string {
+ lines, err := r.Source()
+ if err != nil {
+ return fmt.Sprintf("error: %v", err)
+ }
+ return strings.Join(lines, "\n")
+}
+
+// Source returns the on-wire representation of the index request,
+// split into an action-and-meta-data line and an (optional) source line.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
+// for details.
+func (r *BulkIndexRequest) Source() ([]string, error) {
+ // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+ // { "field1" : "value1" }
+
+ if r.source != nil {
+ return r.source, nil
+ }
+
+ lines := make([]string, 2)
+
+ // "index" ...
+ indexCommand := bulkIndexRequestCommandOp{
+ Index: r.index,
+ Type: r.typ,
+ Id: r.id,
+ Routing: r.routing,
+ Parent: r.parent,
+ Version: r.version,
+ VersionType: r.versionType,
+ RetryOnConflict: r.retryOnConflict,
+ Pipeline: r.pipeline,
+ }
+ command := bulkIndexRequestCommand{
+ r.opType: indexCommand,
+ }
+
+ var err error
+ var body []byte
+ if r.useEasyJSON {
+ // easyjson
+ body, err = command.MarshalJSON()
+ } else {
+ // encoding/json
+ body, err = json.Marshal(command)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ lines[0] = string(body)
+
+ // "field1" ...
+ if r.doc != nil {
+ switch t := r.doc.(type) {
+ default:
+ body, err := json.Marshal(r.doc)
+ if err != nil {
+ return nil, err
+ }
+ lines[1] = string(body)
+ case json.RawMessage:
+ lines[1] = string(t)
+ case *json.RawMessage:
+ lines[1] = string(*t)
+ case string:
+ lines[1] = t
+ case *string:
+ lines[1] = *t
+ }
+ } else {
+ lines[1] = "{}"
+ }
+
+ r.source = lines
+ return lines, nil
+}
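+
+// For example (mirroring the serialization tests in this package), an index
+// request with a small document serializes to two lines:
+//
+//	{"index":{"_index":"index1","_id":"1","_type":"doc"}}
+//	{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}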
diff --git a/vendor/github.com/olivere/elastic/bulk_index_request_easyjson.go b/vendor/github.com/olivere/elastic/bulk_index_request_easyjson.go
new file mode 100644
index 000000000..f8792978f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_index_request_easyjson.go
@@ -0,0 +1,262 @@
+// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
+
+package elastic
+
+import (
+ json "encoding/json"
+ easyjson "github.com/mailru/easyjson"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+)
+
+// suppress unused package warning
+var (
+ _ *json.RawMessage
+ _ *jlexer.Lexer
+ _ *jwriter.Writer
+ _ easyjson.Marshaler
+)
+
+func easyjson9de0fcbfDecodeGithubComOlivereElastic(in *jlexer.Lexer, out *bulkIndexRequestCommandOp) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeString()
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "_index":
+ out.Index = string(in.String())
+ case "_id":
+ out.Id = string(in.String())
+ case "_type":
+ out.Type = string(in.String())
+ case "parent":
+ out.Parent = string(in.String())
+ case "retry_on_conflict":
+ if in.IsNull() {
+ in.Skip()
+ out.RetryOnConflict = nil
+ } else {
+ if out.RetryOnConflict == nil {
+ out.RetryOnConflict = new(int)
+ }
+ *out.RetryOnConflict = int(in.Int())
+ }
+ case "routing":
+ out.Routing = string(in.String())
+ case "version":
+ out.Version = int64(in.Int64())
+ case "version_type":
+ out.VersionType = string(in.String())
+ case "pipeline":
+ out.Pipeline = string(in.String())
+ default:
+ in.SkipRecursive()
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson9de0fcbfEncodeGithubComOlivereElastic(out *jwriter.Writer, in bulkIndexRequestCommandOp) {
+ out.RawByte('{')
+ first := true
+ _ = first
+ if in.Index != "" {
+ const prefix string = ",\"_index\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Index))
+ }
+ if in.Id != "" {
+ const prefix string = ",\"_id\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Id))
+ }
+ if in.Type != "" {
+ const prefix string = ",\"_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Type))
+ }
+ if in.Parent != "" {
+ const prefix string = ",\"parent\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Parent))
+ }
+ if in.RetryOnConflict != nil {
+ const prefix string = ",\"retry_on_conflict\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int(int(*in.RetryOnConflict))
+ }
+ if in.Routing != "" {
+ const prefix string = ",\"routing\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Routing))
+ }
+ if in.Version != 0 {
+ const prefix string = ",\"version\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int64(int64(in.Version))
+ }
+ if in.VersionType != "" {
+ const prefix string = ",\"version_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.VersionType))
+ }
+ if in.Pipeline != "" {
+ const prefix string = ",\"pipeline\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Pipeline))
+ }
+ out.RawByte('}')
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkIndexRequestCommandOp) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson9de0fcbfEncodeGithubComOlivereElastic(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkIndexRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson9de0fcbfEncodeGithubComOlivereElastic(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkIndexRequestCommandOp) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson9de0fcbfDecodeGithubComOlivereElastic(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkIndexRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson9de0fcbfDecodeGithubComOlivereElastic(l, v)
+}
+func easyjson9de0fcbfDecodeGithubComOlivereElastic1(in *jlexer.Lexer, out *bulkIndexRequestCommand) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ in.Skip()
+ } else {
+ in.Delim('{')
+ if !in.IsDelim('}') {
+ *out = make(bulkIndexRequestCommand)
+ } else {
+ *out = nil
+ }
+ for !in.IsDelim('}') {
+ key := string(in.String())
+ in.WantColon()
+ var v1 bulkIndexRequestCommandOp
+ (v1).UnmarshalEasyJSON(in)
+ (*out)[key] = v1
+ in.WantComma()
+ }
+ in.Delim('}')
+ }
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson9de0fcbfEncodeGithubComOlivereElastic1(out *jwriter.Writer, in bulkIndexRequestCommand) {
+ if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 {
+ out.RawString(`null`)
+ } else {
+ out.RawByte('{')
+ v2First := true
+ for v2Name, v2Value := range in {
+ if v2First {
+ v2First = false
+ } else {
+ out.RawByte(',')
+ }
+ out.String(string(v2Name))
+ out.RawByte(':')
+ (v2Value).MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+ }
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkIndexRequestCommand) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson9de0fcbfEncodeGithubComOlivereElastic1(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkIndexRequestCommand) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson9de0fcbfEncodeGithubComOlivereElastic1(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkIndexRequestCommand) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson9de0fcbfDecodeGithubComOlivereElastic1(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkIndexRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson9de0fcbfDecodeGithubComOlivereElastic1(l, v)
+}
diff --git a/vendor/github.com/olivere/elastic/bulk_index_request_test.go b/vendor/github.com/olivere/elastic/bulk_index_request_test.go
new file mode 100644
index 000000000..79baf51fb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_index_request_test.go
@@ -0,0 +1,116 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+ "time"
+)
+
+func TestBulkIndexRequestSerialization(t *testing.T) {
+ tests := []struct {
+ Request BulkableRequest
+ Expected []string
+ }{
+ // #0
+ {
+ Request: NewBulkIndexRequest().Index("index1").Type("doc").Id("1").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+ Expected: []string{
+ `{"index":{"_index":"index1","_id":"1","_type":"doc"}}`,
+ `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+ },
+ },
+ // #1
+ {
+ Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("doc").Id("1").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+ Expected: []string{
+ `{"create":{"_index":"index1","_id":"1","_type":"doc"}}`,
+ `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+ },
+ },
+ // #2
+ {
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+ Expected: []string{
+ `{"index":{"_index":"index1","_id":"1","_type":"doc"}}`,
+ `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+ },
+ },
+ // #3
+ {
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").RetryOnConflict(42).
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+ Expected: []string{
+ `{"index":{"_index":"index1","_id":"1","_type":"doc","retry_on_conflict":42}}`,
+ `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+ },
+ },
+ // #4
+ {
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").Pipeline("my_pipeline").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+ Expected: []string{
+ `{"index":{"_index":"index1","_id":"1","_type":"doc","pipeline":"my_pipeline"}}`,
+ `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+ },
+ },
+ // #5
+ {
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").
+ Routing("123").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+ Expected: []string{
+ `{"index":{"_index":"index1","_id":"1","_type":"doc","routing":"123"}}`,
+ `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+ },
+ },
+ }
+
+ for i, test := range tests {
+ lines, err := test.Request.Source()
+ if err != nil {
+ t.Fatalf("case #%d: expected no error, got: %v", i, err)
+ }
+ if lines == nil {
+ t.Fatalf("case #%d: expected lines, got nil", i)
+ }
+ if len(lines) != len(test.Expected) {
+ t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+ }
+ for j, line := range lines {
+ if line != test.Expected[j] {
+ t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+ }
+ }
+ }
+}
+
+var bulkIndexRequestSerializationResult string
+
+func BenchmarkBulkIndexRequestSerialization(b *testing.B) {
+ b.Run("stdlib", func(b *testing.B) {
+ r := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)})
+ benchmarkBulkIndexRequestSerialization(b, r.UseEasyJSON(false))
+ })
+ b.Run("easyjson", func(b *testing.B) {
+ r := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)})
+ benchmarkBulkIndexRequestSerialization(b, r.UseEasyJSON(true))
+ })
+}
+
+func benchmarkBulkIndexRequestSerialization(b *testing.B, r *BulkIndexRequest) {
+ var s string
+ for n := 0; n < b.N; n++ {
+ s = r.String()
+ r.source = nil // Don't let caching spoil the benchmark
+ }
+ bulkIndexRequestSerializationResult = s // ensure the compiler doesn't optimize
+ b.ReportAllocs()
+}
diff --git a/vendor/github.com/olivere/elastic/bulk_processor.go b/vendor/github.com/olivere/elastic/bulk_processor.go
new file mode 100644
index 000000000..b2709a880
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_processor.go
@@ -0,0 +1,547 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// BulkProcessorService allows you to easily process bulk requests. It lets
+// you set policies for when to flush new bulk requests, e.g. based on the
+// number of actions, on the size of the actions, and/or periodically. It
+// also lets you control the number of concurrent bulk requests allowed to
+// run in parallel.
+//
+// BulkProcessorService, by default, commits either every 1000 requests or
+// when the (estimated) size of the bulk requests exceeds 5 MB. However, it
+// does not commit periodically. BulkProcessorService also retries by
+// default, using an exponential backoff algorithm.
+//
+// The caller is responsible for setting the index and type on every
+// bulk request added to BulkProcessorService.
+//
+// BulkProcessorService takes ideas from the BulkProcessor of the
+// Elasticsearch Java API as documented in
+// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html.
+type BulkProcessorService struct {
+ c *Client
+ beforeFn BulkBeforeFunc
+ afterFn BulkAfterFunc
+ name string // name of processor
+ numWorkers int // # of workers (>= 1)
+ bulkActions int // # of requests after which to commit
+ bulkSize int // # of bytes after which to commit
+ flushInterval time.Duration // periodic flush interval
+ wantStats bool // indicates whether to gather statistics
+ backoff Backoff // a custom Backoff to use for errors
+}
+
+// NewBulkProcessorService creates a new BulkProcessorService.
+func NewBulkProcessorService(client *Client) *BulkProcessorService {
+ return &BulkProcessorService{
+ c: client,
+ numWorkers: 1,
+ bulkActions: 1000,
+ bulkSize: 5 << 20, // 5 MB
+ backoff: NewExponentialBackoff(
+ time.Duration(200)*time.Millisecond,
+ time.Duration(10000)*time.Millisecond,
+ ),
+ }
+}
+
+// BulkBeforeFunc defines the signature of callbacks that are executed
+// before a commit to Elasticsearch.
+type BulkBeforeFunc func(executionId int64, requests []BulkableRequest)
+
+// BulkAfterFunc defines the signature of callbacks that are executed
+// after a commit to Elasticsearch. The err parameter signals an error.
+type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error)
+
+// Before specifies a function to be executed before bulk requests get
+// committed to Elasticsearch.
+func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService {
+ s.beforeFn = fn
+ return s
+}
+
+// After specifies a function to be executed when bulk requests have been
+// committed to Elasticsearch. The After callback executes both when the
+// commit was successful as well as on failures.
+func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService {
+ s.afterFn = fn
+ return s
+}
+
+// Name is an optional name to identify this bulk processor.
+func (s *BulkProcessorService) Name(name string) *BulkProcessorService {
+ s.name = name
+ return s
+}
+
+// Workers is the number of concurrent workers allowed to be
+// executed. Defaults to 1 and must be greater than or equal to 1.
+func (s *BulkProcessorService) Workers(num int) *BulkProcessorService {
+ s.numWorkers = num
+ return s
+}
+
+// BulkActions specifies when to flush based on the number of actions
+// currently added. Defaults to 1000 and can be set to -1 to be disabled.
+func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService {
+ s.bulkActions = bulkActions
+ return s
+}
+
+// BulkSize specifies when to flush based on the size (in bytes) of the actions
+// currently added. Defaults to 5 MB and can be set to -1 to be disabled.
+func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService {
+ s.bulkSize = bulkSize
+ return s
+}
+
+// FlushInterval specifies when to flush at the end of the given interval.
+// This is disabled by default. If you want the bulk processor to
+// operate completely asynchronously, set both BulkActions and BulkSize to
+// -1 and set the FlushInterval to a meaningful interval.
+func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService {
+ s.flushInterval = interval
+ return s
+}
+
+// Stats tells the bulk processor to gather stats while running.
+// Use the Stats method on BulkProcessor to retrieve them. This is
+// disabled by default.
+func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService {
+ s.wantStats = wantStats
+ return s
+}
+
+// Backoff sets the backoff strategy to use for errors.
+func (s *BulkProcessorService) Backoff(backoff Backoff) *BulkProcessorService {
+ s.backoff = backoff
+ return s
+}
+
+// Do creates a new BulkProcessor and starts it.
+// Consider the BulkProcessor as a running instance that accepts bulk requests
+// and commits them to Elasticsearch, spreading the work across one or more
+// workers.
+//
+// You can interact with the BulkProcessor returned by Do, e.g. Start and
+// Stop (or Close) it.
+//
+// Context is an optional context that is passed into the bulk request
+// service calls. In contrast to other operations, this context is used in
+// a long running process. You could use it to pass e.g. loggers, but you
+// shouldn't use it for cancellation.
+//
+// Calling Do several times returns new BulkProcessors. You probably don't
+// want to do this. BulkProcessorService is just a builder.
+func (s *BulkProcessorService) Do(ctx context.Context) (*BulkProcessor, error) {
+ p := newBulkProcessor(
+ s.c,
+ s.beforeFn,
+ s.afterFn,
+ s.name,
+ s.numWorkers,
+ s.bulkActions,
+ s.bulkSize,
+ s.flushInterval,
+ s.wantStats,
+ s.backoff)
+
+ err := p.Start(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
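+
+// A minimal setup sketch (assumes an existing *Client named client; the
+// tuning values and the doc variable are illustrative):
+//
+//	p, err := NewBulkProcessorService(client).
+//		Name("worker-1").
+//		Workers(2).
+//		BulkActions(500).               // flush after 500 requests ...
+//		BulkSize(2 << 20).              // ... or after ~2 MB, whichever comes first
+//		FlushInterval(30 * time.Second).
+//		Do(context.Background())
+//	if err == nil {
+//		defer p.Close()
+//		p.Add(NewBulkIndexRequest().Index("index1").Type("doc").Id("1").Doc(doc))
+//	}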
+
+// -- Bulk Processor Statistics --
+
+// BulkProcessorStats contains various statistics of a bulk processor
+// while it is running. Use the Stats method to retrieve a snapshot.
+type BulkProcessorStats struct {
+ Flushed int64 // number of times the flush interval has been invoked
+ Committed int64 // # of times workers committed bulk requests
+ Indexed int64 // # of requests indexed
+ Created int64 // # of requests that ES reported as creates (201)
+ Updated int64 // # of requests that ES reported as updates
+ Deleted int64 // # of requests that ES reported as deletes
+ Succeeded int64 // # of requests that ES reported as successful
+ Failed int64 // # of requests that ES reported as failed
+
+ Workers []*BulkProcessorWorkerStats // stats for each worker
+}
+
+// BulkProcessorWorkerStats represents per-worker statistics.
+type BulkProcessorWorkerStats struct {
+ Queued int64 // # of requests queued in this worker
+ LastDuration time.Duration // duration of last commit
+}
+
+// newBulkProcessorStats initializes and returns a BulkProcessorStats struct.
+func newBulkProcessorStats(workers int) *BulkProcessorStats {
+ stats := &BulkProcessorStats{
+ Workers: make([]*BulkProcessorWorkerStats, workers),
+ }
+ for i := 0; i < workers; i++ {
+ stats.Workers[i] = &BulkProcessorWorkerStats{}
+ }
+ return stats
+}
+
+func (st *BulkProcessorStats) dup() *BulkProcessorStats {
+ dst := new(BulkProcessorStats)
+ dst.Flushed = st.Flushed
+ dst.Committed = st.Committed
+ dst.Indexed = st.Indexed
+ dst.Created = st.Created
+ dst.Updated = st.Updated
+ dst.Deleted = st.Deleted
+ dst.Succeeded = st.Succeeded
+ dst.Failed = st.Failed
+ for _, src := range st.Workers {
+ dst.Workers = append(dst.Workers, src.dup())
+ }
+ return dst
+}
+
+func (st *BulkProcessorWorkerStats) dup() *BulkProcessorWorkerStats {
+ dst := new(BulkProcessorWorkerStats)
+ dst.Queued = st.Queued
+ dst.LastDuration = st.LastDuration
+ return dst
+}
+
+// -- Bulk Processor --
+
+// BulkProcessor encapsulates a task that accepts bulk requests and
+// orchestrates committing them to Elasticsearch via one or more workers.
+//
+// BulkProcessor is returned by setting up a BulkProcessorService and
+// calling the Do method.
+type BulkProcessor struct {
+ c *Client
+ beforeFn BulkBeforeFunc
+ afterFn BulkAfterFunc
+ name string
+ bulkActions int
+ bulkSize int
+ numWorkers int
+ executionId int64
+ requestsC chan BulkableRequest
+ workerWg sync.WaitGroup
+ workers []*bulkWorker
+ flushInterval time.Duration
+ flusherStopC chan struct{}
+ wantStats bool
+ backoff Backoff
+
+ startedMu sync.Mutex // guards the following block
+ started bool
+
+ statsMu sync.Mutex // guards the following block
+ stats *BulkProcessorStats
+}
+
+func newBulkProcessor(
+ client *Client,
+ beforeFn BulkBeforeFunc,
+ afterFn BulkAfterFunc,
+ name string,
+ numWorkers int,
+ bulkActions int,
+ bulkSize int,
+ flushInterval time.Duration,
+ wantStats bool,
+ backoff Backoff) *BulkProcessor {
+ return &BulkProcessor{
+ c: client,
+ beforeFn: beforeFn,
+ afterFn: afterFn,
+ name: name,
+ numWorkers: numWorkers,
+ bulkActions: bulkActions,
+ bulkSize: bulkSize,
+ flushInterval: flushInterval,
+ wantStats: wantStats,
+ backoff: backoff,
+ }
+}
+
+// Start starts the bulk processor. If the processor is already started,
+// nil is returned.
+func (p *BulkProcessor) Start(ctx context.Context) error {
+ p.startedMu.Lock()
+ defer p.startedMu.Unlock()
+
+ if p.started {
+ return nil
+ }
+
+ // We must have at least one worker.
+ if p.numWorkers < 1 {
+ p.numWorkers = 1
+ }
+
+ p.requestsC = make(chan BulkableRequest)
+ p.executionId = 0
+ p.stats = newBulkProcessorStats(p.numWorkers)
+
+ // Create and start up workers.
+ p.workers = make([]*bulkWorker, p.numWorkers)
+ for i := 0; i < p.numWorkers; i++ {
+ p.workerWg.Add(1)
+ p.workers[i] = newBulkWorker(p, i)
+ go p.workers[i].work(ctx)
+ }
+
+ // Start the ticker for flush (if enabled)
+ if int64(p.flushInterval) > 0 {
+ p.flusherStopC = make(chan struct{})
+ go p.flusher(p.flushInterval)
+ }
+
+ p.started = true
+
+ return nil
+}
+
+// Stop is an alias for Close.
+func (p *BulkProcessor) Stop() error {
+ return p.Close()
+}
+
+// Close stops the bulk processor previously started with Do.
+// If it is already stopped, this is a no-op and nil is returned.
+//
+// By implementing Close, BulkProcessor implements the io.Closer interface.
+func (p *BulkProcessor) Close() error {
+ p.startedMu.Lock()
+ defer p.startedMu.Unlock()
+
+ // Already stopped? Do nothing.
+ if !p.started {
+ return nil
+ }
+
+ // Stop flusher (if enabled)
+ if p.flusherStopC != nil {
+ p.flusherStopC <- struct{}{}
+ <-p.flusherStopC
+ close(p.flusherStopC)
+ p.flusherStopC = nil
+ }
+
+ // Stop all workers.
+ close(p.requestsC)
+ p.workerWg.Wait()
+
+ p.started = false
+
+ return nil
+}
+
+// Stats returns the latest bulk processor statistics.
+// Collecting stats must be enabled first by calling Stats(true) on
+// the service that created this processor.
+func (p *BulkProcessor) Stats() BulkProcessorStats {
+ p.statsMu.Lock()
+ defer p.statsMu.Unlock()
+ return *p.stats.dup()
+}
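+
+// A polling sketch (stats gathering must have been enabled via Stats(true)
+// on the BulkProcessorService):
+//
+//	st := p.Stats()
+//	fmt.Printf("committed=%d succeeded=%d failed=%d\n",
+//		st.Committed, st.Succeeded, st.Failed)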
+
+// Add adds a single request to be committed by the BulkProcessor.
+//
+// The caller is responsible for setting the index and type on the request.
+func (p *BulkProcessor) Add(request BulkableRequest) {
+ p.requestsC <- request
+}
+
+// Flush manually asks all workers to commit their outstanding requests.
+// It returns only when all workers acknowledge completion.
+func (p *BulkProcessor) Flush() error {
+ p.statsMu.Lock()
+ p.stats.Flushed++
+ p.statsMu.Unlock()
+
+ for _, w := range p.workers {
+ w.flushC <- struct{}{}
+ <-w.flushAckC // wait for completion
+ }
+ return nil
+}
+
+// flusher is a single goroutine that periodically asks all workers to
+// commit their outstanding bulk requests. It is only started if
+// FlushInterval is greater than 0.
+func (p *BulkProcessor) flusher(interval time.Duration) {
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C: // Periodic flush
+ p.Flush() // TODO swallow errors here?
+
+ case <-p.flusherStopC:
+ p.flusherStopC <- struct{}{}
+ return
+ }
+ }
+}
+
+// -- Bulk Worker --
+
+// bulkWorker encapsulates a single worker, running in a goroutine,
+// receiving bulk requests and eventually committing them to Elasticsearch.
+// It is strongly bound to a BulkProcessor.
+type bulkWorker struct {
+ p *BulkProcessor
+ i int
+ bulkActions int
+ bulkSize int
+ service *BulkService
+ flushC chan struct{}
+ flushAckC chan struct{}
+}
+
+// newBulkWorker creates a new bulkWorker instance.
+func newBulkWorker(p *BulkProcessor, i int) *bulkWorker {
+ return &bulkWorker{
+ p: p,
+ i: i,
+ bulkActions: p.bulkActions,
+ bulkSize: p.bulkSize,
+ service: NewBulkService(p.c),
+ flushC: make(chan struct{}),
+ flushAckC: make(chan struct{}),
+ }
+}
+
+// work waits for bulk requests and manual flush calls on the respective
+// channels and is invoked as a goroutine when the bulk processor is started.
+func (w *bulkWorker) work(ctx context.Context) {
+ defer func() {
+ w.p.workerWg.Done()
+ close(w.flushAckC)
+ close(w.flushC)
+ }()
+
+ var stop bool
+ for !stop {
+ select {
+ case req, open := <-w.p.requestsC:
+ if open {
+ // Received a new request
+ w.service.Add(req)
+ if w.commitRequired() {
+ w.commit(ctx) // TODO swallow errors here?
+ }
+ } else {
+ // Channel closed: Stop.
+ stop = true
+ if w.service.NumberOfActions() > 0 {
+ w.commit(ctx) // TODO swallow errors here?
+ }
+ }
+
+ case <-w.flushC:
+ // Commit outstanding requests
+ if w.service.NumberOfActions() > 0 {
+ w.commit(ctx) // TODO swallow errors here?
+ }
+ w.flushAckC <- struct{}{}
+ }
+ }
+}
+
+// commit commits the bulk requests in the given service,
+// invoking callbacks as specified.
+func (w *bulkWorker) commit(ctx context.Context) error {
+ var res *BulkResponse
+
+	// commitFunc commits the bulk requests and, on failure, is retried
+	// via the configured backoff policy
+ commitFunc := func() error {
+ var err error
+ res, err = w.service.Do(ctx)
+ return err
+ }
+	// notifyFunc is called when a commit attempt fails and may be retried
+ notifyFunc := func(err error) {
+ w.p.c.errorf("elastic: bulk processor %q failed but may retry: %v", w.p.name, err)
+ }
+
+ id := atomic.AddInt64(&w.p.executionId, 1)
+
+	// Update # documents in queue before any retries
+ w.p.statsMu.Lock()
+ if w.p.wantStats {
+ w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests))
+ }
+ w.p.statsMu.Unlock()
+
+ // Save requests because they will be reset in commitFunc
+ reqs := w.service.requests
+
+ // Invoke before callback
+ if w.p.beforeFn != nil {
+ w.p.beforeFn(id, reqs)
+ }
+
+ // Commit bulk requests
+ err := RetryNotify(commitFunc, w.p.backoff, notifyFunc)
+ w.updateStats(res)
+ if err != nil {
+ w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err)
+ }
+
+ // Invoke after callback
+ if w.p.afterFn != nil {
+ w.p.afterFn(id, reqs, res, err)
+ }
+
+ return err
+}
+
+func (w *bulkWorker) updateStats(res *BulkResponse) {
+	// Update stats
+	if res != nil {
+		w.p.statsMu.Lock()
+		if w.p.wantStats {
+			w.p.stats.Committed++
+			w.p.stats.Indexed += int64(len(res.Indexed()))
+			w.p.stats.Created += int64(len(res.Created()))
+			w.p.stats.Updated += int64(len(res.Updated()))
+			w.p.stats.Deleted += int64(len(res.Deleted()))
+			w.p.stats.Succeeded += int64(len(res.Succeeded()))
+			w.p.stats.Failed += int64(len(res.Failed()))
+			w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests))
+			w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond
+		}
+		w.p.statsMu.Unlock()
+	}
+}
+
+// commitRequired returns true if the service has to commit its
+// bulk requests. This can be either because the number of actions
+// or the estimated size in bytes is larger than specified in the
+// BulkProcessorService.
+func (w *bulkWorker) commitRequired() bool {
+ if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions {
+ return true
+ }
+ if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) {
+ return true
+ }
+ return false
+}
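+
+// For example (numbers are illustrative): with BulkActions(100) and
+// BulkSize(-1), a worker commits as soon as 100 requests are buffered;
+// with BulkActions(-1) and BulkSize(64*1024), it commits once the
+// estimated payload reaches 64 KiB. Setting both to -1 disables
+// threshold-based commits entirely, which the flush-interval tests in
+// this change rely on.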
diff --git a/vendor/github.com/olivere/elastic/bulk_processor_test.go b/vendor/github.com/olivere/elastic/bulk_processor_test.go
new file mode 100644
index 000000000..bb97ca217
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_processor_test.go
@@ -0,0 +1,425 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+func TestBulkProcessorDefaults(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ p := client.BulkProcessor()
+ if p == nil {
+ t.Fatalf("expected BulkProcessorService; got: %v", p)
+ }
+ if got, want := p.name, ""; got != want {
+ t.Errorf("expected %q; got: %q", want, got)
+ }
+ if got, want := p.numWorkers, 1; got != want {
+ t.Errorf("expected %d; got: %d", want, got)
+ }
+ if got, want := p.bulkActions, 1000; got != want {
+ t.Errorf("expected %d; got: %d", want, got)
+ }
+ if got, want := p.bulkSize, 5*1024*1024; got != want {
+ t.Errorf("expected %d; got: %d", want, got)
+ }
+ if got, want := p.flushInterval, time.Duration(0); got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := p.wantStats, false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if p.backoff == nil {
+		t.Fatalf("expected non-nil backoff; got: %v", p.backoff)
+ }
+}
+
+func TestBulkProcessorCommitOnBulkActions(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ testBulkProcessor(t,
+ 10000,
+ client.BulkProcessor().
+ Name("Actions-1").
+ Workers(1).
+ BulkActions(100).
+ BulkSize(-1),
+ )
+
+ testBulkProcessor(t,
+ 10000,
+ client.BulkProcessor().
+ Name("Actions-2").
+ Workers(2).
+ BulkActions(100).
+ BulkSize(-1),
+ )
+}
+
+func TestBulkProcessorCommitOnBulkSize(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ testBulkProcessor(t,
+ 10000,
+ client.BulkProcessor().
+ Name("Size-1").
+ Workers(1).
+ BulkActions(-1).
+ BulkSize(64*1024),
+ )
+
+ testBulkProcessor(t,
+ 10000,
+ client.BulkProcessor().
+ Name("Size-2").
+ Workers(2).
+ BulkActions(-1).
+ BulkSize(64*1024),
+ )
+}
+
+func TestBulkProcessorBasedOnFlushInterval(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ var beforeRequests int64
+ var befores int64
+ var afters int64
+ var failures int64
+ var afterRequests int64
+
+ beforeFn := func(executionId int64, requests []BulkableRequest) {
+ atomic.AddInt64(&beforeRequests, int64(len(requests)))
+ atomic.AddInt64(&befores, 1)
+ }
+ afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) {
+ atomic.AddInt64(&afters, 1)
+ if err != nil {
+ atomic.AddInt64(&failures, 1)
+ }
+ atomic.AddInt64(&afterRequests, int64(len(requests)))
+ }
+
+ svc := client.BulkProcessor().
+ Name("FlushInterval-1").
+ Workers(2).
+ BulkActions(-1).
+ BulkSize(-1).
+ FlushInterval(1 * time.Second).
+ Before(beforeFn).
+ After(afterFn)
+
+ p, err := svc.Do(context.Background())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	const numDocs = 1000 // commit thresholds are disabled, so only the flush interval commits these
+
+ for i := 1; i <= numDocs; i++ {
+ tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ p.Add(request)
+ }
+
+ // Should flush at least once
+ time.Sleep(2 * time.Second)
+
+ err = p.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if p.stats.Flushed == 0 {
+ t.Errorf("expected at least 1 flush; got: %d", p.stats.Flushed)
+ }
+ if got, want := beforeRequests, int64(numDocs); got != want {
+ t.Errorf("expected %d requests to before callback; got: %d", want, got)
+ }
+ if got, want := afterRequests, int64(numDocs); got != want {
+ t.Errorf("expected %d requests to after callback; got: %d", want, got)
+ }
+ if befores == 0 {
+ t.Error("expected at least 1 call to before callback")
+ }
+ if afters == 0 {
+ t.Error("expected at least 1 call to after callback")
+ }
+ if failures != 0 {
+ t.Errorf("expected 0 calls to failure callback; got: %d", failures)
+ }
+
+ // Check number of documents that were bulk indexed
+ _, err = p.c.Flush(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ count, err := p.c.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != int64(numDocs) {
+ t.Fatalf("expected %d documents; got: %d", numDocs, count)
+ }
+}
+
+func TestBulkProcessorClose(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ var beforeRequests int64
+ var befores int64
+ var afters int64
+ var failures int64
+ var afterRequests int64
+
+ beforeFn := func(executionId int64, requests []BulkableRequest) {
+ atomic.AddInt64(&beforeRequests, int64(len(requests)))
+ atomic.AddInt64(&befores, 1)
+ }
+ afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) {
+ atomic.AddInt64(&afters, 1)
+ if err != nil {
+ atomic.AddInt64(&failures, 1)
+ }
+ atomic.AddInt64(&afterRequests, int64(len(requests)))
+ }
+
+ p, err := client.BulkProcessor().
+ Name("FlushInterval-1").
+ Workers(2).
+ BulkActions(-1).
+ BulkSize(-1).
+ FlushInterval(30 * time.Second). // 30 seconds to flush
+ Before(beforeFn).After(afterFn).
+ Do(context.Background())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	const numDocs = 1000 // commit thresholds are disabled and the 30s interval never fires before Close
+
+ for i := 1; i <= numDocs; i++ {
+ tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ p.Add(request)
+ }
+
+ // Should not flush because 30s > 1s
+ time.Sleep(1 * time.Second)
+
+	// Close should commit all outstanding requests (without counting as a flush)
+ err = p.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if p.stats.Flushed != 0 {
+ t.Errorf("expected no flush; got: %d", p.stats.Flushed)
+ }
+ if got, want := beforeRequests, int64(numDocs); got != want {
+ t.Errorf("expected %d requests to before callback; got: %d", want, got)
+ }
+ if got, want := afterRequests, int64(numDocs); got != want {
+ t.Errorf("expected %d requests to after callback; got: %d", want, got)
+ }
+ if befores == 0 {
+ t.Error("expected at least 1 call to before callback")
+ }
+ if afters == 0 {
+ t.Error("expected at least 1 call to after callback")
+ }
+ if failures != 0 {
+ t.Errorf("expected 0 calls to failure callback; got: %d", failures)
+ }
+
+ // Check number of documents that were bulk indexed
+ _, err = p.c.Flush(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ count, err := p.c.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != int64(numDocs) {
+ t.Fatalf("expected %d documents; got: %d", numDocs, count)
+ }
+}
+
+func TestBulkProcessorFlush(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ p, err := client.BulkProcessor().
+ Name("ManualFlush").
+ Workers(10).
+ BulkActions(-1).
+ BulkSize(-1).
+ FlushInterval(30 * time.Second). // 30 seconds to flush
+ Stats(true).
+ Do(context.Background())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ const numDocs = 100
+
+ for i := 1; i <= numDocs; i++ {
+ tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ p.Add(request)
+ }
+
+ // Should not flush because 30s > 1s
+ time.Sleep(1 * time.Second)
+
+ // No flush yet
+ stats := p.Stats()
+ if stats.Flushed != 0 {
+		t.Errorf("expected no flush; got: %d", stats.Flushed)
+ }
+
+ // Manual flush
+ err = p.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ time.Sleep(1 * time.Second)
+
+ // Now flushed
+ stats = p.Stats()
+	if got, want := stats.Flushed, int64(1); got != want {
+ t.Errorf("expected %d flush; got: %d", want, got)
+ }
+
+ // Close should not start another flush
+ err = p.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Still 1 flush
+ stats = p.Stats()
+	if got, want := stats.Flushed, int64(1); got != want {
+ t.Errorf("expected %d flush; got: %d", want, got)
+ }
+
+ // Check number of documents that were bulk indexed
+ _, err = p.c.Flush(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ count, err := p.c.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != int64(numDocs) {
+ t.Fatalf("expected %d documents; got: %d", numDocs, count)
+ }
+}
+
+// -- Helper --
+
+func testBulkProcessor(t *testing.T, numDocs int, svc *BulkProcessorService) {
+ var beforeRequests int64
+ var befores int64
+ var afters int64
+ var failures int64
+ var afterRequests int64
+
+ beforeFn := func(executionId int64, requests []BulkableRequest) {
+ atomic.AddInt64(&beforeRequests, int64(len(requests)))
+ atomic.AddInt64(&befores, 1)
+ }
+ afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) {
+ atomic.AddInt64(&afters, 1)
+ if err != nil {
+ atomic.AddInt64(&failures, 1)
+ }
+ atomic.AddInt64(&afterRequests, int64(len(requests)))
+ }
+
+ p, err := svc.Before(beforeFn).After(afterFn).Stats(true).Do(context.Background())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 1; i <= numDocs; i++ {
+ tweet := tweet{User: "olivere", Message: fmt.Sprintf("%07d. %s", i, randomString(1+rand.Intn(63)))}
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ p.Add(request)
+ }
+
+ err = p.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stats := p.Stats()
+
+ if stats.Flushed != 0 {
+ t.Errorf("expected no flush; got: %d", stats.Flushed)
+ }
+ if stats.Committed <= 0 {
+ t.Errorf("expected committed > %d; got: %d", 0, stats.Committed)
+ }
+ if got, want := stats.Indexed, int64(numDocs); got != want {
+ t.Errorf("expected indexed = %d; got: %d", want, got)
+ }
+ if got, want := stats.Created, int64(0); got != want {
+ t.Errorf("expected created = %d; got: %d", want, got)
+ }
+ if got, want := stats.Updated, int64(0); got != want {
+ t.Errorf("expected updated = %d; got: %d", want, got)
+ }
+ if got, want := stats.Deleted, int64(0); got != want {
+ t.Errorf("expected deleted = %d; got: %d", want, got)
+ }
+ if got, want := stats.Succeeded, int64(numDocs); got != want {
+ t.Errorf("expected succeeded = %d; got: %d", want, got)
+ }
+ if got, want := stats.Failed, int64(0); got != want {
+ t.Errorf("expected failed = %d; got: %d", want, got)
+ }
+ if got, want := beforeRequests, int64(numDocs); got != want {
+ t.Errorf("expected %d requests to before callback; got: %d", want, got)
+ }
+ if got, want := afterRequests, int64(numDocs); got != want {
+ t.Errorf("expected %d requests to after callback; got: %d", want, got)
+ }
+ if befores == 0 {
+ t.Error("expected at least 1 call to before callback")
+ }
+ if afters == 0 {
+ t.Error("expected at least 1 call to after callback")
+ }
+ if failures != 0 {
+ t.Errorf("expected 0 calls to failure callback; got: %d", failures)
+ }
+
+ // Check number of documents that were bulk indexed
+ _, err = p.c.Flush(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ count, err := p.c.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != int64(numDocs) {
+ t.Fatalf("expected %d documents; got: %d", numDocs, count)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/bulk_request.go b/vendor/github.com/olivere/elastic/bulk_request.go
new file mode 100644
index 000000000..ce3bf0768
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_request.go
@@ -0,0 +1,17 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+)
+
+// -- Bulkable request (index/update/delete) --
+
+// BulkableRequest is a generic interface to bulkable requests.
+type BulkableRequest interface {
+ fmt.Stringer
+ Source() ([]string, error)
+}
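+
+// For illustration, Source returns the request in the bulk API's
+// newline-delimited form: an action-and-meta-data line, optionally
+// followed by a source line (a sketch; the exact JSON depends on the
+// concrete request type):
+//
+//	{"index":{"_index":"test","_type":"doc","_id":"1"}}
+//	{"user":"olivere","message":"Welcome to Golang and Elasticsearch."}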
diff --git a/vendor/github.com/olivere/elastic/bulk_test.go b/vendor/github.com/olivere/elastic/bulk_test.go
new file mode 100644
index 000000000..f31ed6613
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_test.go
@@ -0,0 +1,600 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func TestBulk(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
+
+ bulkRequest := client.Bulk()
+ bulkRequest = bulkRequest.Add(index1Req)
+ bulkRequest = bulkRequest.Add(index2Req)
+ bulkRequest = bulkRequest.Add(delete1Req)
+
+ if bulkRequest.NumberOfActions() != 3 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
+ }
+
+ bulkResponse, err := bulkRequest.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ }
+
+ if bulkRequest.NumberOfActions() != 0 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
+ }
+
+ // Document with Id="1" should not exist
+ exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exists {
+ t.Errorf("expected exists %v; got %v", false, exists)
+ }
+
+ // Document with Id="2" should exist
+ exists, err = client.Exists().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Errorf("expected exists %v; got %v", true, exists)
+ }
+
+ // Update
+ updateDoc := struct {
+ Retweets int `json:"retweets"`
+ }{
+ 42,
+ }
+ update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").Doc(&updateDoc)
+ bulkRequest = client.Bulk()
+ bulkRequest = bulkRequest.Add(update1Req)
+
+ if bulkRequest.NumberOfActions() != 1 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
+ }
+
+ bulkResponse, err = bulkRequest.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ }
+
+ if bulkRequest.NumberOfActions() != 0 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
+ }
+
+	// Document with Id="2" should have a retweets count of 42
+ doc, err := client.Get().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if doc == nil {
+ t.Fatal("expected doc to be != nil; got nil")
+ }
+ if !doc.Found {
+ t.Fatalf("expected doc to be found; got found = %v", doc.Found)
+ }
+ if doc.Source == nil {
+ t.Fatal("expected doc source to be != nil; got nil")
+ }
+ var updatedTweet tweet
+ err = json.Unmarshal(*doc.Source, &updatedTweet)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if updatedTweet.Retweets != 42 {
+ t.Errorf("expected updated tweet retweets = %v; got %v", 42, updatedTweet.Retweets)
+ }
+
+ // Update with script
+ update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").
+ RetryOnConflict(3).
+ Script(NewScript("ctx._source.retweets += params.v").Param("v", 1))
+ bulkRequest = client.Bulk()
+ bulkRequest = bulkRequest.Add(update2Req)
+ if bulkRequest.NumberOfActions() != 1 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
+ }
+ bulkResponse, err = bulkRequest.Refresh("wait_for").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ }
+
+ if bulkRequest.NumberOfActions() != 0 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
+ }
+
+	// Document with Id="2" should have a retweets count of 43
+ doc, err = client.Get().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if doc == nil {
+ t.Fatal("expected doc to be != nil; got nil")
+ }
+ if !doc.Found {
+ t.Fatalf("expected doc to be found; got found = %v", doc.Found)
+ }
+ if doc.Source == nil {
+ t.Fatal("expected doc source to be != nil; got nil")
+ }
+ err = json.Unmarshal(*doc.Source, &updatedTweet)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if updatedTweet.Retweets != 43 {
+ t.Errorf("expected updated tweet retweets = %v; got %v", 43, updatedTweet.Retweets)
+ }
+}
+
+func TestBulkWithIndexSetOnClient(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1).Routing("1")
+ index2Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
+
+ bulkRequest := client.Bulk().Index(testIndexName).Type("doc")
+ bulkRequest = bulkRequest.Add(index1Req)
+ bulkRequest = bulkRequest.Add(index2Req)
+ bulkRequest = bulkRequest.Add(delete1Req)
+
+ if bulkRequest.NumberOfActions() != 3 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
+ }
+
+ bulkResponse, err := bulkRequest.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ }
+
+ // Document with Id="1" should not exist
+ exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exists {
+ t.Errorf("expected exists %v; got %v", false, exists)
+ }
+
+ // Document with Id="2" should exist
+ exists, err = client.Exists().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Errorf("expected exists %v; got %v", true, exists)
+ }
+}
+
+func TestBulkIndexDeleteUpdate(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+ //client := setupTestClientAndCreateIndexAndLog(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
+ update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").
+ ReturnSource(true).
+ Doc(struct {
+ Retweets int `json:"retweets"`
+ }{
+ Retweets: 42,
+ })
+
+ bulkRequest := client.Bulk()
+ bulkRequest = bulkRequest.Add(index1Req)
+ bulkRequest = bulkRequest.Add(index2Req)
+ bulkRequest = bulkRequest.Add(delete1Req)
+ bulkRequest = bulkRequest.Add(update2Req)
+
+ if bulkRequest.NumberOfActions() != 4 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions())
+ }
+
+ expected := `{"index":{"_index":"` + testIndexName + `","_id":"1","_type":"doc"}}
+{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"}
+{"create":{"_index":"` + testIndexName + `","_id":"2","_type":"doc"}}
+{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"}
+{"delete":{"_index":"` + testIndexName + `","_type":"doc","_id":"1"}}
+{"update":{"_index":"` + testIndexName + `","_type":"doc","_id":"2"}}
+{"doc":{"retweets":42},"_source":true}
+`
+ got, err := bulkRequest.bodyAsString()
+ if err != nil {
+ t.Fatalf("expected no error, got: %v", err)
+ }
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+
+ // Run the bulk request
+ bulkResponse, err := bulkRequest.Pretty(true).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ }
+ if bulkResponse.Took == 0 {
+ t.Errorf("expected took to be > 0; got %d", bulkResponse.Took)
+ }
+ if bulkResponse.Errors {
+ t.Errorf("expected errors to be %v; got %v", false, bulkResponse.Errors)
+ }
+ if len(bulkResponse.Items) != 4 {
+ t.Fatalf("expected 4 result items; got %d", len(bulkResponse.Items))
+ }
+
+ // Indexed actions
+ indexed := bulkResponse.Indexed()
+ if indexed == nil {
+ t.Fatal("expected indexed to be != nil; got nil")
+ }
+ if len(indexed) != 1 {
+ t.Fatalf("expected len(indexed) == %d; got %d", 1, len(indexed))
+ }
+ if indexed[0].Id != "1" {
+ t.Errorf("expected indexed[0].Id == %s; got %s", "1", indexed[0].Id)
+ }
+ if indexed[0].Status != 201 {
+ t.Errorf("expected indexed[0].Status == %d; got %d", 201, indexed[0].Status)
+ }
+
+ // Created actions
+ created := bulkResponse.Created()
+ if created == nil {
+ t.Fatal("expected created to be != nil; got nil")
+ }
+ if len(created) != 1 {
+ t.Fatalf("expected len(created) == %d; got %d", 1, len(created))
+ }
+ if created[0].Id != "2" {
+ t.Errorf("expected created[0].Id == %s; got %s", "2", created[0].Id)
+ }
+ if created[0].Status != 201 {
+ t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status)
+ }
+ if want, have := "created", created[0].Result; want != have {
+ t.Errorf("expected created[0].Result == %q; got %q", want, have)
+ }
+
+ // Deleted actions
+ deleted := bulkResponse.Deleted()
+ if deleted == nil {
+ t.Fatal("expected deleted to be != nil; got nil")
+ }
+ if len(deleted) != 1 {
+ t.Fatalf("expected len(deleted) == %d; got %d", 1, len(deleted))
+ }
+ if deleted[0].Id != "1" {
+ t.Errorf("expected deleted[0].Id == %s; got %s", "1", deleted[0].Id)
+ }
+ if deleted[0].Status != 200 {
+ t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status)
+ }
+ if want, have := "deleted", deleted[0].Result; want != have {
+ t.Errorf("expected deleted[0].Result == %q; got %q", want, have)
+ }
+
+ // Updated actions
+ updated := bulkResponse.Updated()
+ if updated == nil {
+ t.Fatal("expected updated to be != nil; got nil")
+ }
+ if len(updated) != 1 {
+ t.Fatalf("expected len(updated) == %d; got %d", 1, len(updated))
+ }
+ if updated[0].Id != "2" {
+ t.Errorf("expected updated[0].Id == %s; got %s", "2", updated[0].Id)
+ }
+ if updated[0].Status != 200 {
+ t.Errorf("expected updated[0].Status == %d; got %d", 200, updated[0].Status)
+ }
+ if updated[0].Version != 2 {
+ t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version)
+ }
+ if want, have := "updated", updated[0].Result; want != have {
+ t.Errorf("expected updated[0].Result == %q; got %q", want, have)
+ }
+ if updated[0].GetResult == nil {
+ t.Fatalf("expected updated[0].GetResult to be != nil; got nil")
+ }
+ if updated[0].GetResult.Source == nil {
+ t.Fatalf("expected updated[0].GetResult.Source to be != nil; got nil")
+ }
+ if want, have := true, updated[0].GetResult.Found; want != have {
+ t.Fatalf("expected updated[0].GetResult.Found to be != %v; got %v", want, have)
+ }
+ var doc tweet
+ if err := json.Unmarshal(*updated[0].GetResult.Source, &doc); err != nil {
+ t.Fatalf("expected to unmarshal updated[0].GetResult.Source; got %v", err)
+ }
+ if want, have := 42, doc.Retweets; want != have {
+ t.Fatalf("expected updated tweet to have Retweets = %v; got %v", want, have)
+ }
+
+ // Succeeded actions
+ succeeded := bulkResponse.Succeeded()
+ if succeeded == nil {
+ t.Fatal("expected succeeded to be != nil; got nil")
+ }
+ if len(succeeded) != 4 {
+ t.Fatalf("expected len(succeeded) == %d; got %d", 4, len(succeeded))
+ }
+
+ // ById
+ id1Results := bulkResponse.ById("1")
+ if id1Results == nil {
+ t.Fatal("expected id1Results to be != nil; got nil")
+ }
+ if len(id1Results) != 2 {
+ t.Fatalf("expected len(id1Results) == %d; got %d", 2, len(id1Results))
+ }
+ if id1Results[0].Id != "1" {
+ t.Errorf("expected id1Results[0].Id == %s; got %s", "1", id1Results[0].Id)
+ }
+ if id1Results[0].Status != 201 {
+ t.Errorf("expected id1Results[0].Status == %d; got %d", 201, id1Results[0].Status)
+ }
+ if id1Results[0].Version != 1 {
+ t.Errorf("expected id1Results[0].Version == %d; got %d", 1, id1Results[0].Version)
+ }
+ if id1Results[1].Id != "1" {
+ t.Errorf("expected id1Results[1].Id == %s; got %s", "1", id1Results[1].Id)
+ }
+ if id1Results[1].Status != 200 {
+ t.Errorf("expected id1Results[1].Status == %d; got %d", 200, id1Results[1].Status)
+ }
+ if id1Results[1].Version != 2 {
+ t.Errorf("expected id1Results[1].Version == %d; got %d", 2, id1Results[1].Version)
+ }
+}
+
+func TestFailedBulkRequests(t *testing.T) {
+ js := `{
+ "took" : 2,
+ "errors" : true,
+ "items" : [ {
+ "index" : {
+ "_index" : "elastic-test",
+ "_type" : "doc",
+ "_id" : "1",
+ "_version" : 1,
+ "status" : 201
+ }
+ }, {
+ "create" : {
+ "_index" : "elastic-test",
+ "_type" : "doc",
+ "_id" : "2",
+ "_version" : 1,
+ "status" : 423,
+ "error" : {
+ "type":"routing_missing_exception",
+ "reason":"routing is required for [elastic-test2]/[comment]/[1]"
+ }
+ }
+ }, {
+ "delete" : {
+ "_index" : "elastic-test",
+ "_type" : "doc",
+ "_id" : "1",
+ "_version" : 2,
+ "status" : 404,
+ "found" : false
+ }
+ }, {
+ "update" : {
+ "_index" : "elastic-test",
+ "_type" : "doc",
+ "_id" : "2",
+ "_version" : 2,
+ "status" : 200
+ }
+ } ]
+}`
+
+ var resp BulkResponse
+ err := json.Unmarshal([]byte(js), &resp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ failed := resp.Failed()
+ if len(failed) != 2 {
+ t.Errorf("expected %d failed items; got: %d", 2, len(failed))
+ }
+}
+
+func TestBulkEstimatedSizeInBytes(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
+ update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").
+ Doc(struct {
+ Retweets int `json:"retweets"`
+ }{
+ Retweets: 42,
+ })
+
+ bulkRequest := client.Bulk()
+ bulkRequest = bulkRequest.Add(index1Req)
+ bulkRequest = bulkRequest.Add(index2Req)
+ bulkRequest = bulkRequest.Add(delete1Req)
+ bulkRequest = bulkRequest.Add(update2Req)
+
+ if bulkRequest.NumberOfActions() != 4 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions())
+ }
+
+ // The estimated size of the bulk request in bytes must be at least
+	// the length of the request body.
+ raw, err := bulkRequest.bodyAsString()
+ if err != nil {
+ t.Fatal(err)
+ }
+ rawlen := int64(len([]byte(raw)))
+
+ if got, want := bulkRequest.EstimatedSizeInBytes(), rawlen; got < want {
+		t.Errorf("expected EstimatedSizeInBytes >= %d; got: %v", want, got)
+ }
+
+ // Reset should also reset the calculated estimated byte size
+ bulkRequest.reset()
+
+ if got, want := bulkRequest.EstimatedSizeInBytes(), int64(0); got != want {
+ t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got)
+ }
+}
+
+func TestBulkEstimateSizeInBytesLength(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+ s := client.Bulk()
+ r := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
+ s = s.Add(r)
+ if got, want := s.estimateSizeInBytes(r), int64(1+len(r.String())); got != want {
+ t.Fatalf("expected %d; got: %d", want, got)
+ }
+}
+
+func TestBulkContentType(t *testing.T) {
+ var header http.Header
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ header = r.Header
+ fmt.Fprintln(w, `{}`)
+ }))
+ defer ts.Close()
+
+ client, err := NewSimpleClient(SetURL(ts.URL))
+ if err != nil {
+ t.Fatal(err)
+ }
+ indexReq := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."})
+ if _, err := client.Bulk().Add(indexReq).Do(context.Background()); err != nil {
+ t.Fatal(err)
+ }
+ if header == nil {
+ t.Fatalf("expected header, got %v", header)
+ }
+ if want, have := "application/x-ndjson", header.Get("Content-Type"); want != have {
+ t.Fatalf("Content-Type: want %q, have %q", want, have)
+ }
+}
+
+// -- Benchmarks --
+
+var benchmarkBulkEstimatedSizeInBytes int64
+
+func BenchmarkBulkEstimatedSizeInBytesWith1Request(b *testing.B) {
+ client := setupTestClientAndCreateIndex(b)
+ s := client.Bulk()
+ var result int64
+ for n := 0; n < b.N; n++ {
+ s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"1"}))
+ s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"2"}))
+ s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1"))
+ result = s.EstimatedSizeInBytes()
+ s.reset()
+ }
+ b.ReportAllocs()
+	benchmarkBulkEstimatedSizeInBytes = result // ensure the compiler doesn't optimize the result away
+}
+
+func BenchmarkBulkEstimatedSizeInBytesWith100Requests(b *testing.B) {
+ client := setupTestClientAndCreateIndex(b)
+ s := client.Bulk()
+ var result int64
+ for n := 0; n < b.N; n++ {
+ for i := 0; i < 100; i++ {
+ s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"1"}))
+ s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"2"}))
+ s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1"))
+ }
+ result = s.EstimatedSizeInBytes()
+ s.reset()
+ }
+ b.ReportAllocs()
+	benchmarkBulkEstimatedSizeInBytes = result // ensure the compiler doesn't optimize the result away
+}
+
+func BenchmarkBulkAllocs(b *testing.B) {
+ b.Run("1000 docs with 64 byte", func(b *testing.B) { benchmarkBulkAllocs(b, 64, 1000) })
+ b.Run("1000 docs with 1 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 1024, 1000) })
+ b.Run("1000 docs with 4 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 4096, 1000) })
+ b.Run("1000 docs with 16 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 16*1024, 1000) })
+ b.Run("1000 docs with 64 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 64*1024, 1000) })
+ b.Run("1000 docs with 256 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 256*1024, 1000) })
+ b.Run("1000 docs with 1 MiB", func(b *testing.B) { benchmarkBulkAllocs(b, 1024*1024, 1000) })
+}
+
+const (
+ charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-"
+)
+
+func benchmarkBulkAllocs(b *testing.B, size, num int) {
+ buf := make([]byte, size)
+ for i := range buf {
+ buf[i] = charset[rand.Intn(len(charset))]
+ }
+
+	s := &BulkService{}
+	for n := 0; n < num; n++ {
+		s = s.Add(NewBulkIndexRequest().Index("test").Type("doc").Id("1").Doc(struct {
+			S string `json:"s"`
+		}{
+			S: string(buf),
+		}))
+	}
+ for i := 0; i < b.N; i++ {
+ s.bodyAsString()
+ }
+ b.ReportAllocs()
+}
diff --git a/vendor/github.com/olivere/elastic/bulk_update_request.go b/vendor/github.com/olivere/elastic/bulk_update_request.go
new file mode 100644
index 000000000..50e5adb8f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_update_request.go
@@ -0,0 +1,298 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+//go:generate easyjson bulk_update_request.go
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// BulkUpdateRequest is a request to update a document in Elasticsearch.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
+// for details.
+type BulkUpdateRequest struct {
+ BulkableRequest
+ index string
+ typ string
+ id string
+
+ routing string
+ parent string
+ script *Script
+ scriptedUpsert *bool
+ version int64 // default is MATCH_ANY
+ versionType string // default is "internal"
+ retryOnConflict *int
+ upsert interface{}
+ docAsUpsert *bool
+ detectNoop *bool
+ doc interface{}
+ returnSource *bool
+
+ source []string
+
+ useEasyJSON bool
+}
+
+//easyjson:json
+type bulkUpdateRequestCommand map[string]bulkUpdateRequestCommandOp
+
+//easyjson:json
+type bulkUpdateRequestCommandOp struct {
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Parent string `json:"parent,omitempty"`
+ // RetryOnConflict is "_retry_on_conflict" for 6.0 and "retry_on_conflict" for 6.1+.
+ RetryOnConflict *int `json:"retry_on_conflict,omitempty"`
+ Routing string `json:"routing,omitempty"`
+ Version int64 `json:"version,omitempty"`
+ VersionType string `json:"version_type,omitempty"`
+}
+
+//easyjson:json
+type bulkUpdateRequestCommandData struct {
+ DetectNoop *bool `json:"detect_noop,omitempty"`
+ Doc interface{} `json:"doc,omitempty"`
+ DocAsUpsert *bool `json:"doc_as_upsert,omitempty"`
+ Script interface{} `json:"script,omitempty"`
+ ScriptedUpsert *bool `json:"scripted_upsert,omitempty"`
+ Upsert interface{} `json:"upsert,omitempty"`
+ Source *bool `json:"_source,omitempty"`
+}
+
+// NewBulkUpdateRequest returns a new BulkUpdateRequest.
+func NewBulkUpdateRequest() *BulkUpdateRequest {
+ return &BulkUpdateRequest{}
+}
+
+// UseEasyJSON is an experimental setting that enables serialization
+// with github.com/mailru/easyjson, which should result in faster
+// serialization and fewer allocations, but removes compatibility with
+// encoding/json and relies on unsafe, among other caveats.
+// See https://github.com/mailru/easyjson#issues-notes-and-limitations
+// for details. This setting is disabled by default.
+func (r *BulkUpdateRequest) UseEasyJSON(enable bool) *BulkUpdateRequest {
+ r.useEasyJSON = enable
+ return r
+}
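+
+// For example (a sketch; the document value is an assumption):
+//
+//	r := NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").
+//		Doc(map[string]interface{}{"counter": 42}).
+//		UseEasyJSON(true) // opt in to easyjson serialization
+//	lines, err := r.Source()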
+
+// Index specifies the Elasticsearch index to use for this update request.
+// If unspecified, the index set on the BulkService will be used.
+func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
+ r.index = index
+ r.source = nil
+ return r
+}
+
+// Type specifies the Elasticsearch type to use for this update request.
+// If unspecified, the type set on the BulkService will be used.
+func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest {
+ r.typ = typ
+ r.source = nil
+ return r
+}
+
+// Id specifies the identifier of the document to update.
+func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest {
+ r.id = id
+ r.source = nil
+ return r
+}
+
+// Routing specifies a routing value for the request.
+func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest {
+ r.routing = routing
+ r.source = nil
+ return r
+}
+
+// Parent specifies the identifier of the parent document (if available).
+func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
+ r.parent = parent
+ r.source = nil
+ return r
+}
+
+// Script specifies an update script.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html#bulk-update
+// and https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html
+// for details.
+func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest {
+ r.script = script
+ r.source = nil
+ return r
+}
+
+// ScriptedUpsert specifies whether your script should run regardless of
+// whether the document exists or not.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-update.html#_literal_scripted_upsert_literal
+func (r *BulkUpdateRequest) ScriptedUpsert(upsert bool) *BulkUpdateRequest {
+ r.scriptedUpsert = &upsert
+ r.source = nil
+ return r
+}
+
+// RetryOnConflict specifies how often to retry in case of a version conflict.
+func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
+ r.retryOnConflict = &retryOnConflict
+ r.source = nil
+ return r
+}
+
+// Version indicates the version of the document as part of an optimistic
+// concurrency model.
+func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
+ r.version = version
+ r.source = nil
+ return r
+}
+
+// VersionType can be "internal" (default), "external", "external_gte",
+// or "external_gt".
+func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
+ r.versionType = versionType
+ r.source = nil
+ return r
+}
+
+// Doc specifies the updated document.
+func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
+ r.doc = doc
+ r.source = nil
+ return r
+}
+
+// DocAsUpsert indicates whether the contents of Doc should be used as
+// the Upsert value.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-update.html#_literal_doc_as_upsert_literal
+// for details.
+func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
+ r.docAsUpsert = &docAsUpsert
+ r.source = nil
+ return r
+}
+
+// DetectNoop specifies whether changes that don't affect the document
+// should be ignored (true) or applied anyway (false). This is enabled
+// by default in Elasticsearch.
+func (r *BulkUpdateRequest) DetectNoop(detectNoop bool) *BulkUpdateRequest {
+ r.detectNoop = &detectNoop
+ r.source = nil
+ return r
+}
+
+// Upsert specifies the document to use for upserts. It will be used to
+// create the document if the original document does not exist.
+func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
+ r.upsert = doc
+ r.source = nil
+ return r
+}
+
+// ReturnSource specifies whether Elasticsearch should return the source
+// after the update. In the request, this corresponds to the `_source`
+// field. It is false by default.
+func (r *BulkUpdateRequest) ReturnSource(source bool) *BulkUpdateRequest {
+ r.returnSource = &source
+ r.source = nil
+ return r
+}
+
+// String returns the on-wire representation of the update request,
+// concatenated as a single string.
+func (r *BulkUpdateRequest) String() string {
+ lines, err := r.Source()
+ if err != nil {
+ return fmt.Sprintf("error: %v", err)
+ }
+ return strings.Join(lines, "\n")
+}
+
+// Source returns the on-wire representation of the update request,
+// split into an action-and-meta-data line and an (optional) source line.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
+// for details.
+func (r *BulkUpdateRequest) Source() ([]string, error) {
+ // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+ // { "doc" : { "field1" : "value1", ... } }
+ // or
+ // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+ // { "script" : { ... } }
+
+ if r.source != nil {
+ return r.source, nil
+ }
+
+ lines := make([]string, 2)
+
+ // "update" ...
+ updateCommand := bulkUpdateRequestCommandOp{
+ Index: r.index,
+ Type: r.typ,
+ Id: r.id,
+ Routing: r.routing,
+ Parent: r.parent,
+ Version: r.version,
+ VersionType: r.versionType,
+ RetryOnConflict: r.retryOnConflict,
+ }
+ command := bulkUpdateRequestCommand{
+ "update": updateCommand,
+ }
+
+ var err error
+ var body []byte
+ if r.useEasyJSON {
+ // easyjson
+ body, err = command.MarshalJSON()
+ } else {
+ // encoding/json
+ body, err = json.Marshal(command)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ lines[0] = string(body)
+
+ // 2nd line: {"doc" : { ... }} or {"script": {...}}
+ data := bulkUpdateRequestCommandData{
+ DocAsUpsert: r.docAsUpsert,
+ DetectNoop: r.detectNoop,
+ Upsert: r.upsert,
+ ScriptedUpsert: r.scriptedUpsert,
+ Doc: r.doc,
+ Source: r.returnSource,
+ }
+ if r.script != nil {
+ script, err := r.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ data.Script = script
+ }
+
+ if r.useEasyJSON {
+ // easyjson
+ body, err = data.MarshalJSON()
+ } else {
+ // encoding/json
+ body, err = json.Marshal(data)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ lines[1] = string(body)
+
+ r.source = lines
+ return lines, nil
+}
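+
+// For a plain Doc update, the two emitted lines look like this
+// (a sketch matching the serialization tests in this change):
+//
+//	{"update":{"_index":"index1","_type":"doc","_id":"1"}}
+//	{"doc":{"counter":42}}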
diff --git a/vendor/github.com/olivere/elastic/bulk_update_request_easyjson.go b/vendor/github.com/olivere/elastic/bulk_update_request_easyjson.go
new file mode 100644
index 000000000..d2c2cbfc7
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_update_request_easyjson.go
@@ -0,0 +1,461 @@
+// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
+
+package elastic
+
+import (
+ json "encoding/json"
+ easyjson "github.com/mailru/easyjson"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+)
+
+// suppress unused package warning
+var (
+ _ *json.RawMessage
+ _ *jlexer.Lexer
+ _ *jwriter.Writer
+ _ easyjson.Marshaler
+)
+
+func easyjson1ed00e60DecodeGithubComOlivereElastic(in *jlexer.Lexer, out *bulkUpdateRequestCommandOp) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeString()
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "_index":
+ out.Index = string(in.String())
+ case "_type":
+ out.Type = string(in.String())
+ case "_id":
+ out.Id = string(in.String())
+ case "parent":
+ out.Parent = string(in.String())
+ case "retry_on_conflict":
+ if in.IsNull() {
+ in.Skip()
+ out.RetryOnConflict = nil
+ } else {
+ if out.RetryOnConflict == nil {
+ out.RetryOnConflict = new(int)
+ }
+ *out.RetryOnConflict = int(in.Int())
+ }
+ case "routing":
+ out.Routing = string(in.String())
+ case "version":
+ out.Version = int64(in.Int64())
+ case "version_type":
+ out.VersionType = string(in.String())
+ default:
+ in.SkipRecursive()
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson1ed00e60EncodeGithubComOlivereElastic(out *jwriter.Writer, in bulkUpdateRequestCommandOp) {
+ out.RawByte('{')
+ first := true
+ _ = first
+ if in.Index != "" {
+ const prefix string = ",\"_index\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Index))
+ }
+ if in.Type != "" {
+ const prefix string = ",\"_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Type))
+ }
+ if in.Id != "" {
+ const prefix string = ",\"_id\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Id))
+ }
+ if in.Parent != "" {
+ const prefix string = ",\"parent\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Parent))
+ }
+ if in.RetryOnConflict != nil {
+ const prefix string = ",\"retry_on_conflict\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int(int(*in.RetryOnConflict))
+ }
+ if in.Routing != "" {
+ const prefix string = ",\"routing\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Routing))
+ }
+ if in.Version != 0 {
+ const prefix string = ",\"version\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int64(int64(in.Version))
+ }
+ if in.VersionType != "" {
+ const prefix string = ",\"version_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.VersionType))
+ }
+ out.RawByte('}')
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkUpdateRequestCommandOp) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson1ed00e60EncodeGithubComOlivereElastic(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkUpdateRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson1ed00e60EncodeGithubComOlivereElastic(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkUpdateRequestCommandOp) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson1ed00e60DecodeGithubComOlivereElastic(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkUpdateRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson1ed00e60DecodeGithubComOlivereElastic(l, v)
+}
+func easyjson1ed00e60DecodeGithubComOlivereElastic1(in *jlexer.Lexer, out *bulkUpdateRequestCommandData) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeString()
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "detect_noop":
+ if in.IsNull() {
+ in.Skip()
+ out.DetectNoop = nil
+ } else {
+ if out.DetectNoop == nil {
+ out.DetectNoop = new(bool)
+ }
+ *out.DetectNoop = bool(in.Bool())
+ }
+ case "doc":
+ if m, ok := out.Doc.(easyjson.Unmarshaler); ok {
+ m.UnmarshalEasyJSON(in)
+ } else if m, ok := out.Doc.(json.Unmarshaler); ok {
+ _ = m.UnmarshalJSON(in.Raw())
+ } else {
+ out.Doc = in.Interface()
+ }
+ case "doc_as_upsert":
+ if in.IsNull() {
+ in.Skip()
+ out.DocAsUpsert = nil
+ } else {
+ if out.DocAsUpsert == nil {
+ out.DocAsUpsert = new(bool)
+ }
+ *out.DocAsUpsert = bool(in.Bool())
+ }
+ case "script":
+ if m, ok := out.Script.(easyjson.Unmarshaler); ok {
+ m.UnmarshalEasyJSON(in)
+ } else if m, ok := out.Script.(json.Unmarshaler); ok {
+ _ = m.UnmarshalJSON(in.Raw())
+ } else {
+ out.Script = in.Interface()
+ }
+ case "scripted_upsert":
+ if in.IsNull() {
+ in.Skip()
+ out.ScriptedUpsert = nil
+ } else {
+ if out.ScriptedUpsert == nil {
+ out.ScriptedUpsert = new(bool)
+ }
+ *out.ScriptedUpsert = bool(in.Bool())
+ }
+ case "upsert":
+ if m, ok := out.Upsert.(easyjson.Unmarshaler); ok {
+ m.UnmarshalEasyJSON(in)
+ } else if m, ok := out.Upsert.(json.Unmarshaler); ok {
+ _ = m.UnmarshalJSON(in.Raw())
+ } else {
+ out.Upsert = in.Interface()
+ }
+ case "_source":
+ if in.IsNull() {
+ in.Skip()
+ out.Source = nil
+ } else {
+ if out.Source == nil {
+ out.Source = new(bool)
+ }
+ *out.Source = bool(in.Bool())
+ }
+ default:
+ in.SkipRecursive()
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson1ed00e60EncodeGithubComOlivereElastic1(out *jwriter.Writer, in bulkUpdateRequestCommandData) {
+ out.RawByte('{')
+ first := true
+ _ = first
+ if in.DetectNoop != nil {
+ const prefix string = ",\"detect_noop\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Bool(bool(*in.DetectNoop))
+ }
+ if in.Doc != nil {
+ const prefix string = ",\"doc\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ if m, ok := in.Doc.(easyjson.Marshaler); ok {
+ m.MarshalEasyJSON(out)
+ } else if m, ok := in.Doc.(json.Marshaler); ok {
+ out.Raw(m.MarshalJSON())
+ } else {
+ out.Raw(json.Marshal(in.Doc))
+ }
+ }
+ if in.DocAsUpsert != nil {
+ const prefix string = ",\"doc_as_upsert\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Bool(bool(*in.DocAsUpsert))
+ }
+ if in.Script != nil {
+ const prefix string = ",\"script\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ if m, ok := in.Script.(easyjson.Marshaler); ok {
+ m.MarshalEasyJSON(out)
+ } else if m, ok := in.Script.(json.Marshaler); ok {
+ out.Raw(m.MarshalJSON())
+ } else {
+ out.Raw(json.Marshal(in.Script))
+ }
+ }
+ if in.ScriptedUpsert != nil {
+ const prefix string = ",\"scripted_upsert\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Bool(bool(*in.ScriptedUpsert))
+ }
+ if in.Upsert != nil {
+ const prefix string = ",\"upsert\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ if m, ok := in.Upsert.(easyjson.Marshaler); ok {
+ m.MarshalEasyJSON(out)
+ } else if m, ok := in.Upsert.(json.Marshaler); ok {
+ out.Raw(m.MarshalJSON())
+ } else {
+ out.Raw(json.Marshal(in.Upsert))
+ }
+ }
+ if in.Source != nil {
+ const prefix string = ",\"_source\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Bool(bool(*in.Source))
+ }
+ out.RawByte('}')
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkUpdateRequestCommandData) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson1ed00e60EncodeGithubComOlivereElastic1(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkUpdateRequestCommandData) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson1ed00e60EncodeGithubComOlivereElastic1(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkUpdateRequestCommandData) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson1ed00e60DecodeGithubComOlivereElastic1(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkUpdateRequestCommandData) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson1ed00e60DecodeGithubComOlivereElastic1(l, v)
+}
+func easyjson1ed00e60DecodeGithubComOlivereElastic2(in *jlexer.Lexer, out *bulkUpdateRequestCommand) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ in.Skip()
+ } else {
+ in.Delim('{')
+ if !in.IsDelim('}') {
+ *out = make(bulkUpdateRequestCommand)
+ } else {
+ *out = nil
+ }
+ for !in.IsDelim('}') {
+ key := string(in.String())
+ in.WantColon()
+ var v1 bulkUpdateRequestCommandOp
+ (v1).UnmarshalEasyJSON(in)
+ (*out)[key] = v1
+ in.WantComma()
+ }
+ in.Delim('}')
+ }
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson1ed00e60EncodeGithubComOlivereElastic2(out *jwriter.Writer, in bulkUpdateRequestCommand) {
+ if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 {
+ out.RawString(`null`)
+ } else {
+ out.RawByte('{')
+ v2First := true
+ for v2Name, v2Value := range in {
+ if v2First {
+ v2First = false
+ } else {
+ out.RawByte(',')
+ }
+ out.String(string(v2Name))
+ out.RawByte(':')
+ (v2Value).MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+ }
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkUpdateRequestCommand) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson1ed00e60EncodeGithubComOlivereElastic2(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkUpdateRequestCommand) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson1ed00e60EncodeGithubComOlivereElastic2(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkUpdateRequestCommand) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson1ed00e60DecodeGithubComOlivereElastic2(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkUpdateRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson1ed00e60DecodeGithubComOlivereElastic2(l, v)
+}
diff --git a/vendor/github.com/olivere/elastic/bulk_update_request_test.go b/vendor/github.com/olivere/elastic/bulk_update_request_test.go
new file mode 100644
index 000000000..53e73bd40
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/bulk_update_request_test.go
@@ -0,0 +1,149 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestBulkUpdateRequestSerialization(t *testing.T) {
+ tests := []struct {
+ Request BulkableRequest
+ Expected []string
+ }{
+ // #0
+ {
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").Doc(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ }),
+ Expected: []string{
+ `{"update":{"_index":"index1","_type":"doc","_id":"1"}}`,
+ `{"doc":{"counter":42}}`,
+ },
+ },
+ // #1
+ {
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").
+ Routing("123").
+ RetryOnConflict(3).
+ DocAsUpsert(true).
+ Doc(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ }),
+ Expected: []string{
+ `{"update":{"_index":"index1","_type":"doc","_id":"1","retry_on_conflict":3,"routing":"123"}}`,
+ `{"doc":{"counter":42},"doc_as_upsert":true}`,
+ },
+ },
+ // #2
+ {
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").
+ RetryOnConflict(3).
+ Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)).
+ Upsert(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ }),
+ Expected: []string{
+ `{"update":{"_index":"index1","_type":"doc","_id":"1","retry_on_conflict":3}}`,
+ `{"script":{"lang":"javascript","params":{"param1":42},"source":"ctx._source.retweets += param1"},"upsert":{"counter":42}}`,
+ },
+ },
+ // #3
+ {
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").DetectNoop(true).Doc(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ }),
+ Expected: []string{
+ `{"update":{"_index":"index1","_type":"doc","_id":"1"}}`,
+ `{"detect_noop":true,"doc":{"counter":42}}`,
+ },
+ },
+ // #4
+ {
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").
+ RetryOnConflict(3).
+ ScriptedUpsert(true).
+ Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)).
+ Upsert(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ }),
+ Expected: []string{
+ `{"update":{"_index":"index1","_type":"doc","_id":"1","retry_on_conflict":3}}`,
+ `{"script":{"lang":"javascript","params":{"param1":42},"source":"ctx._source.retweets += param1"},"scripted_upsert":true,"upsert":{"counter":42}}`,
+ },
+ },
+ // #5
+ {
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("4").ReturnSource(true).Doc(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ }),
+ Expected: []string{
+ `{"update":{"_index":"index1","_type":"doc","_id":"4"}}`,
+ `{"doc":{"counter":42},"_source":true}`,
+ },
+ },
+ }
+
+ for i, test := range tests {
+ lines, err := test.Request.Source()
+ if err != nil {
+ t.Fatalf("#%d: expected no error, got: %v", i, err)
+ }
+ if lines == nil {
+ t.Fatalf("#%d: expected lines, got nil", i)
+ }
+ if len(lines) != len(test.Expected) {
+ t.Fatalf("#%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+ }
+ for j, line := range lines {
+ if line != test.Expected[j] {
+ t.Errorf("#%d: expected line #%d to be\n%s\nbut got:\n%s", i, j, test.Expected[j], line)
+ }
+ }
+ }
+}
+
+var bulkUpdateRequestSerializationResult string
+
+func BenchmarkBulkUpdateRequestSerialization(b *testing.B) {
+ b.Run("stdlib", func(b *testing.B) {
+ r := NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").Doc(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ })
+ benchmarkBulkUpdateRequestSerialization(b, r.UseEasyJSON(false))
+ })
+ b.Run("easyjson", func(b *testing.B) {
+ r := NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").Doc(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ })
+ benchmarkBulkUpdateRequestSerialization(b, r.UseEasyJSON(true))
+ })
+}
+
+func benchmarkBulkUpdateRequestSerialization(b *testing.B, r *BulkUpdateRequest) {
+ var s string
+ for n := 0; n < b.N; n++ {
+ s = r.String()
+ r.source = nil // Don't let caching spoil the benchmark
+ }
+ bulkUpdateRequestSerializationResult = s // ensure the compiler doesn't optimize
+ b.ReportAllocs()
+}
diff --git a/vendor/github.com/olivere/elastic/canonicalize.go b/vendor/github.com/olivere/elastic/canonicalize.go
new file mode 100644
index 000000000..a436f03b6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/canonicalize.go
@@ -0,0 +1,34 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "net/url"
+
+// canonicalize takes a list of URLs and returns their canonicalized form,
+// i.e. it strips everything but the scheme, userinfo, host, port, and path.
+// It also removes all trailing slashes. Invalid URLs, and URLs that do not
+// use the http or https protocol, are skipped.
+//
+// Example:
+// http://127.0.0.1:9200/?query=1 -> http://127.0.0.1:9200
+// http://127.0.0.1:9200/db1/ -> http://127.0.0.1:9200/db1
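+// 127.0.0.1:9200 -> (skipped: no http or https scheme)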
+func canonicalize(rawurls ...string) []string {
+ var canonicalized []string
+ for _, rawurl := range rawurls {
+ u, err := url.Parse(rawurl)
+ if err == nil {
+ if u.Scheme == "http" || u.Scheme == "https" {
+ // Trim trailing slashes
+ for len(u.Path) > 0 && u.Path[len(u.Path)-1] == '/' {
+ u.Path = u.Path[0 : len(u.Path)-1]
+ }
+ u.Fragment = ""
+ u.RawQuery = ""
+ canonicalized = append(canonicalized, u.String())
+ }
+ }
+ }
+ return canonicalized
+}
diff --git a/vendor/github.com/olivere/elastic/canonicalize_test.go b/vendor/github.com/olivere/elastic/canonicalize_test.go
new file mode 100644
index 000000000..86b62d498
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/canonicalize_test.go
@@ -0,0 +1,72 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestCanonicalize(t *testing.T) {
+ tests := []struct {
+ Input []string
+ Output []string
+ }{
+ // #0
+ {
+ Input: []string{"http://127.0.0.1/"},
+ Output: []string{"http://127.0.0.1"},
+ },
+ // #1
+ {
+ Input: []string{"http://127.0.0.1:9200/", "gopher://golang.org/", "http://127.0.0.1:9201"},
+ Output: []string{"http://127.0.0.1:9200", "http://127.0.0.1:9201"},
+ },
+ // #2
+ {
+ Input: []string{"http://user:secret@127.0.0.1/path?query=1#fragment"},
+ Output: []string{"http://user:secret@127.0.0.1/path"},
+ },
+ // #3
+ {
+ Input: []string{"https://somewhere.on.mars:9999/path?query=1#fragment"},
+ Output: []string{"https://somewhere.on.mars:9999/path"},
+ },
+ // #4
+ {
+ Input: []string{"https://prod1:9999/one?query=1#fragment", "https://prod2:9998/two?query=1#fragment"},
+ Output: []string{"https://prod1:9999/one", "https://prod2:9998/two"},
+ },
+ // #5
+ {
+ Input: []string{"http://127.0.0.1/one/"},
+ Output: []string{"http://127.0.0.1/one"},
+ },
+ // #6
+ {
+ Input: []string{"http://127.0.0.1/one///"},
+ Output: []string{"http://127.0.0.1/one"},
+ },
+ // #7: Invalid URL
+ {
+ Input: []string{"127.0.0.1/"},
+ Output: []string{},
+ },
+ // #8: Invalid URL
+ {
+ Input: []string{"127.0.0.1:9200"},
+ Output: []string{},
+ },
+ }
+
+ for i, test := range tests {
+ got := canonicalize(test.Input...)
+ if want, have := len(test.Output), len(got); want != have {
+ t.Fatalf("#%d: expected %d elements; got: %d", i, want, have)
+ }
+ for i := 0; i < len(got); i++ {
+ if want, have := test.Output[i], got[i]; want != have {
+ t.Errorf("#%d: expected %q; got: %q", i, want, have)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/clear_scroll.go b/vendor/github.com/olivere/elastic/clear_scroll.go
new file mode 100644
index 000000000..4f449504c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/clear_scroll.go
@@ -0,0 +1,108 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+)
+
+// ClearScrollService clears one or more scroll contexts by their ids.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#_clear_scroll_api
+// for details.
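+//
+// A minimal usage sketch (assuming a *Client and a scroll ID obtained
+// from a previous scroll request):
+//
+//   res, err := NewClearScrollService(client).ScrollId(scrollID).Do(ctx)
+//   if err != nil {
+//       // handle error
+//   }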
+type ClearScrollService struct {
+ client *Client
+ pretty bool
+ scrollId []string
+}
+
+// NewClearScrollService creates a new ClearScrollService.
+func NewClearScrollService(client *Client) *ClearScrollService {
+ return &ClearScrollService{
+ client: client,
+ scrollId: make([]string, 0),
+ }
+}
+
+// ScrollId adds one or more scroll IDs to the list of scroll contexts
+// to clear. Use _all to clear all search contexts.
+func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService {
+ s.scrollId = append(s.scrollId, scrollIds...)
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClearScrollService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path := "/_search/scroll/"
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClearScrollService) Validate() error {
+ var invalid []string
+ if len(s.scrollId) == 0 {
+ invalid = append(invalid, "ScrollId")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClearScrollService) Do(ctx context.Context) (*ClearScrollResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ body := map[string][]string{
+ "scroll_id": s.scrollId,
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ClearScrollResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ClearScrollResponse is the response of ClearScrollService.Do.
+type ClearScrollResponse struct {
+}
diff --git a/vendor/github.com/olivere/elastic/clear_scroll_test.go b/vendor/github.com/olivere/elastic/clear_scroll_test.go
new file mode 100644
index 000000000..4037d3cd6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/clear_scroll_test.go
@@ -0,0 +1,87 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ _ "net/http"
+ "testing"
+)
+
+func TestClearScroll(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+ // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ res, err := client.Scroll(testIndexName).Size(1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected results != nil; got nil")
+ }
+ if res.ScrollId == "" {
+ t.Fatalf("expected scrollId in results; got %q", res.ScrollId)
+ }
+
+ // Search should succeed
+ _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Clear scroll id
+ clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if clearScrollRes == nil {
+ t.Fatal("expected results != nil; got nil")
+ }
+
+ // Search result should fail
+ _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected scroll to fail")
+ }
+}
+
+func TestClearScrollValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ // No scroll id -> fail with error
+ res, err := NewClearScrollService(client).Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected ClearScroll to fail without scroll ids")
+ }
+ if res != nil {
+ t.Fatalf("expected result to be nil; got: %v", res)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/client.go b/vendor/github.com/olivere/elastic/client.go
new file mode 100644
index 000000000..1eb0ec54f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/client.go
@@ -0,0 +1,1786 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/olivere/elastic/config"
+)
+
+const (
+ // Version is the current version of Elastic.
+ Version = "6.1.4"
+
+ // DefaultURL is the default endpoint of Elasticsearch on the local machine.
+ // It is used e.g. when initializing a new Client without a specific URL.
+ DefaultURL = "http://127.0.0.1:9200"
+
+ // DefaultScheme is the default protocol scheme to use when sniffing
+ // the Elasticsearch cluster.
+ DefaultScheme = "http"
+
+ // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default.
+ DefaultHealthcheckEnabled = true
+
+ // DefaultHealthcheckTimeoutStartup is the time the healthcheck waits
+ // for a response from Elasticsearch on startup, i.e. when creating a
+ // client. After the client is started, a shorter timeout is commonly used
+ // (its default is specified in DefaultHealthcheckTimeout).
+ DefaultHealthcheckTimeoutStartup = 5 * time.Second
+
+ // DefaultHealthcheckTimeout specifies the time a running client waits for
+ // a response from Elasticsearch. Notice that the healthcheck timeout
+ // when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup).
+ DefaultHealthcheckTimeout = 1 * time.Second
+
+ // DefaultHealthcheckInterval is the default interval between
+ // two health checks of the nodes in the cluster.
+ DefaultHealthcheckInterval = 60 * time.Second
+
+ // DefaultSnifferEnabled specifies if the sniffer is enabled by default.
+ DefaultSnifferEnabled = true
+
+ // DefaultSnifferInterval is the interval between two sniffing procedures,
+ // i.e. the lookup of all nodes in the cluster and their addition/removal
+ // from the list of actual connections.
+ DefaultSnifferInterval = 15 * time.Minute
+
+ // DefaultSnifferTimeoutStartup is the default timeout for the sniffing
+ // process that is initiated while creating a new client. For subsequent
+ // sniffing processes, DefaultSnifferTimeout is used (by default).
+ DefaultSnifferTimeoutStartup = 5 * time.Second
+
+ // DefaultSnifferTimeout is the default timeout after which the
+ // sniffing process times out. Notice that for the initial sniffing
+ // process, DefaultSnifferTimeoutStartup is used.
+ DefaultSnifferTimeout = 2 * time.Second
+
+ // DefaultSendGetBodyAs is the HTTP method to use when elastic is sending
+ // a GET request with a body.
+ DefaultSendGetBodyAs = "GET"
+
+ // off is used to disable timeouts.
+ off = -1 * time.Second
+)
+
+var (
+ // ErrNoClient is raised when no Elasticsearch node is available.
+ ErrNoClient = errors.New("no Elasticsearch node available")
+
+ // ErrRetry is raised when a request cannot be executed after the configured
+ // number of retries.
+ ErrRetry = errors.New("cannot connect after several retries")
+
+ // ErrTimeout is raised when a request timed out, e.g. when WaitForStatus
+ // didn't return in time.
+ ErrTimeout = errors.New("timeout")
+
+ // noRetries is a retrier that does not retry.
+ noRetries = NewStopRetrier()
+)
+
+// ClientOptionFunc is a function that configures a Client.
+// It is used in NewClient.
+type ClientOptionFunc func(*Client) error
+
+// Client is an Elasticsearch client. Create one by calling NewClient.
+type Client struct {
+ c *http.Client // net/http Client to use for requests
+
+ connsMu sync.RWMutex // connsMu guards the next block
+ conns []*conn // all connections
+ cindex int // index into conns
+
+ mu sync.RWMutex // guards the next block
+ urls []string // set of URLs passed initially to the client
+ running bool // true if the client's background processes are running
+ errorlog Logger // error log for critical messages
+ infolog Logger // information log for e.g. response times
+ tracelog Logger // trace log for debugging
+ scheme string // http or https
+ healthcheckEnabled bool // healthchecks enabled or disabled
+ healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup
+ healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch
+ healthcheckInterval time.Duration // interval between healthchecks
+ healthcheckStop chan bool // notify healthchecker to stop, and notify back
+ snifferEnabled bool // sniffer enabled or disabled
+ snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup
+ snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API
+ snifferInterval time.Duration // interval between sniffing
+ snifferCallback SnifferCallback // callback to modify the sniffing decision
+ snifferStop chan bool // notify sniffer to stop, and notify back
+ decoder Decoder // used to decode data sent from Elasticsearch
+ basicAuth bool // indicates whether to send HTTP Basic Auth credentials
+ basicAuthUsername string // username for HTTP Basic Auth
+ basicAuthPassword string // password for HTTP Basic Auth
+ sendGetBodyAs string // override for when sending a GET with a body
+ requiredPlugins []string // list of required plugins
+ retrier Retrier // strategy for retries
+}
+
+// NewClient creates a new client to work with Elasticsearch.
+//
+// NewClient, by default, is meant to be long-lived and shared across
+// your application. If you need a short-lived client, e.g. for request-scope,
+// consider using NewSimpleClient instead.
+//
+// The caller can configure the new client by passing configuration options
+// to the func.
+//
+// Example:
+//
+// client, err := elastic.NewClient(
+// elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"),
+// elastic.SetBasicAuth("user", "secret"))
+//
+// If no URL is configured, Elastic uses DefaultURL by default.
+//
+// If the sniffer is enabled (the default), the new client then sniffs
+// the cluster via the Nodes Info API
+// (see https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-nodes-info.html#cluster-nodes-info).
+// It uses the URLs specified by the caller. The caller is responsible
+// for passing only URLs of nodes that belong to the same cluster.
+// This sniffing process is run on startup and periodically.
+// Use SnifferInterval to set the interval between two sniffs (default is
+// 15 minutes). In other words: By default, the client will find new nodes
+// in the cluster and remove those that are no longer available every
+// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient.
+//
+// The list of nodes found in the sniffing process will be used to make
+// connections to the REST API of Elasticsearch. These nodes are also
+// periodically checked in a shorter time frame. This process is called
+// a health check. By default, a health check is done every 60 seconds.
+// You can set a shorter or longer interval by SetHealthcheckInterval.
+// Disabling health checks is not recommended, but can be done by
+// SetHealthcheck(false).
+//
+// Connections are automatically marked as dead or healthy while
+// making requests to Elasticsearch. When a request fails, Elastic will
+// call into the Retry strategy which can be specified with SetRetry.
+// The Retry strategy is also responsible for handling backoff i.e. the time
+// to wait before starting the next request. There are various standard
+// backoff implementations, e.g. ExponentialBackoff or SimpleBackoff.
+// Retries are disabled by default.
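+//
+// As a further sketch (using only options defined in this package), a
+// client with sniffing disabled, a 30-second healthcheck interval, and
+// simple fixed-interval retries could be configured like this:
+//
+//   client, err := elastic.NewClient(
+//       elastic.SetURL("http://127.0.0.1:9200"),
+//       elastic.SetSniff(false),
+//       elastic.SetHealthcheckInterval(30*time.Second),
+//       elastic.SetRetrier(elastic.NewBackoffRetrier(elastic.NewSimpleBackoff(100, 200, 300))),
+//   )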
+//
+// If no HttpClient is configured, then http.DefaultClient is used.
+// You can use your own http.Client with some http.Transport for
+// advanced scenarios.
+//
+// An error is also returned when some configuration option is invalid or
+// the new client cannot sniff the cluster (if enabled).
+func NewClient(options ...ClientOptionFunc) (*Client, error) {
+ // Set up the client
+ c := &Client{
+ c: http.DefaultClient,
+ conns: make([]*conn, 0),
+ cindex: -1,
+ scheme: DefaultScheme,
+ decoder: &DefaultDecoder{},
+ healthcheckEnabled: DefaultHealthcheckEnabled,
+ healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup,
+ healthcheckTimeout: DefaultHealthcheckTimeout,
+ healthcheckInterval: DefaultHealthcheckInterval,
+ healthcheckStop: make(chan bool),
+ snifferEnabled: DefaultSnifferEnabled,
+ snifferTimeoutStartup: DefaultSnifferTimeoutStartup,
+ snifferTimeout: DefaultSnifferTimeout,
+ snifferInterval: DefaultSnifferInterval,
+ snifferCallback: nopSnifferCallback,
+ snifferStop: make(chan bool),
+ sendGetBodyAs: DefaultSendGetBodyAs,
+ retrier: noRetries, // no retries by default
+ }
+
+ // Run the options on it
+ for _, option := range options {
+ if err := option(c); err != nil {
+ return nil, err
+ }
+ }
+
+ // Use a default URL and normalize them
+ if len(c.urls) == 0 {
+ c.urls = []string{DefaultURL}
+ }
+ c.urls = canonicalize(c.urls...)
+
+ // If the URLs have auth info, use them here as an alternative to SetBasicAuth
+ if !c.basicAuth {
+ for _, urlStr := range c.urls {
+ u, err := url.Parse(urlStr)
+ if err == nil && u.User != nil {
+ c.basicAuth = true
+ c.basicAuthUsername = u.User.Username()
+ c.basicAuthPassword, _ = u.User.Password()
+ break
+ }
+ }
+ }
+
+ // Check if we can make a request to any of the specified URLs
+ if c.healthcheckEnabled {
+ if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil {
+ return nil, err
+ }
+ }
+
+ if c.snifferEnabled {
+ // Sniff the cluster initially
+ if err := c.sniff(c.snifferTimeoutStartup); err != nil {
+ return nil, err
+ }
+ } else {
+ // Do not sniff the cluster initially. Use the provided URLs instead.
+ for _, url := range c.urls {
+ c.conns = append(c.conns, newConn(url, url))
+ }
+ }
+
+ if c.healthcheckEnabled {
+ // Perform an initial health check
+ c.healthcheck(c.healthcheckTimeoutStartup, true)
+ }
+ // Ensure that we have at least one connection available
+ if err := c.mustActiveConn(); err != nil {
+ return nil, err
+ }
+
+ // Check the required plugins
+ for _, plugin := range c.requiredPlugins {
+ found, err := c.HasPlugin(plugin)
+ if err != nil {
+ return nil, err
+ }
+ if !found {
+ return nil, fmt.Errorf("elastic: plugin %s not found", plugin)
+ }
+ }
+
+ if c.snifferEnabled {
+ go c.sniffer() // periodically update cluster information
+ }
+ if c.healthcheckEnabled {
+ go c.healthchecker() // start a goroutine that periodically pings all nodes of the cluster
+ }
+
+ c.mu.Lock()
+ c.running = true
+ c.mu.Unlock()
+
+ return c, nil
+}
+
+// NewClientFromConfig initializes a client from a configuration.
+func NewClientFromConfig(cfg *config.Config) (*Client, error) {
+ var options []ClientOptionFunc
+ if cfg != nil {
+ if cfg.URL != "" {
+ options = append(options, SetURL(cfg.URL))
+ }
+ if cfg.Errorlog != "" {
+ f, err := os.OpenFile(cfg.Errorlog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to initialize error log")
+ }
+ l := log.New(f, "", 0)
+ options = append(options, SetErrorLog(l))
+ }
+ if cfg.Tracelog != "" {
+ f, err := os.OpenFile(cfg.Tracelog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to initialize trace log")
+ }
+ l := log.New(f, "", 0)
+ options = append(options, SetTraceLog(l))
+ }
+ if cfg.Infolog != "" {
+ f, err := os.OpenFile(cfg.Infolog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to initialize info log")
+ }
+ l := log.New(f, "", 0)
+ options = append(options, SetInfoLog(l))
+ }
+ if cfg.Username != "" || cfg.Password != "" {
+ options = append(options, SetBasicAuth(cfg.Username, cfg.Password))
+ }
+ if cfg.Sniff != nil {
+ options = append(options, SetSniff(*cfg.Sniff))
+ }
+ }
+ return NewClient(options...)
+}
+
+// NewSimpleClient creates a new short-lived Client that can be used in
+// use cases where you need e.g. one client per request.
+//
+// While NewClient by default sets up e.g. periodic health checks
+// and sniffing for new nodes in separate goroutines, NewSimpleClient does
+// not and is meant as a simple replacement where you don't need all the
+// heavy lifting of NewClient.
+//
+// NewSimpleClient does the following by default: First, all health checks
+// are disabled, including timeouts and periodic checks. Second, sniffing
+// is disabled, including timeouts and periodic checks. The number of retries
+// is set to 1. NewSimpleClient also does not start any goroutines.
+//
+// Notice that you can still override settings by passing additional options,
+// just like with NewClient.
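+//
+// Example (a sketch):
+//
+//   client, err := elastic.NewSimpleClient(elastic.SetURL("http://127.0.0.1:9200"))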
+func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) {
+ c := &Client{
+ c: http.DefaultClient,
+ conns: make([]*conn, 0),
+ cindex: -1,
+ scheme: DefaultScheme,
+ decoder: &DefaultDecoder{},
+ healthcheckEnabled: false,
+ healthcheckTimeoutStartup: off,
+ healthcheckTimeout: off,
+ healthcheckInterval: off,
+ healthcheckStop: make(chan bool),
+ snifferEnabled: false,
+ snifferTimeoutStartup: off,
+ snifferTimeout: off,
+ snifferInterval: off,
+ snifferCallback: nopSnifferCallback,
+ snifferStop: make(chan bool),
+ sendGetBodyAs: DefaultSendGetBodyAs,
+ retrier: noRetries, // no retries by default
+ }
+
+ // Run the options on it
+ for _, option := range options {
+ if err := option(c); err != nil {
+ return nil, err
+ }
+ }
+
+ // Use a default URL and normalize them
+ if len(c.urls) == 0 {
+ c.urls = []string{DefaultURL}
+ }
+ c.urls = canonicalize(c.urls...)
+
+ // If the URLs have auth info, use them here as an alternative to SetBasicAuth
+ if !c.basicAuth {
+ for _, urlStr := range c.urls {
+ u, err := url.Parse(urlStr)
+ if err == nil && u.User != nil {
+ c.basicAuth = true
+ c.basicAuthUsername = u.User.Username()
+ c.basicAuthPassword, _ = u.User.Password()
+ break
+ }
+ }
+ }
+
+ for _, url := range c.urls {
+ c.conns = append(c.conns, newConn(url, url))
+ }
+
+ // Ensure that we have at least one connection available
+ if err := c.mustActiveConn(); err != nil {
+ return nil, err
+ }
+
+ // Check the required plugins
+ for _, plugin := range c.requiredPlugins {
+ found, err := c.HasPlugin(plugin)
+ if err != nil {
+ return nil, err
+ }
+ if !found {
+ return nil, fmt.Errorf("elastic: plugin %s not found", plugin)
+ }
+ }
+
+ c.mu.Lock()
+ c.running = true
+ c.mu.Unlock()
+
+ return c, nil
+}
+
+// SetHttpClient can be used to specify the http.Client to use when making
+// HTTP requests to Elasticsearch.
+func SetHttpClient(httpClient *http.Client) ClientOptionFunc {
+ return func(c *Client) error {
+ if httpClient != nil {
+ c.c = httpClient
+ } else {
+ c.c = http.DefaultClient
+ }
+ return nil
+ }
+}
+
+// SetBasicAuth can be used to specify the HTTP Basic Auth credentials to
+// use when making HTTP requests to Elasticsearch.
+func SetBasicAuth(username, password string) ClientOptionFunc {
+ return func(c *Client) error {
+ c.basicAuthUsername = username
+ c.basicAuthPassword = password
+ c.basicAuth = c.basicAuthUsername != "" || c.basicAuthPassword != ""
+ return nil
+ }
+}
+
+// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that
+// when sniffing is enabled, these URLs are used to initially sniff the
+// cluster on startup.
+func SetURL(urls ...string) ClientOptionFunc {
+ return func(c *Client) error {
+ switch len(urls) {
+ case 0:
+ c.urls = []string{DefaultURL}
+ default:
+ c.urls = urls
+ }
+ return nil
+ }
+}
+
+// SetScheme sets the HTTP scheme to look for when sniffing (http or https).
+// This is http by default.
+func SetScheme(scheme string) ClientOptionFunc {
+ return func(c *Client) error {
+ c.scheme = scheme
+ return nil
+ }
+}
+
+// SetSniff enables or disables the sniffer (enabled by default).
+func SetSniff(enabled bool) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferEnabled = enabled
+ return nil
+ }
+}
+
+// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used
+// when creating a new client. The default is 5 seconds. Notice that the
+// timeout being used for subsequent sniffing processes is set with
+// SetSnifferTimeout.
+func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferTimeoutStartup = timeout
+ return nil
+ }
+}
+
+// SetSnifferTimeout sets the timeout for the sniffer that finds the
+// nodes in a cluster. The default is 2 seconds. Notice that the timeout
+// used when creating a new client on startup is usually greater and can
+// be set with SetSnifferTimeoutStartup.
+func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferTimeout = timeout
+ return nil
+ }
+}
+
+// SetSnifferInterval sets the interval between two sniffing processes.
+// The default interval is 15 minutes.
+func SetSnifferInterval(interval time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferInterval = interval
+ return nil
+ }
+}
+
+// SnifferCallback defines the protocol for sniffing decisions.
+type SnifferCallback func(*NodesInfoNode) bool
+
+// nopSnifferCallback is the default sniffer callback: It accepts
+// all nodes the sniffer finds.
+var nopSnifferCallback = func(*NodesInfoNode) bool { return true }
+
+// SetSnifferCallback allows the caller to modify sniffer decisions.
+// When setting the callback, the given SnifferCallback is called for
+// each (healthy) node found during the sniffing process.
+// If the callback returns false, the node is ignored: No requests
+// are routed to it.
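+//
+// For example, to route requests only to nodes whose name carries a
+// given prefix (a sketch; it assumes NodesInfoNode exposes a Name field):
+//
+//   SetSnifferCallback(func(n *NodesInfoNode) bool {
+//       return strings.HasPrefix(n.Name, "data-")
+//   })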
+func SetSnifferCallback(f SnifferCallback) ClientOptionFunc {
+ return func(c *Client) error {
+ if f != nil {
+ c.snifferCallback = f
+ }
+ return nil
+ }
+}
+
+// SetHealthcheck enables or disables healthchecks (enabled by default).
+func SetHealthcheck(enabled bool) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckEnabled = enabled
+ return nil
+ }
+}
+
+// SetHealthcheckTimeoutStartup sets the timeout for the initial health check.
+// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup).
+// Notice that timeouts for subsequent health checks can be modified with
+// SetHealthcheckTimeout.
+func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckTimeoutStartup = timeout
+ return nil
+ }
+}
+
+// SetHealthcheckTimeout sets the timeout for periodic health checks.
+// The default timeout is 1 second (see DefaultHealthcheckTimeout).
+// Notice that a different (usually larger) timeout is used for the initial
+// healthcheck, which is initiated while creating a new client.
+// The startup timeout can be modified with SetHealthcheckTimeoutStartup.
+func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckTimeout = timeout
+ return nil
+ }
+}
+
+// SetHealthcheckInterval sets the interval between two health checks.
+// The default interval is 60 seconds.
+func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckInterval = interval
+ return nil
+ }
+}
+
+// SetMaxRetries sets the maximum number of retries before giving up when
+// performing an HTTP request to Elasticsearch.
+//
+// Deprecated: Replace with a Retry implementation.
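+//
+// As a sketch, a rough equivalent of SetMaxRetries(3) using the Retrier
+// API from this package is:
+//
+//   SetRetrier(NewBackoffRetrier(NewSimpleBackoff(100, 100, 100)))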
+func SetMaxRetries(maxRetries int) ClientOptionFunc {
+ return func(c *Client) error {
+ if maxRetries < 0 {
+ return errors.New("MaxRetries must be greater than or equal to 0")
+ } else if maxRetries == 0 {
+ c.retrier = noRetries
+ } else {
+ // Create a Retrier that will wait for 100ms (+/- jitter) between requests.
+ // This resembles the old behavior with maxRetries.
+ ticks := make([]int, maxRetries)
+ for i := 0; i < len(ticks); i++ {
+ ticks[i] = 100
+ }
+ backoff := NewSimpleBackoff(ticks...)
+ c.retrier = NewBackoffRetrier(backoff)
+ }
+ return nil
+ }
+}
+
+// SetDecoder sets the Decoder to use when decoding data from Elasticsearch.
+// DefaultDecoder is used by default.
+func SetDecoder(decoder Decoder) ClientOptionFunc {
+ return func(c *Client) error {
+ if decoder != nil {
+ c.decoder = decoder
+ } else {
+ c.decoder = &DefaultDecoder{}
+ }
+ return nil
+ }
+}
+
+// SetRequiredPlugins can be used to indicate that some plugins are required
+// before a Client will be created.
+func SetRequiredPlugins(plugins ...string) ClientOptionFunc {
+ return func(c *Client) error {
+ if c.requiredPlugins == nil {
+ c.requiredPlugins = make([]string, 0)
+ }
+ c.requiredPlugins = append(c.requiredPlugins, plugins...)
+ return nil
+ }
+}
+
+// SetErrorLog sets the logger for critical messages like nodes joining
+// or leaving the cluster or failing requests. It is nil by default.
+func SetErrorLog(logger Logger) ClientOptionFunc {
+ return func(c *Client) error {
+ c.errorlog = logger
+ return nil
+ }
+}
+
+// SetInfoLog sets the logger for informational messages, e.g. requests
+// and their response times. It is nil by default.
+func SetInfoLog(logger Logger) ClientOptionFunc {
+ return func(c *Client) error {
+ c.infolog = logger
+ return nil
+ }
+}
+
+// SetTraceLog specifies the log.Logger to use for output of HTTP requests
+// and responses which is helpful during debugging. It is nil by default.
+func SetTraceLog(logger Logger) ClientOptionFunc {
+ return func(c *Client) error {
+ c.tracelog = logger
+ return nil
+ }
+}
+
+// SetSendGetBodyAs specifies the HTTP method to use when sending a GET request
+// with a body. It is GET by default.
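+//
+// Example (a sketch): SetSendGetBodyAs("POST")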
+func SetSendGetBodyAs(httpMethod string) ClientOptionFunc {
+ return func(c *Client) error {
+ c.sendGetBodyAs = httpMethod
+ return nil
+ }
+}
+
+// SetRetrier specifies the retry strategy that handles errors during
+// HTTP request/response with Elasticsearch.
+func SetRetrier(retrier Retrier) ClientOptionFunc {
+ return func(c *Client) error {
+ if retrier == nil {
+ retrier = noRetries // no retries by default
+ }
+ c.retrier = retrier
+ return nil
+ }
+}
+
+// String returns a string representation of the client status.
+func (c *Client) String() string {
+ c.connsMu.Lock()
+ conns := c.conns
+ c.connsMu.Unlock()
+
+ var buf bytes.Buffer
+ for i, conn := range conns {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(conn.String())
+ }
+ return buf.String()
+}
+
+// IsRunning returns true if the background processes of the client are
+// running, false otherwise.
+func (c *Client) IsRunning() bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.running
+}
+
+// Start starts the background processes like sniffing the cluster and
+// periodic health checks. You don't need to run Start when creating a
+// client with NewClient; the background processes are run by default.
+//
+// If the background processes are already running, this is a no-op.
+func (c *Client) Start() {
+ c.mu.RLock()
+ if c.running {
+ c.mu.RUnlock()
+ return
+ }
+ c.mu.RUnlock()
+
+ if c.snifferEnabled {
+ go c.sniffer()
+ }
+ if c.healthcheckEnabled {
+ go c.healthchecker()
+ }
+
+ c.mu.Lock()
+ c.running = true
+ c.mu.Unlock()
+
+ c.infof("elastic: client started")
+}
+
+// Stop stops the background processes that the client is running,
+// i.e. sniffing the cluster periodically and running health checks
+// on the nodes.
+//
+// If the background processes are not running, this is a no-op.
+func (c *Client) Stop() {
+ c.mu.RLock()
+ if !c.running {
+ c.mu.RUnlock()
+ return
+ }
+ c.mu.RUnlock()
+
+ if c.healthcheckEnabled {
+ c.healthcheckStop <- true
+ <-c.healthcheckStop
+ }
+
+ if c.snifferEnabled {
+ c.snifferStop <- true
+ <-c.snifferStop
+ }
+
+ c.mu.Lock()
+ c.running = false
+ c.mu.Unlock()
+
+ c.infof("elastic: client stopped")
+}
+
+// errorf logs to the error log.
+func (c *Client) errorf(format string, args ...interface{}) {
+ if c.errorlog != nil {
+ c.errorlog.Printf(format, args...)
+ }
+}
+
+// infof logs informational messages.
+func (c *Client) infof(format string, args ...interface{}) {
+ if c.infolog != nil {
+ c.infolog.Printf(format, args...)
+ }
+}
+
+// tracef logs to the trace log.
+func (c *Client) tracef(format string, args ...interface{}) {
+ if c.tracelog != nil {
+ c.tracelog.Printf(format, args...)
+ }
+}
+
+// dumpRequest dumps the given HTTP request to the trace log.
+func (c *Client) dumpRequest(r *http.Request) {
+ if c.tracelog != nil {
+ out, err := httputil.DumpRequestOut(r, true)
+ if err == nil {
+ c.tracef("%s\n", string(out))
+ }
+ }
+}
+
+// dumpResponse dumps the given HTTP response to the trace log.
+func (c *Client) dumpResponse(resp *http.Response) {
+ if c.tracelog != nil {
+ out, err := httputil.DumpResponse(resp, true)
+ if err == nil {
+ c.tracef("%s\n", string(out))
+ }
+ }
+}
+
+// sniffer periodically runs sniff.
+func (c *Client) sniffer() {
+ c.mu.RLock()
+ timeout := c.snifferTimeout
+ interval := c.snifferInterval
+ c.mu.RUnlock()
+
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-c.snifferStop:
+ // we are asked to stop, so we signal back that we're stopping now
+ c.snifferStop <- true
+ return
+ case <-ticker.C:
+ c.sniff(timeout)
+ }
+ }
+}
+
+// sniff uses the Node Info API to return the list of nodes in the cluster.
+// It uses the list of URLs passed on startup plus the list of URLs found
+// by the preceding sniffing process (if sniffing is enabled).
+//
+// If sniffing is disabled, this is a no-op.
+func (c *Client) sniff(timeout time.Duration) error {
+ c.mu.RLock()
+ if !c.snifferEnabled {
+ c.mu.RUnlock()
+ return nil
+ }
+
+ // Use all available URLs provided to sniff the cluster.
+ var urls []string
+ urlsMap := make(map[string]bool)
+
+ // Add all URLs provided on startup
+ for _, url := range c.urls {
+ urlsMap[url] = true
+ urls = append(urls, url)
+ }
+ c.mu.RUnlock()
+
+ // Add all URLs found by sniffing
+ c.connsMu.RLock()
+ for _, conn := range c.conns {
+ if !conn.IsDead() {
+ url := conn.URL()
+ if _, found := urlsMap[url]; !found {
+ urls = append(urls, url)
+ }
+ }
+ }
+ c.connsMu.RUnlock()
+
+ if len(urls) == 0 {
+ return errors.Wrap(ErrNoClient, "no URLs found")
+ }
+
+ // Start sniffing on all found URLs
+ ch := make(chan []*conn, len(urls))
+
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ for _, url := range urls {
+ go func(url string) { ch <- c.sniffNode(ctx, url) }(url)
+ }
+
+ // Wait for the results to come back, or the process times out.
+ for {
+ select {
+ case conns := <-ch:
+ if len(conns) > 0 {
+ c.updateConns(conns)
+ return nil
+ }
+ case <-ctx.Done():
+ // We get here if no cluster responds in time
+ return errors.Wrap(ErrNoClient, "sniff timeout")
+ }
+ }
+}
+
+// sniffNode sniffs a single node. This method is run as a goroutine
+// in sniff. If successful, it returns the list of node URLs extracted
+// from the result of calling Nodes Info API. Otherwise, an empty array
+// is returned.
+func (c *Client) sniffNode(ctx context.Context, url string) []*conn {
+ var nodes []*conn
+
+ // Call the Nodes Info API at /_nodes/http
+ req, err := NewRequest("GET", url+"/_nodes/http")
+ if err != nil {
+ return nodes
+ }
+
+ c.mu.RLock()
+ if c.basicAuth {
+ req.SetBasicAuth(c.basicAuthUsername, c.basicAuthPassword)
+ }
+ c.mu.RUnlock()
+
+ res, err := c.c.Do((*http.Request)(req).WithContext(ctx))
+ if err != nil {
+ return nodes
+ }
+ if res == nil {
+ return nodes
+ }
+
+ if res.Body != nil {
+ defer res.Body.Close()
+ }
+
+ var info NodesInfoResponse
+ if err := json.NewDecoder(res.Body).Decode(&info); err == nil {
+ if len(info.Nodes) > 0 {
+ for nodeID, node := range info.Nodes {
+ if c.snifferCallback(node) {
+ if node.HTTP != nil && len(node.HTTP.PublishAddress) > 0 {
+ url := c.extractHostname(c.scheme, node.HTTP.PublishAddress)
+ if url != "" {
+ nodes = append(nodes, newConn(nodeID, url))
+ }
+ }
+ }
+ }
+ }
+ }
+ return nodes
+}
+
+// reSniffHostAndPort is used to extract the hostname and port from a
+// Nodes Info API result (example: "inet[/127.0.0.1:9200]").
+var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`)
+
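+// extractHostname derives a "scheme://host:port" URL from a publish
+// address as returned by the Nodes Info API, or "" if no port is found.
+// Worked examples (derived from the code below):
+//
+//   extractHostname("http", "inet[/127.0.0.1:9200]") // "http://127.0.0.1:9200"
+//   extractHostname("http", "myhost/10.0.0.1:9200")  // "http://10.0.0.1:9200"
+//   extractHostname("http", "localhost")             // "" (no port)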
+func (c *Client) extractHostname(scheme, address string) string {
+ if strings.HasPrefix(address, "inet") {
+ m := reSniffHostAndPort.FindStringSubmatch(address)
+ if len(m) == 3 {
+ return fmt.Sprintf("%s://%s:%s", scheme, m[1], m[2])
+ }
+ }
+ s := address
+ if idx := strings.Index(s, "/"); idx >= 0 {
+ s = s[idx+1:]
+ }
+ if strings.Index(s, ":") < 0 {
+ return ""
+ }
+ return fmt.Sprintf("%s://%s", scheme, s)
+}
+
+// updateConns updates the client's connections with new information
+// gathered by a sniff operation.
+func (c *Client) updateConns(conns []*conn) {
+ c.connsMu.Lock()
+
+ // Build up new connections:
+ // If we find an existing connection, use that (including no. of failures etc.).
+ // If we find a new connection, add it.
+ var newConns []*conn
+ for _, conn := range conns {
+ var found bool
+ for _, oldConn := range c.conns {
+ if oldConn.NodeID() == conn.NodeID() {
+ // Take over the old connection
+ newConns = append(newConns, oldConn)
+ found = true
+ break
+ }
+ }
+ if !found {
+ // New connection didn't exist, so add it to our list of new conns.
+ c.infof("elastic: %s joined the cluster", conn.URL())
+ newConns = append(newConns, conn)
+ }
+ }
+
+ c.conns = newConns
+ c.cindex = -1
+ c.connsMu.Unlock()
+}
+
+// healthchecker periodically runs healthcheck.
+func (c *Client) healthchecker() {
+ c.mu.RLock()
+ timeout := c.healthcheckTimeout
+ interval := c.healthcheckInterval
+ c.mu.RUnlock()
+
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-c.healthcheckStop:
+ // we are asked to stop, so we signal back that we're stopping now
+ c.healthcheckStop <- true
+ return
+ case <-ticker.C:
+ c.healthcheck(timeout, false)
+ }
+ }
+}
+
+// healthcheck does a health check on all nodes in the cluster. Depending on
+// the node state, it marks connections as dead, sets them alive etc.
+// If healthchecks are disabled and force is false, this is a no-op.
+// The timeout specifies how long to wait for a response from Elasticsearch.
+func (c *Client) healthcheck(timeout time.Duration, force bool) {
+ c.mu.RLock()
+ if !c.healthcheckEnabled && !force {
+ c.mu.RUnlock()
+ return
+ }
+ basicAuth := c.basicAuth
+ basicAuthUsername := c.basicAuthUsername
+ basicAuthPassword := c.basicAuthPassword
+ c.mu.RUnlock()
+
+ c.connsMu.RLock()
+ conns := c.conns
+ c.connsMu.RUnlock()
+
+ for _, conn := range conns {
+ // Run the HEAD request against ES with a timeout
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ // Goroutine executes the HTTP request, returns an error and sets status
+ var status int
+ errc := make(chan error, 1)
+ go func(url string) {
+ req, err := NewRequest("HEAD", url)
+ if err != nil {
+ errc <- err
+ return
+ }
+ if basicAuth {
+ req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+ }
+ res, err := c.c.Do((*http.Request)(req).WithContext(ctx))
+ if res != nil {
+ status = res.StatusCode
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ }
+ errc <- err
+ }(conn.URL())
+
+ // Wait for the Goroutine (or its timeout)
+ select {
+ case <-ctx.Done(): // timeout
+ c.errorf("elastic: %s is dead", conn.URL())
+ conn.MarkAsDead()
+ case err := <-errc:
+ if err != nil {
+ c.errorf("elastic: %s is dead", conn.URL())
+ conn.MarkAsDead()
+ break
+ }
+ if status >= 200 && status < 300 {
+ conn.MarkAsAlive()
+ } else {
+ conn.MarkAsDead()
+ c.errorf("elastic: %s is dead [status=%d]", conn.URL(), status)
+ }
+ }
+ }
+}
+
+// startupHealthcheck is used at startup to check if the server is available
+// at all.
+func (c *Client) startupHealthcheck(timeout time.Duration) error {
+ c.mu.Lock()
+ urls := c.urls
+ basicAuth := c.basicAuth
+ basicAuthUsername := c.basicAuthUsername
+ basicAuthPassword := c.basicAuthPassword
+ c.mu.Unlock()
+
+ // If we don't get a connection after "timeout", we bail.
+ var lastErr error
+ start := time.Now()
+ for {
+ // Make a copy of the HTTP client provided via options to respect
+ // settings like Basic Auth or a user-specified http.Transport.
+ cl := new(http.Client)
+ *cl = *c.c
+ cl.Timeout = timeout
+ for _, url := range urls {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return err
+ }
+ if basicAuth {
+ req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+ }
+ res, err := cl.Do(req)
+ if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 {
+ return nil
+ } else if err != nil {
+ lastErr = err
+ }
+ }
+ time.Sleep(1 * time.Second)
+ if time.Since(start) > timeout {
+ break
+ }
+ }
+ if lastErr != nil {
+ return errors.Wrapf(ErrNoClient, "health check timeout: %v", lastErr)
+ }
+ return errors.Wrap(ErrNoClient, "health check timeout")
+}
+
+// next returns the next available connection, or ErrNoClient.
+func (c *Client) next() (*conn, error) {
+ // We do round-robin here.
+ // TODO(oe) This should be a pluggable strategy, like the Selector in the official clients.
+ c.connsMu.Lock()
+ defer c.connsMu.Unlock()
+
+ i := 0
+ numConns := len(c.conns)
+ for {
+ i++
+ if i > numConns {
+ break // we visited all conns: they all seem to be dead
+ }
+ c.cindex++
+ if c.cindex >= numConns {
+ c.cindex = 0
+ }
+ conn := c.conns[c.cindex]
+ if !conn.IsDead() {
+ return conn, nil
+ }
+ }
+
+ // We have a deadlock here: all nodes are marked as dead. If sniffing
+ // is disabled, connections will never be marked alive again, so in
+ // that case we resurrect them here to prevent the deadlock.
+ // They'll then be picked up in the next call to PerformRequest.
+ if !c.snifferEnabled {
+ c.errorf("elastic: all %d nodes marked as dead; resurrecting them to prevent deadlock", len(c.conns))
+ for _, conn := range c.conns {
+ conn.MarkAsAlive()
+ }
+ }
+
+ // We tried hard, but there is no node available
+ return nil, errors.Wrap(ErrNoClient, "no available connection")
+}
+
+// mustActiveConn returns nil if there is an active connection,
+// otherwise ErrNoClient is returned.
+func (c *Client) mustActiveConn() error {
+ c.connsMu.Lock()
+ defer c.connsMu.Unlock()
+
+ for _, c := range c.conns {
+ if !c.IsDead() {
+ return nil
+ }
+ }
+ return errors.Wrap(ErrNoClient, "no active connection found")
+}
+
+// -- PerformRequest --
+
+// PerformRequestOptions must be passed into PerformRequest.
+type PerformRequestOptions struct {
+ Method string
+ Path string
+ Params url.Values
+ Body interface{}
+ ContentType string
+ IgnoreErrors []int
+ Retrier Retrier
+}
+
+// PerformRequest performs an HTTP request to Elasticsearch.
+// It returns a response (which might be nil) and an error on failure.
+//
+// Optionally, a list of HTTP error codes to ignore can be passed.
+// This is necessary for services that expect e.g. HTTP status 404 as a
+// valid outcome (Exists, IndicesExists, IndicesTypeExists).
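+//
+// A minimal sketch of a raw call (assuming ctx is a context.Context):
+//
+//   res, err := client.PerformRequest(ctx, PerformRequestOptions{
+//       Method:       "HEAD",
+//       Path:         "/myindex",
+//       IgnoreErrors: []int{404},
+//   })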
+func (c *Client) PerformRequest(ctx context.Context, opt PerformRequestOptions) (*Response, error) {
+ start := time.Now().UTC()
+
+ c.mu.RLock()
+ timeout := c.healthcheckTimeout
+ basicAuth := c.basicAuth
+ basicAuthUsername := c.basicAuthUsername
+ basicAuthPassword := c.basicAuthPassword
+ sendGetBodyAs := c.sendGetBodyAs
+ retrier := c.retrier
+ if opt.Retrier != nil {
+ retrier = opt.Retrier
+ }
+ c.mu.RUnlock()
+
+ var err error
+ var conn *conn
+ var req *Request
+ var resp *Response
+ var retried bool
+ var n int
+
+ // Change method if sendGetBodyAs is specified.
+ if opt.Method == "GET" && opt.Body != nil && sendGetBodyAs != "GET" {
+ opt.Method = sendGetBodyAs
+ }
+
+ for {
+ pathWithParams := opt.Path
+ if len(opt.Params) > 0 {
+ pathWithParams += "?" + opt.Params.Encode()
+ }
+
+ // Get a connection
+ conn, err = c.next()
+ if errors.Cause(err) == ErrNoClient {
+ n++
+ if !retried {
+ // Force a healthcheck as all connections seem to be dead.
+ c.healthcheck(timeout, false)
+ }
+ wait, ok, rerr := retrier.Retry(ctx, n, nil, nil, err)
+ if rerr != nil {
+ return nil, rerr
+ }
+ if !ok {
+ return nil, err
+ }
+ retried = true
+ time.Sleep(wait)
+ continue // try again
+ }
+ if err != nil {
+ c.errorf("elastic: cannot get connection from pool")
+ return nil, err
+ }
+
+ req, err = NewRequest(opt.Method, conn.URL()+pathWithParams)
+ if err != nil {
+ c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(opt.Method), conn.URL()+pathWithParams, err)
+ return nil, err
+ }
+
+ if basicAuth {
+ req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+ }
+ if opt.ContentType != "" {
+ req.Header.Set("Content-Type", opt.ContentType)
+ }
+
+ // Set body
+ if opt.Body != nil {
+ err = req.SetBody(opt.Body)
+ if err != nil {
+ c.errorf("elastic: couldn't set body %+v for request: %v", opt.Body, err)
+ return nil, err
+ }
+ }
+
+ // Tracing
+ c.dumpRequest((*http.Request)(req))
+
+ // Get response
+ res, err := c.c.Do((*http.Request)(req).WithContext(ctx))
+ if err == context.Canceled || err == context.DeadlineExceeded {
+ // Proceed, but don't mark the node as dead
+ return nil, err
+ }
+ if ue, ok := err.(*url.Error); ok {
+ // This happens e.g. on redirect errors, see https://golang.org/src/net/http/client_test.go#L329
+ if ue.Err == context.Canceled || ue.Err == context.DeadlineExceeded {
+ // Proceed, but don't mark the node as dead
+ return nil, err
+ }
+ }
+ if err != nil {
+ n++
+ wait, ok, rerr := retrier.Retry(ctx, n, (*http.Request)(req), res, err)
+ if rerr != nil {
+ c.errorf("elastic: %s is dead", conn.URL())
+ conn.MarkAsDead()
+ return nil, rerr
+ }
+ if !ok {
+ c.errorf("elastic: %s is dead", conn.URL())
+ conn.MarkAsDead()
+ return nil, err
+ }
+ retried = true
+ time.Sleep(wait)
+ continue // try again
+ }
+ if res.Body != nil {
+ defer res.Body.Close()
+ }
+
+ // Tracing
+ c.dumpResponse(res)
+
+ // Log deprecation warnings as errors
+ if s := res.Header.Get("Warning"); s != "" {
+ c.errorf(s)
+ }
+
+ // Check for errors
+ if err := checkResponse((*http.Request)(req), res, opt.IgnoreErrors...); err != nil {
+ // No retry here: the HTTP request itself succeeded.
+ // We still try to return a response.
+ resp, _ = c.newResponse(res)
+ return resp, err
+ }
+
+ // We successfully made a request with this connection
+ conn.MarkAsHealthy()
+
+ resp, err = c.newResponse(res)
+ if err != nil {
+ return nil, err
+ }
+
+ break
+ }
+
+ duration := time.Now().UTC().Sub(start)
+ c.infof("%s %s [status:%d, request:%.3fs]",
+ strings.ToUpper(opt.Method),
+ req.URL,
+ resp.StatusCode,
+ float64(int64(duration/time.Millisecond))/1000)
+
+ return resp, nil
+}
+
+// -- Document APIs --
+
+// Index a document.
+func (c *Client) Index() *IndexService {
+ return NewIndexService(c)
+}
+
+// Get a document.
+func (c *Client) Get() *GetService {
+ return NewGetService(c)
+}
+
+// MultiGet retrieves multiple documents in one roundtrip.
+func (c *Client) MultiGet() *MgetService {
+ return NewMgetService(c)
+}
+
+// Mget retrieves multiple documents in one roundtrip.
+func (c *Client) Mget() *MgetService {
+ return NewMgetService(c)
+}
+
+// Delete a document.
+func (c *Client) Delete() *DeleteService {
+ return NewDeleteService(c)
+}
+
+// DeleteByQuery deletes documents as found by a query.
+func (c *Client) DeleteByQuery(indices ...string) *DeleteByQueryService {
+ return NewDeleteByQueryService(c).Index(indices...)
+}
+
+// Update a document.
+func (c *Client) Update() *UpdateService {
+ return NewUpdateService(c)
+}
+
+// UpdateByQuery performs an update on a set of documents.
+func (c *Client) UpdateByQuery(indices ...string) *UpdateByQueryService {
+ return NewUpdateByQueryService(c).Index(indices...)
+}
+
+// Bulk is the entry point to mass insert/update/delete documents.
+func (c *Client) Bulk() *BulkService {
+ return NewBulkService(c)
+}
+
+// BulkProcessor allows setting up a concurrent processor of bulk requests.
+func (c *Client) BulkProcessor() *BulkProcessorService {
+ return NewBulkProcessorService(c)
+}
+
+// Reindex copies data from a source index into a destination index.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-reindex.html
+// for details on the Reindex API.
+func (c *Client) Reindex() *ReindexService {
+ return NewReindexService(c)
+}
+
+// TermVectors returns information and statistics on terms in the fields
+// of a particular document.
+func (c *Client) TermVectors(index, typ string) *TermvectorsService {
+ builder := NewTermvectorsService(c)
+ builder = builder.Index(index).Type(typ)
+ return builder
+}
+
+// MultiTermVectors returns information and statistics on terms in the fields
+// of multiple documents.
+func (c *Client) MultiTermVectors() *MultiTermvectorService {
+ return NewMultiTermvectorService(c)
+}
+
+// -- Search APIs --
+
+// Search is the entry point for searches.
+func (c *Client) Search(indices ...string) *SearchService {
+ return NewSearchService(c).Index(indices...)
+}
+
+// MultiSearch is the entry point for multi searches.
+func (c *Client) MultiSearch() *MultiSearchService {
+ return NewMultiSearchService(c)
+}
+
+// Count documents.
+func (c *Client) Count(indices ...string) *CountService {
+ return NewCountService(c).Index(indices...)
+}
+
+// Explain computes a score explanation for a query and a specific document.
+func (c *Client) Explain(index, typ, id string) *ExplainService {
+ return NewExplainService(c).Index(index).Type(typ).Id(id)
+}
+
+// TODO Search Template
+// TODO Search Shards API
+// TODO Search Exists API
+// TODO Validate API
+
+// FieldCaps returns statistical information about fields in indices.
+func (c *Client) FieldCaps(indices ...string) *FieldCapsService {
+ return NewFieldCapsService(c).Index(indices...)
+}
+
+// Exists checks if a document exists.
+func (c *Client) Exists() *ExistsService {
+ return NewExistsService(c)
+}
+
+// Scroll through documents. Use this to efficiently scroll through results
+// while returning the results to a client.
+func (c *Client) Scroll(indices ...string) *ScrollService {
+ return NewScrollService(c).Index(indices...)
+}
+
+// ClearScroll can be used to clear search contexts manually.
+func (c *Client) ClearScroll(scrollIds ...string) *ClearScrollService {
+ return NewClearScrollService(c).ScrollId(scrollIds...)
+}
+
+// -- Indices APIs --
+
+// CreateIndex returns a service to create a new index.
+func (c *Client) CreateIndex(name string) *IndicesCreateService {
+ return NewIndicesCreateService(c).Index(name)
+}
+
+// DeleteIndex returns a service to delete an index.
+func (c *Client) DeleteIndex(indices ...string) *IndicesDeleteService {
+ return NewIndicesDeleteService(c).Index(indices)
+}
+
+// IndexExists checks whether an index exists.
+func (c *Client) IndexExists(indices ...string) *IndicesExistsService {
+ return NewIndicesExistsService(c).Index(indices)
+}
+
+// ShrinkIndex returns a service to shrink one index into another.
+func (c *Client) ShrinkIndex(source, target string) *IndicesShrinkService {
+ return NewIndicesShrinkService(c).Source(source).Target(target)
+}
+
+// RolloverIndex rolls an alias over to a new index when the existing index
+// is considered to be too large or too old.
+func (c *Client) RolloverIndex(alias string) *IndicesRolloverService {
+ return NewIndicesRolloverService(c).Alias(alias)
+}
+
+// TypeExists checks whether one or more types exist in one or more indices.
+func (c *Client) TypeExists() *IndicesExistsTypeService {
+ return NewIndicesExistsTypeService(c)
+}
+
+// IndexStats provides statistics on different operations happening
+// in one or more indices.
+func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
+ return NewIndicesStatsService(c).Index(indices...)
+}
+
+// OpenIndex opens an index.
+func (c *Client) OpenIndex(name string) *IndicesOpenService {
+ return NewIndicesOpenService(c).Index(name)
+}
+
+// CloseIndex closes an index.
+func (c *Client) CloseIndex(name string) *IndicesCloseService {
+ return NewIndicesCloseService(c).Index(name)
+}
+
+// IndexGet retrieves information about one or more indices.
+// IndexGet is only available for Elasticsearch 1.4 or later.
+func (c *Client) IndexGet(indices ...string) *IndicesGetService {
+ return NewIndicesGetService(c).Index(indices...)
+}
+
+// IndexGetSettings retrieves settings of all, one or more indices.
+func (c *Client) IndexGetSettings(indices ...string) *IndicesGetSettingsService {
+ return NewIndicesGetSettingsService(c).Index(indices...)
+}
+
+// IndexPutSettings updates the settings of one, several, or all indices.
+func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService {
+ return NewIndicesPutSettingsService(c).Index(indices...)
+}
+
+// IndexSegments retrieves low-level segment information for one, several, or all indices.
+func (c *Client) IndexSegments(indices ...string) *IndicesSegmentsService {
+ return NewIndicesSegmentsService(c).Index(indices...)
+}
+
+// IndexAnalyze performs analysis on a text and returns its token
+// breakdown.
+func (c *Client) IndexAnalyze() *IndicesAnalyzeService {
+ return NewIndicesAnalyzeService(c)
+}
+
+// Forcemerge optimizes one or more indices.
+// It replaces the deprecated Optimize API.
+func (c *Client) Forcemerge(indices ...string) *IndicesForcemergeService {
+ return NewIndicesForcemergeService(c).Index(indices...)
+}
+
+// Refresh asks Elasticsearch to refresh one or more indices.
+func (c *Client) Refresh(indices ...string) *RefreshService {
+ return NewRefreshService(c).Index(indices...)
+}
+
+// Flush asks Elasticsearch to free memory from the index and
+// flush data to disk.
+func (c *Client) Flush(indices ...string) *IndicesFlushService {
+ return NewIndicesFlushService(c).Index(indices...)
+}
+
+// Alias enables the caller to add and/or remove aliases.
+func (c *Client) Alias() *AliasService {
+ return NewAliasService(c)
+}
+
+// Aliases returns aliases by index name(s).
+func (c *Client) Aliases() *AliasesService {
+ return NewAliasesService(c)
+}
+
+// IndexGetTemplate gets an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
+ return NewIndicesGetTemplateService(c).Name(names...)
+}
+
+// IndexTemplateExists checks whether an index template exists.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
+ return NewIndicesExistsTemplateService(c).Name(name)
+}
+
+// IndexPutTemplate creates or updates an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
+ return NewIndicesPutTemplateService(c).Name(name)
+}
+
+// IndexDeleteTemplate deletes an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
+ return NewIndicesDeleteTemplateService(c).Name(name)
+}
+
+// GetMapping gets a mapping.
+func (c *Client) GetMapping() *IndicesGetMappingService {
+ return NewIndicesGetMappingService(c)
+}
+
+// PutMapping registers a mapping.
+func (c *Client) PutMapping() *IndicesPutMappingService {
+ return NewIndicesPutMappingService(c)
+}
+
+// GetFieldMapping gets mapping for fields.
+func (c *Client) GetFieldMapping() *IndicesGetFieldMappingService {
+ return NewIndicesGetFieldMappingService(c)
+}
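+
+// A short sketch of typical index management, mirroring the flow of the
+// cluster-test example program (the index name is illustrative):
+//
+//  ctx := context.Background()
+//  exists, err := client.IndexExists("twitter").Do(ctx)
+//  if err != nil {
+//      // Handle error
+//  }
+//  if !exists {
+//      createIndex, err := client.CreateIndex("twitter").Do(ctx)
+//      if err != nil {
+//          // Handle error
+//      }
+//      if !createIndex.Acknowledged {
+//          // Not acknowledged
+//      }
+//  }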
+
+// -- cat APIs --
+
+// TODO cat aliases
+// TODO cat allocation
+// TODO cat count
+// TODO cat fielddata
+// TODO cat health
+// TODO cat indices
+// TODO cat master
+// TODO cat nodes
+// TODO cat pending tasks
+// TODO cat plugins
+// TODO cat recovery
+// TODO cat thread pool
+// TODO cat shards
+// TODO cat segments
+
+// -- Ingest APIs --
+
+// IngestPutPipeline adds pipelines and updates existing pipelines in
+// the cluster.
+func (c *Client) IngestPutPipeline(id string) *IngestPutPipelineService {
+ return NewIngestPutPipelineService(c).Id(id)
+}
+
+// IngestGetPipeline returns pipelines based on ID.
+func (c *Client) IngestGetPipeline(ids ...string) *IngestGetPipelineService {
+ return NewIngestGetPipelineService(c).Id(ids...)
+}
+
+// IngestDeletePipeline deletes a pipeline by ID.
+func (c *Client) IngestDeletePipeline(id string) *IngestDeletePipelineService {
+ return NewIngestDeletePipelineService(c).Id(id)
+}
+
+// IngestSimulatePipeline executes a specific pipeline against the set of
+// documents provided in the body of the request.
+func (c *Client) IngestSimulatePipeline() *IngestSimulatePipelineService {
+ return NewIngestSimulatePipelineService(c)
+}
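+
+// For example, to look up a pipeline by ID (a sketch; the pipeline ID is
+// illustrative):
+//
+//  pipelines, err := client.IngestGetPipeline("my-pipeline").
+//      Do(context.Background())
+//  if err != nil {
+//      // Handle error
+//  }
+//  _ = pipelines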
+
+// -- Cluster APIs --
+
+// ClusterHealth retrieves the health of the cluster.
+func (c *Client) ClusterHealth() *ClusterHealthService {
+ return NewClusterHealthService(c)
+}
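+
+// For example (a sketch; reading the Status field assumes the shape of the
+// cluster health response):
+//
+//  health, err := client.ClusterHealth().Do(context.Background())
+//  if err != nil {
+//      // Handle error
+//  }
+//  fmt.Printf("cluster status: %s\n", health.Status)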
+
+// ClusterState retrieves the state of the cluster.
+func (c *Client) ClusterState() *ClusterStateService {
+ return NewClusterStateService(c)
+}
+
+// ClusterStats retrieves cluster statistics.
+func (c *Client) ClusterStats() *ClusterStatsService {
+ return NewClusterStatsService(c)
+}
+
+// NodesInfo retrieves information about one, several, or all of the cluster's nodes.
+func (c *Client) NodesInfo() *NodesInfoService {
+ return NewNodesInfoService(c)
+}
+
+// NodesStats retrieves statistics about one, several, or all of the cluster's nodes.
+func (c *Client) NodesStats() *NodesStatsService {
+ return NewNodesStatsService(c)
+}
+
+// TasksCancel cancels tasks running on the specified nodes.
+func (c *Client) TasksCancel() *TasksCancelService {
+ return NewTasksCancelService(c)
+}
+
+// TasksList retrieves the list of tasks running on the specified nodes.
+func (c *Client) TasksList() *TasksListService {
+ return NewTasksListService(c)
+}
+
+// TasksGetTask retrieves a task running on the cluster.
+func (c *Client) TasksGetTask() *TasksGetTaskService {
+ return NewTasksGetTaskService(c)
+}
+
+// TODO Pending cluster tasks
+// TODO Cluster Reroute
+// TODO Cluster Update Settings
+// TODO Nodes Stats
+// TODO Nodes hot_threads
+
+// -- Snapshot and Restore --
+
+// TODO Snapshot Delete
+// TODO Snapshot Get
+// TODO Snapshot Restore
+// TODO Snapshot Status
+
+// SnapshotCreate creates a snapshot.
+func (c *Client) SnapshotCreate(repository string, snapshot string) *SnapshotCreateService {
+ return NewSnapshotCreateService(c).Repository(repository).Snapshot(snapshot)
+}
+
+// SnapshotCreateRepository creates or updates a snapshot repository.
+func (c *Client) SnapshotCreateRepository(repository string) *SnapshotCreateRepositoryService {
+ return NewSnapshotCreateRepositoryService(c).Repository(repository)
+}
+
+// SnapshotDeleteRepository deletes a snapshot repository.
+func (c *Client) SnapshotDeleteRepository(repositories ...string) *SnapshotDeleteRepositoryService {
+ return NewSnapshotDeleteRepositoryService(c).Repository(repositories...)
+}
+
+// SnapshotGetRepository gets a snapshot repository.
+func (c *Client) SnapshotGetRepository(repositories ...string) *SnapshotGetRepositoryService {
+ return NewSnapshotGetRepositoryService(c).Repository(repositories...)
+}
+
+// SnapshotVerifyRepository verifies a snapshot repository.
+func (c *Client) SnapshotVerifyRepository(repository string) *SnapshotVerifyRepositoryService {
+ return NewSnapshotVerifyRepositoryService(c).Repository(repository)
+}
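+
+// A sketch of creating a repository and then a snapshot in it (the names
+// are illustrative; repository settings are omitted here and would
+// normally be required):
+//
+//  ctx := context.Background()
+//  _, err := client.SnapshotCreateRepository("my_backup").Do(ctx)
+//  if err != nil {
+//      // Handle error
+//  }
+//  _, err = client.SnapshotCreate("my_backup", "snapshot_1").Do(ctx)
+//  if err != nil {
+//      // Handle error
+//  }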
+
+// -- Helpers and shortcuts --
+
+// ElasticsearchVersion returns the version number of Elasticsearch
+// running on the given URL.
+func (c *Client) ElasticsearchVersion(url string) (string, error) {
+ res, _, err := c.Ping(url).Do(context.Background())
+ if err != nil {
+ return "", err
+ }
+ return res.Version.Number, nil
+}
+
+// IndexNames returns the names of all indices in the cluster.
+func (c *Client) IndexNames() ([]string, error) {
+ res, err := c.IndexGetSettings().Index("_all").Do(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ var names []string
+ for name := range res {
+ names = append(names, name)
+ }
+ return names, nil
+}
+
+// Ping checks if a given node in a cluster exists and (optionally)
+// returns some basic information about the Elasticsearch server,
+// e.g. the Elasticsearch version number.
+//
+// Notice that you need to specify a URL here explicitly.
+func (c *Client) Ping(url string) *PingService {
+ return NewPingService(c).URL(url)
+}
+
+// WaitForStatus waits for the cluster to have the given status.
+// This is a shortcut method for the ClusterHealth service.
+//
+// WaitForStatus waits at most the specified timeout, e.g. "10s".
+// If the cluster reaches the given status within the timeout, nil is returned.
+// If the request times out, ErrTimeout is returned.
+func (c *Client) WaitForStatus(status string, timeout string) error {
+ health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do(context.Background())
+ if err != nil {
+ return err
+ }
+ if health.TimedOut {
+ return ErrTimeout
+ }
+ return nil
+}
+
+// WaitForGreenStatus waits for the cluster to have the "green" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForGreenStatus(timeout string) error {
+ return c.WaitForStatus("green", timeout)
+}
+
+// WaitForYellowStatus waits for the cluster to have the "yellow" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForYellowStatus(timeout string) error {
+ return c.WaitForStatus("yellow", timeout)
+}
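+
+// For example, to block for at most ten seconds until the cluster reaches
+// at least yellow status (the timeout value is illustrative):
+//
+//  if err := client.WaitForYellowStatus("10s"); err != nil {
+//      // Handle error (e.g. ErrTimeout)
+//  }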
+
+// IsConnErr unwraps the given error value and reports whether it is equal
+// to elastic.ErrNoClient.
+func IsConnErr(err error) bool {
+ return errors.Cause(err) == ErrNoClient
+}
diff --git a/vendor/github.com/olivere/elastic/client_test.go b/vendor/github.com/olivere/elastic/client_test.go
new file mode 100644
index 000000000..4d0440ee0
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/client_test.go
@@ -0,0 +1,1319 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "net/http"
+ "reflect"
+ "regexp"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/fortytw2/leaktest"
+)
+
+func findConn(s string, slice ...*conn) (int, bool) {
+ for i, t := range slice {
+ if s == t.URL() {
+ return i, true
+ }
+ }
+ return -1, false
+}
+
+// -- NewClient --
+
+func TestClientDefaults(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if client.healthcheckEnabled != true {
+ t.Errorf("expected health checks to be enabled, got: %v", client.healthcheckEnabled)
+ }
+ if client.healthcheckTimeoutStartup != DefaultHealthcheckTimeoutStartup {
+ t.Errorf("expected health checks timeout on startup = %v, got: %v", DefaultHealthcheckTimeoutStartup, client.healthcheckTimeoutStartup)
+ }
+ if client.healthcheckTimeout != DefaultHealthcheckTimeout {
+ t.Errorf("expected health checks timeout = %v, got: %v", DefaultHealthcheckTimeout, client.healthcheckTimeout)
+ }
+ if client.healthcheckInterval != DefaultHealthcheckInterval {
+ t.Errorf("expected health checks interval = %v, got: %v", DefaultHealthcheckInterval, client.healthcheckInterval)
+ }
+ if client.snifferEnabled != true {
+ t.Errorf("expected sniffing to be enabled, got: %v", client.snifferEnabled)
+ }
+ if client.snifferTimeoutStartup != DefaultSnifferTimeoutStartup {
+ t.Errorf("expected sniffer timeout on startup = %v, got: %v", DefaultSnifferTimeoutStartup, client.snifferTimeoutStartup)
+ }
+ if client.snifferTimeout != DefaultSnifferTimeout {
+ t.Errorf("expected sniffer timeout = %v, got: %v", DefaultSnifferTimeout, client.snifferTimeout)
+ }
+ if client.snifferInterval != DefaultSnifferInterval {
+ t.Errorf("expected sniffer interval = %v, got: %v", DefaultSnifferInterval, client.snifferInterval)
+ }
+ if client.basicAuth != false {
+ t.Errorf("expected no basic auth; got: %v", client.basicAuth)
+ }
+ if client.basicAuthUsername != "" {
+ t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername)
+ }
+ if client.basicAuthPassword != "" {
+ t.Errorf("expected no basic auth password; got: %q", client.basicAuthUsername)
+ }
+ if client.sendGetBodyAs != "GET" {
+ t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs)
+ }
+}
+
+func TestClientWithoutURL(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Two things should happen here:
+ // 1. The client starts sniffing the cluster on DefaultURL
+ // 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL
+ if len(client.conns) == 0 {
+ t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns)
+ }
+ if !isTravis() {
+ if _, found := findConn(DefaultURL, client.conns...); !found {
+ t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
+ }
+ }
+}
+
+func TestClientWithSingleURL(t *testing.T) {
+ client, err := NewClient(SetURL("http://127.0.0.1:9200"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Two things should happen here:
+ // 1. The client starts sniffing the cluster on DefaultURL
+ // 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL
+ if len(client.conns) == 0 {
+ t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns)
+ }
+ if !isTravis() {
+ if _, found := findConn(DefaultURL, client.conns...); !found {
+ t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
+ }
+ }
+}
+
+func TestClientWithMultipleURLs(t *testing.T) {
+ client, err := NewClient(SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes.
+ if len(client.conns) != 1 {
+ t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns)
+ }
+ if !isTravis() {
+ if client.conns[0].URL() != DefaultURL {
+ t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
+ }
+ }
+}
+
+func TestClientWithBasicAuth(t *testing.T) {
+ client, err := NewClient(SetBasicAuth("user", "secret"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if client.basicAuth != true {
+ t.Errorf("expected basic auth; got: %v", client.basicAuth)
+ }
+ if got, want := client.basicAuthUsername, "user"; got != want {
+ t.Errorf("expected basic auth username %q; got: %q", want, got)
+ }
+ if got, want := client.basicAuthPassword, "secret"; got != want {
+ t.Errorf("expected basic auth password %q; got: %q", want, got)
+ }
+}
+
+func TestClientWithBasicAuthInUserInfo(t *testing.T) {
+ client, err := NewClient(SetURL("http://user1:secret1@localhost:9200", "http://user2:secret2@localhost:9200"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if client.basicAuth != true {
+ t.Errorf("expected basic auth; got: %v", client.basicAuth)
+ }
+ if got, want := client.basicAuthUsername, "user1"; got != want {
+ t.Errorf("expected basic auth username %q; got: %q", want, got)
+ }
+ if got, want := client.basicAuthPassword, "secret1"; got != want {
+ t.Errorf("expected basic auth password %q; got: %q", want, got)
+ }
+}
+
+func TestClientSniffSuccess(t *testing.T) {
+ client, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:9200"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // The client should sniff both URLs, but only 127.0.0.1:9200 should return nodes.
+ if len(client.conns) != 1 {
+ t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns)
+ }
+}
+
+func TestClientSniffFailure(t *testing.T) {
+ _, err := NewClient(SetURL("http://127.0.0.1:19200", "http://127.0.0.1:19201"))
+ if err == nil {
+ t.Fatalf("expected cluster to fail with no nodes found")
+ }
+}
+
+func TestClientSnifferCallback(t *testing.T) {
+ var calls int
+ cb := func(node *NodesInfoNode) bool {
+ calls++
+ return false
+ }
+ _, err := NewClient(
+ SetURL("http://127.0.0.1:19200", "http://127.0.0.1:9200"),
+ SetSnifferCallback(cb))
+ if err == nil {
+ t.Fatalf("expected cluster to fail with no nodes found")
+ }
+ if calls != 1 {
+ t.Fatalf("expected 1 call to the sniffer callback, got %d", calls)
+ }
+}
+
+func TestClientSniffDisabled(t *testing.T) {
+ client, err := NewClient(SetSniff(false), SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // The client should not sniff, so it should have two connections.
+ if len(client.conns) != 2 {
+ t.Fatalf("expected 2 nodes, got: %d (%v)", len(client.conns), client.conns)
+ }
+ // Make two requests, so that both connections are being used
+ for i := 0; i < len(client.conns); i++ {
+ client.Flush().Do(context.TODO())
+ }
+ // The first connection (127.0.0.1:9200) should now be okay.
+ if i, found := findConn("http://127.0.0.1:9200", client.conns...); !found {
+ t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9200")
+ } else {
+ if conn := client.conns[i]; conn.IsDead() {
+ t.Fatal("expected connection to be alive, but it is dead")
+ }
+ }
+ // The second connection (127.0.0.1:9201) should now be marked as dead.
+ if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
+ t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
+ } else {
+ if conn := client.conns[i]; !conn.IsDead() {
+ t.Fatal("expected connection to be dead, but it is alive")
+ }
+ }
+}
+
+func TestClientWillMarkConnectionsAsAliveWhenAllAreDead(t *testing.T) {
+ client, err := NewClient(SetURL("http://127.0.0.1:9201"),
+ SetSniff(false), SetHealthcheck(false), SetMaxRetries(0))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // We should have a connection.
+ if len(client.conns) != 1 {
+ t.Fatalf("expected 1 node, got: %d (%v)", len(client.conns), client.conns)
+ }
+
+ // Make a request, so that the connection is marked as dead.
+ client.Flush().Do(context.TODO())
+
+ // The connection should now be marked as dead.
+ if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
+ t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
+ } else {
+ if conn := client.conns[i]; !conn.IsDead() {
+ t.Fatalf("expected connection to be dead, got: %v", conn)
+ }
+ }
+
+ // Now send another request and the connection should be marked as alive again.
+ client.Flush().Do(context.TODO())
+
+ if i, found := findConn("http://127.0.0.1:9201", client.conns...); !found {
+ t.Fatalf("expected connection to %q to be found", "http://127.0.0.1:9201")
+ } else {
+ if conn := client.conns[i]; conn.IsDead() {
+ t.Fatalf("expected connection to be alive, got: %v", conn)
+ }
+ }
+}
+
+func TestClientWithRequiredPlugins(t *testing.T) {
+ _, err := NewClient(SetRequiredPlugins("no-such-plugin"))
+ if err == nil {
+ t.Fatal("expected error when creating client")
+ }
+ if got, want := err.Error(), "elastic: plugin no-such-plugin not found"; got != want {
+ t.Fatalf("expected error %q; got: %q", want, got)
+ }
+}
+
+func TestClientHealthcheckStartupTimeout(t *testing.T) {
+ start := time.Now()
+ _, err := NewClient(SetURL("http://localhost:9299"), SetHealthcheckTimeoutStartup(5*time.Second))
+ duration := time.Since(start)
+ if !IsConnErr(err) {
+ t.Fatal(err)
+ }
+ if !strings.Contains(err.Error(), "connection refused") {
+ t.Fatalf("expected error to contain %q, have %q", "connection refused", err.Error())
+ }
+ if duration < 5*time.Second {
+ t.Fatalf("expected a timeout in more than 5 seconds; got: %v", duration)
+ }
+}
+
+func TestClientHealthcheckTimeoutLeak(t *testing.T) {
+ // This test checks that healthcheck requests are canceled
+ // after the timeout.
+ // It contains a couple of hacks which won't be needed once we
+ // stop supporting Go 1.7.
+ // On Go 1.7 it relies on server-side effects to detect whether
+ // the connection was closed; on Go 1.8+ it additionally monitors
+ // goroutine leaks via leaktest.
+ mux := http.NewServeMux()
+
+ var reqDoneMu sync.Mutex
+ var reqDone bool
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ cn, ok := w.(http.CloseNotifier)
+ if !ok {
+ t.Fatalf("Writer is not CloseNotifier, but %v", reflect.TypeOf(w).Name())
+ }
+ <-cn.CloseNotify()
+ reqDoneMu.Lock()
+ reqDone = true
+ reqDoneMu.Unlock()
+ })
+
+ lis, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("Couldn't setup listener: %v", err)
+ }
+ addr := lis.Addr().String()
+
+ srv := &http.Server{
+ Handler: mux,
+ }
+ go srv.Serve(lis)
+
+ cli := &Client{
+ c: &http.Client{},
+ conns: []*conn{
+ &conn{
+ url: "http://" + addr + "/",
+ },
+ },
+ }
+
+ type closer interface {
+ Shutdown(context.Context) error
+ }
+
+ // pre-Go1.8 Server can't Shutdown
+ cl, isServerCloseable := (interface{}(srv)).(closer)
+
+ // Go 1.7's Server can't Shutdown(), so it will leak a goroutine;
+ // monitor leaks on Go 1.8+ only.
+ if isServerCloseable {
+ defer leaktest.CheckTimeout(t, time.Second*10)()
+ }
+
+ cli.healthcheck(time.Millisecond*500, true)
+
+ if isServerCloseable {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ cl.Shutdown(ctx)
+ }
+
+ <-time.After(time.Second)
+ reqDoneMu.Lock()
+ if !reqDone {
+ reqDoneMu.Unlock()
+ t.Fatal("Request wasn't canceled or stopped")
+ }
+ reqDoneMu.Unlock()
+}
+
+// -- NewSimpleClient --
+
+func TestSimpleClientDefaults(t *testing.T) {
+ client, err := NewSimpleClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if client.healthcheckEnabled != false {
+ t.Errorf("expected health checks to be disabled, got: %v", client.healthcheckEnabled)
+ }
+ if client.healthcheckTimeoutStartup != off {
+ t.Errorf("expected health checks timeout on startup = %v, got: %v", off, client.healthcheckTimeoutStartup)
+ }
+ if client.healthcheckTimeout != off {
+ t.Errorf("expected health checks timeout = %v, got: %v", off, client.healthcheckTimeout)
+ }
+ if client.healthcheckInterval != off {
+ t.Errorf("expected health checks interval = %v, got: %v", off, client.healthcheckInterval)
+ }
+ if client.snifferEnabled != false {
+ t.Errorf("expected sniffing to be disabled, got: %v", client.snifferEnabled)
+ }
+ if client.snifferTimeoutStartup != off {
+ t.Errorf("expected sniffer timeout on startup = %v, got: %v", off, client.snifferTimeoutStartup)
+ }
+ if client.snifferTimeout != off {
+ t.Errorf("expected sniffer timeout = %v, got: %v", off, client.snifferTimeout)
+ }
+ if client.snifferInterval != off {
+ t.Errorf("expected sniffer interval = %v, got: %v", off, client.snifferInterval)
+ }
+ if client.basicAuth != false {
+ t.Errorf("expected no basic auth; got: %v", client.basicAuth)
+ }
+ if client.basicAuthUsername != "" {
+ t.Errorf("expected no basic auth username; got: %q", client.basicAuthUsername)
+ }
+ if client.basicAuthPassword != "" {
+ t.Errorf("expected no basic auth password; got: %q", client.basicAuthUsername)
+ }
+ if client.sendGetBodyAs != "GET" {
+ t.Errorf("expected sendGetBodyAs to be GET; got: %q", client.sendGetBodyAs)
+ }
+}
+
+// -- Start and stop --
+
+func TestClientStartAndStop(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ running := client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+
+ // Stop
+ client.Stop()
+ running = client.IsRunning()
+ if running {
+ t.Fatalf("expected background processes to be stopped; got: %v", running)
+ }
+
+ // Stop again => no-op
+ client.Stop()
+ running = client.IsRunning()
+ if running {
+ t.Fatalf("expected background processes to be stopped; got: %v", running)
+ }
+
+ // Start
+ client.Start()
+ running = client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+
+ // Start again => no-op
+ client.Start()
+ running = client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+}
+
+func TestClientStartAndStopWithSnifferAndHealthchecksDisabled(t *testing.T) {
+ client, err := NewClient(SetSniff(false), SetHealthcheck(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ running := client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+
+ // Stop
+ client.Stop()
+ running = client.IsRunning()
+ if running {
+ t.Fatalf("expected background processes to be stopped; got: %v", running)
+ }
+
+ // Stop again => no-op
+ client.Stop()
+ running = client.IsRunning()
+ if running {
+ t.Fatalf("expected background processes to be stopped; got: %v", running)
+ }
+
+ // Start
+ client.Start()
+ running = client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+
+ // Start again => no-op
+ client.Start()
+ running = client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ }
+}
+
+// -- Sniffing --
+
+func TestClientSniffNode(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch := make(chan []*conn)
+ go func() { ch <- client.sniffNode(context.Background(), DefaultURL) }()
+
+ select {
+ case nodes := <-ch:
+ if len(nodes) != 1 {
+ t.Fatalf("expected %d nodes; got: %d", 1, len(nodes))
+ }
+ pattern := `http:\/\/[\d\.]+:9200`
+ matched, err := regexp.MatchString(pattern, nodes[0].URL())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !matched {
+ t.Fatalf("expected node URL pattern %q; got: %q", pattern, nodes[0].URL())
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("expected no timeout in sniff node")
+ }
+}
+
+func TestClientSniffOnDefaultURL(t *testing.T) {
+ client, _ := NewClient()
+ if client == nil {
+ t.Fatal("no client returned")
+ }
+
+ ch := make(chan error, 1)
+ go func() {
+ ch <- client.sniff(DefaultSnifferTimeoutStartup)
+ }()
+
+ select {
+ case err := <-ch:
+ if err != nil {
+ t.Fatalf("expected sniff to succeed; got: %v", err)
+ }
+ if len(client.conns) != 1 {
+ t.Fatalf("expected %d nodes; got: %d", 1, len(client.conns))
+ }
+ pattern := `http:\/\/[\d\.]+:9200`
+ matched, err := regexp.MatchString(pattern, client.conns[0].URL())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !matched {
+ t.Fatalf("expected node URL pattern %q; got: %q", pattern, client.conns[0].URL())
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("expected no timeout in sniff")
+ }
+}
+
+func TestClientSniffTimeoutLeak(t *testing.T) {
+ // This test checks that sniff requests are canceled
+ // after the timeout.
+ // It contains a couple of hacks which won't be needed once we
+ // stop supporting Go 1.7.
+ // On Go 1.7 it relies on server-side effects to detect whether
+ // the connection was closed; on Go 1.8+ it additionally monitors
+ // goroutine leaks via leaktest.
+ mux := http.NewServeMux()
+
+ var reqDoneMu sync.Mutex
+ var reqDone bool
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ cn, ok := w.(http.CloseNotifier)
+ if !ok {
+ t.Fatalf("Writer is not CloseNotifier, but %v", reflect.TypeOf(w).Name())
+ }
+ <-cn.CloseNotify()
+ reqDoneMu.Lock()
+ reqDone = true
+ reqDoneMu.Unlock()
+ })
+
+ lis, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("Couldn't setup listener: %v", err)
+ }
+ addr := lis.Addr().String()
+
+ srv := &http.Server{
+ Handler: mux,
+ }
+ go srv.Serve(lis)
+
+ cli := &Client{
+ c: &http.Client{},
+ conns: []*conn{
+ &conn{
+ url: "http://" + addr + "/",
+ },
+ },
+ snifferEnabled: true,
+ }
+
+ type closer interface {
+ Shutdown(context.Context) error
+ }
+
+ // pre-Go1.8 Server can't Shutdown
+ cl, isServerCloseable := (interface{}(srv)).(closer)
+
+ // Go 1.7's Server can't Shutdown(), so it will leak a goroutine;
+ // monitor leaks on Go 1.8+ only.
+ if isServerCloseable {
+ defer leaktest.CheckTimeout(t, time.Second*10)()
+ }
+
+ cli.sniff(time.Millisecond * 500)
+
+ if isServerCloseable {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ cl.Shutdown(ctx)
+ }
+
+ <-time.After(time.Second)
+ reqDoneMu.Lock()
+ if !reqDone {
+ reqDoneMu.Unlock()
+ t.Fatal("Request wasn't canceled or stopped")
+ }
+ reqDoneMu.Unlock()
+}
+
+func TestClientExtractHostname(t *testing.T) {
+ tests := []struct {
+ Scheme string
+ Address string
+ Output string
+ }{
+ {
+ Scheme: "http",
+ Address: "",
+ Output: "",
+ },
+ {
+ Scheme: "https",
+ Address: "abc",
+ Output: "",
+ },
+ {
+ Scheme: "http",
+ Address: "127.0.0.1:19200",
+ Output: "http://127.0.0.1:19200",
+ },
+ {
+ Scheme: "https",
+ Address: "127.0.0.1:9200",
+ Output: "https://127.0.0.1:9200",
+ },
+ {
+ Scheme: "http",
+ Address: "myelk.local/10.1.0.24:9200",
+ Output: "http://10.1.0.24:9200",
+ },
+ }
+
+ client, err := NewClient(SetSniff(false), SetHealthcheck(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, test := range tests {
+ got := client.extractHostname(test.Scheme, test.Address)
+ if want := test.Output; want != got {
+ t.Errorf("expected %q; got: %q", want, got)
+ }
+ }
+}
+
+// -- Selector --
+
+func TestClientSelectConnHealthy(t *testing.T) {
+ client, err := NewClient(
+ SetSniff(false),
+ SetHealthcheck(false),
+ SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Both are healthy, so we should get both URLs in round-robin
+ client.conns[0].MarkAsHealthy()
+ client.conns[1].MarkAsHealthy()
+
+ // #1: Return 1st
+ c, err := client.next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c.URL() != client.conns[0].URL() {
+ t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
+ }
+ // #2: Return 2nd
+ c, err = client.next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c.URL() != client.conns[1].URL() {
+ t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
+ }
+ // #3: Return 1st
+ c, err = client.next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c.URL() != client.conns[0].URL() {
+ t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
+ }
+}
+
+func TestClientSelectConnHealthyAndDead(t *testing.T) {
+ client, err := NewClient(
+ SetSniff(false),
+ SetHealthcheck(false),
+ SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // 1st is healthy, second is dead
+ client.conns[0].MarkAsHealthy()
+ client.conns[1].MarkAsDead()
+
+ // #1: Return 1st
+ c, err := client.next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c.URL() != client.conns[0].URL() {
+ t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
+ }
+ // #2: Return 1st again
+ c, err = client.next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c.URL() != client.conns[0].URL() {
+ t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
+ }
+ // #3: Return 1st again and again
+ c, err = client.next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c.URL() != client.conns[0].URL() {
+ t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
+ }
+}
+
+func TestClientSelectConnDeadAndHealthy(t *testing.T) {
+ client, err := NewClient(
+ SetSniff(false),
+ SetHealthcheck(false),
+ SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // 1st is dead, 2nd is healthy
+ client.conns[0].MarkAsDead()
+ client.conns[1].MarkAsHealthy()
+
+ // #1: Return 2nd
+ c, err := client.next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c.URL() != client.conns[1].URL() {
+ t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
+ }
+ // #2: Return 2nd again
+ c, err = client.next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c.URL() != client.conns[1].URL() {
+ t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
+ }
+ // #3: Return 2nd again and again
+ c, err = client.next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c.URL() != client.conns[1].URL() {
+ t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
+ }
+}
+
+func TestClientSelectConnAllDead(t *testing.T) {
+ client, err := NewClient(
+ SetSniff(false),
+ SetHealthcheck(false),
+ SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Both are dead
+ client.conns[0].MarkAsDead()
+ client.conns[1].MarkAsDead()
+
+ // If all connections are dead, next should make them alive again, but
+ // still return an error when it first finds out.
+ c, err := client.next()
+ if !IsConnErr(err) {
+ t.Fatal(err)
+ }
+ if c != nil {
+ t.Fatalf("expected no connection; got: %v", c)
+ }
+ // Return a connection
+ c, err = client.next()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if c == nil {
+ t.Fatalf("expected connection; got: %v", c)
+ }
+ // Return a connection
+ c, err = client.next()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if c == nil {
+ t.Fatalf("expected connection; got: %v", c)
+ }
+}
+
+// -- ElasticsearchVersion --
+
+func TestElasticsearchVersion(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ version, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if version == "" {
+ t.Errorf("expected a version number, got: %q", version)
+ }
+}
+
+// -- IndexNames --
+
+func TestIndexNames(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+ names, err := client.IndexNames()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(names) == 0 {
+ t.Fatalf("expected some index names, got: %d", len(names))
+ }
+ var found bool
+ for _, name := range names {
+ if name == testIndexName {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Fatalf("expected to find index %q; got: %v", testIndexName, found)
+ }
+}
+
+// -- PerformRequest --
+
+func TestPerformRequest(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected response to be != nil")
+ }
+
+ ret := new(PingResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ t.Fatalf("expected no error on decode; got: %v", err)
+ }
+ if ret.ClusterName == "" {
+ t.Errorf("expected cluster name; got: %q", ret.ClusterName)
+ }
+}
+
+func TestPerformRequestWithSimpleClient(t *testing.T) {
+ client, err := NewSimpleClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected response to be != nil")
+ }
+
+ ret := new(PingResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ t.Fatalf("expected no error on decode; got: %v", err)
+ }
+ if ret.ClusterName == "" {
+ t.Errorf("expected cluster name; got: %q", ret.ClusterName)
+ }
+}
+
+func TestPerformRequestWithLogger(t *testing.T) {
+ var w bytes.Buffer
+ out := log.New(&w, "LOGGER ", log.LstdFlags)
+
+ client, err := NewClient(SetInfoLog(out), SetSniff(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected response to be != nil")
+ }
+
+ ret := new(PingResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ t.Fatalf("expected no error on decode; got: %v", err)
+ }
+ if ret.ClusterName == "" {
+ t.Errorf("expected cluster name; got: %q", ret.ClusterName)
+ }
+
+ got := w.String()
+ pattern := `^LOGGER \d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n`
+ matched, err := regexp.MatchString(pattern, got)
+ if err != nil {
+ t.Fatalf("expected log line to match %q; got: %v", pattern, err)
+ }
+ if !matched {
+ t.Errorf("expected log line to match %q; got: %v", pattern, got)
+ }
+}
+
+func TestPerformRequestWithLoggerAndTracer(t *testing.T) {
+ var lw bytes.Buffer
+ lout := log.New(&lw, "LOGGER ", log.LstdFlags)
+
+ var tw bytes.Buffer
+ tout := log.New(&tw, "TRACER ", log.LstdFlags)
+
+ client, err := NewClient(SetInfoLog(lout), SetTraceLog(tout), SetSniff(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected response to be != nil")
+ }
+
+ ret := new(PingResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ t.Fatalf("expected no error on decode; got: %v", err)
+ }
+ if ret.ClusterName == "" {
+ t.Errorf("expected cluster name; got: %q", ret.ClusterName)
+ }
+
+ lgot := lw.String()
+ if lgot == "" {
+ t.Errorf("expected logger output; got: %q", lgot)
+ }
+
+ tgot := tw.String()
+ if tgot == "" {
+ t.Errorf("expected tracer output; got: %q", tgot)
+ }
+}
+
+func TestPerformRequestWithTracerOnError(t *testing.T) {
+ var tw bytes.Buffer
+ tout := log.New(&tw, "TRACER ", log.LstdFlags)
+
+ client, err := NewClient(SetTraceLog(tout), SetSniff(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/no-such-index",
+ })
+
+ tgot := tw.String()
+ if tgot == "" {
+ t.Errorf("expected tracer output; got: %q", tgot)
+ }
+}
+
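+// customLogger collects log output in a buffer so tests can inspect it.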
+type customLogger struct {
+ out bytes.Buffer
+}
+
+func (l *customLogger) Printf(format string, v ...interface{}) {
+ l.out.WriteString(fmt.Sprintf(format, v...) + "\n")
+}
+
+func TestPerformRequestWithCustomLogger(t *testing.T) {
+ logger := &customLogger{}
+
+ client, err := NewClient(SetInfoLog(logger), SetSniff(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected response to be != nil")
+ }
+
+ ret := new(PingResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ t.Fatalf("expected no error on decode; got: %v", err)
+ }
+ if ret.ClusterName == "" {
+ t.Errorf("expected cluster name; got: %q", ret.ClusterName)
+ }
+
+ got := logger.out.String()
+ pattern := `^GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n`
+ matched, err := regexp.MatchString(pattern, got)
+ if err != nil {
+ t.Fatalf("expected log line to match %q; got: %v", pattern, err)
+ }
+ if !matched {
+ t.Errorf("expected log line to match %q; got: %v", pattern, got)
+ }
+}
+
+// failingTransport will run a fail callback if it sees a given URL path prefix.
+type failingTransport struct {
+ path string // path prefix to look for
+ fail func(*http.Request) (*http.Response, error) // call when path prefix is found
+ next http.RoundTripper // next round-tripper (use http.DefaultTransport if nil)
+}
+
+// RoundTrip implements a failing transport.
+func (tr *failingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ if strings.HasPrefix(r.URL.Path, tr.path) && tr.fail != nil {
+ return tr.fail(r)
+ }
+ if tr.next != nil {
+ return tr.next.RoundTrip(r)
+ }
+ return http.DefaultTransport.RoundTrip(r)
+}
+
+func TestPerformRequestRetryOnHttpError(t *testing.T) {
+ var numFailedReqs int
+ fail := func(r *http.Request) (*http.Response, error) {
+ numFailedReqs++
+ //return &http.Response{Request: r, StatusCode: 400}, nil
+ return nil, errors.New("request failed")
+ }
+
+ // Run against a failing endpoint and see if PerformRequest
+ // retries correctly.
+ tr := &failingTransport{path: "/fail", fail: fail}
+ httpClient := &http.Client{Transport: tr}
+
+ client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5), SetHealthcheck(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/fail",
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if res != nil {
+ t.Fatal("expected no response")
+ }
+ // Connection should be marked as dead after it failed
+ if numFailedReqs != 5 {
+ t.Errorf("expected %d failed requests; got: %d", 5, numFailedReqs)
+ }
+}
+
+func TestPerformRequestNoRetryOnValidButUnsuccessfulHttpStatus(t *testing.T) {
+ var numFailedReqs int
+ fail := func(r *http.Request) (*http.Response, error) {
+ numFailedReqs++
+ return &http.Response{Request: r, StatusCode: 500}, nil
+ }
+
+ // Run against a failing endpoint and see if PerformRequest
+ // retries correctly.
+ tr := &failingTransport{path: "/fail", fail: fail}
+ httpClient := &http.Client{Transport: tr}
+
+ client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5), SetHealthcheck(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/fail",
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if res == nil {
+ t.Fatal("expected response, got nil")
+ }
+ if want, got := 500, res.StatusCode; want != got {
+ t.Fatalf("expected status code = %d, got %d", want, got)
+ }
+ // Retry should not have triggered additional requests, because the
+ // transport returned a valid (if unsuccessful) HTTP response.
+ if numFailedReqs != 1 {
+ t.Errorf("expected %d failed requests; got: %d", 1, numFailedReqs)
+ }
+}
+
+// failingBody will return an error when json.Marshal is called on it.
+type failingBody struct{}
+
+// MarshalJSON implements the json.Marshaler interface and always returns an error.
+func (fb failingBody) MarshalJSON() ([]byte, error) {
+ return nil, errors.New("failing to marshal")
+}
+
+func TestPerformRequestWithSetBodyError(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ Body: failingBody{},
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if res != nil {
+ t.Fatal("expected no response")
+ }
+}
+
+// sleepingTransport will sleep before doing a request.
+type sleepingTransport struct {
+ timeout time.Duration
+}
+
+// RoundTrip implements a "sleepy" transport.
+func (tr *sleepingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ time.Sleep(tr.timeout)
+ return http.DefaultTransport.RoundTrip(r)
+}
+
+func TestPerformRequestWithCancel(t *testing.T) {
+ tr := &sleepingTransport{timeout: 3 * time.Second}
+ httpClient := &http.Client{Transport: tr}
+
+ client, err := NewSimpleClient(SetHttpClient(httpClient), SetMaxRetries(0))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type result struct {
+ res *Response
+ err error
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+
+ resc := make(chan result, 1)
+ go func() {
+ res, err := client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
+ resc <- result{res: res, err: err}
+ }()
+ select {
+ case <-time.After(1 * time.Second):
+ cancel()
+ case res := <-resc:
+ t.Fatalf("expected response before cancel, got %v", res)
+ case <-ctx.Done():
+ t.Fatalf("expected no early termination, got ctx.Done(): %v", ctx.Err())
+ }
+ err = ctx.Err()
+ if err != context.Canceled {
+ t.Fatalf("expected error context.Canceled, got: %v", err)
+ }
+}
+
+func TestPerformRequestWithTimeout(t *testing.T) {
+ tr := &sleepingTransport{timeout: 3 * time.Second}
+ httpClient := &http.Client{Transport: tr}
+
+ client, err := NewSimpleClient(SetHttpClient(httpClient), SetMaxRetries(0))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type result struct {
+ res *Response
+ err error
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+
+ resc := make(chan result, 1)
+ go func() {
+ res, err := client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
+ resc <- result{res: res, err: err}
+ }()
+ select {
+ case res := <-resc:
+ t.Fatalf("expected timeout before response, got %v", res)
+ case <-ctx.Done():
+ err := ctx.Err()
+ if err != context.DeadlineExceeded {
+ t.Fatalf("expected error context.DeadlineExceeded, got: %v", err)
+ }
+ }
+}
+
+// -- Compression --
+
+// Notice that the trace log always prints "Accept-Encoding: gzip",
+// regardless of whether compression is enabled. This is because
+// of the underlying "httputil.DumpRequestOut".
+//
+// Use a real HTTP proxy/recorder to convince yourself that
+// "Accept-Encoding: gzip" is NOT sent when DisableCompression
+// is set to true.
+//
+// See also:
+// https://groups.google.com/forum/#!topic/golang-nuts/ms8QNCzew8Q
+
+func TestPerformRequestWithCompressionEnabled(t *testing.T) {
+ testPerformRequestWithCompression(t, &http.Client{
+ Transport: &http.Transport{
+ DisableCompression: false,
+ },
+ })
+}
+
+func TestPerformRequestWithCompressionDisabled(t *testing.T) {
+ testPerformRequestWithCompression(t, &http.Client{
+ Transport: &http.Transport{
+ DisableCompression: true,
+ },
+ })
+}
+
+func testPerformRequestWithCompression(t *testing.T, hc *http.Client) {
+ client, err := NewClient(SetHttpClient(hc), SetSniff(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected response to be != nil")
+ }
+
+ ret := new(PingResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ t.Fatalf("expected no error on decode; got: %v", err)
+ }
+ if ret.ClusterName == "" {
+ t.Errorf("expected cluster name; got: %q", ret.ClusterName)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/cluster-test/Makefile b/vendor/github.com/olivere/elastic/cluster-test/Makefile
new file mode 100644
index 000000000..cc6261db5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/cluster-test/Makefile
@@ -0,0 +1,16 @@
+.PHONY: build run-omega-cluster-test
+
+default: build
+
+build:
+ go build cluster-test.go
+
+run-omega-cluster-test:
+ go run -race cluster-test.go \
+ -nodes=http://192.168.2.65:8200,http://192.168.2.64:8200 \
+ -n=5 \
+ -retries=5 \
+ -sniff=true -sniffer=10s \
+ -healthcheck=true -healthchecker=5s \
+ -errorlog=errors.log
+
diff --git a/vendor/github.com/olivere/elastic/cluster-test/README.md b/vendor/github.com/olivere/elastic/cluster-test/README.md
new file mode 100644
index 000000000..f10748cc2
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/cluster-test/README.md
@@ -0,0 +1,63 @@
+# Cluster Test
+
+This directory contains a program you can use to test a cluster.
+
+Here's how:
+
+First, install a cluster of Elasticsearch nodes. You can install them on
+different computers, or start several nodes on a single machine.
+
+Build cluster-test by `go build cluster-test.go` (or build with `make`).
+
+Run `./cluster-test -h` to get a list of flags:
+
+```sh
+$ ./cluster-test -h
+Usage of ./cluster-test:
+ -errorlog="": error log file
+ -healthcheck=true: enable or disable healthchecks
+ -healthchecker=1m0s: healthcheck interval
+ -index="twitter": name of ES index to use
+ -infolog="": info log file
+ -n=5: number of goroutines that run searches
+ -nodes="": comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')
+ -retries=0: number of retries
+ -sniff=true: enable or disable sniffer
+ -sniffer=15m0s: sniffer interval
+ -tracelog="": trace log file
+```
+
+Example:
+
+```sh
+$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log
+```
+
+The above example will create an index and start some search jobs on the
+cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201,
+and http://127.0.0.1:9202.
+
+* It will create an index called `twitter` on the cluster (`-index=twitter`)
+* It will run 5 search jobs in parallel (`-n=5`).
+* It will retry failed requests 5 times (`-retries=5`).
+* It will sniff the cluster periodically (`-sniff=true`).
+* It will sniff the cluster every 10 seconds (`-sniffer=10s`).
+* It will perform health checks periodically (`-healthcheck=true`).
+* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`).
+* It will write an error log file (`-errorlog=error.log`).
+
+If you want to test Elastic with nodes going up and down, you can use a
+chaos monkey script like this and run it on the nodes of your cluster:
+
+```sh
+#!/bin/bash
+while true
+do
+ echo "Starting ES node"
+ elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid
+ sleep `jot -r 1 10 300` # wait for 10-300s
+ echo "Stopping ES node"
+ kill -TERM `cat es.pid`
+ sleep `jot -r 1 10 60` # wait for 10-60s
+done
+```
diff --git a/vendor/github.com/olivere/elastic/cluster-test/cluster-test.go b/vendor/github.com/olivere/elastic/cluster-test/cluster-test.go
new file mode 100644
index 000000000..96b0c5d9b
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/cluster-test/cluster-test.go
@@ -0,0 +1,361 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "math/rand"
+ "os"
+ "runtime"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ elastic "github.com/olivere/elastic"
+)
+
+type Tweet struct {
+ User string `json:"user"`
+ Message string `json:"message"`
+ Retweets int `json:"retweets"`
+ Image string `json:"image,omitempty"`
+ Created time.Time `json:"created,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Location string `json:"location,omitempty"`
+ Suggest *elastic.SuggestField `json:"suggest_field,omitempty"`
+}
+
+var (
+ nodes = flag.String("nodes", "", "comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')")
+ n = flag.Int("n", 5, "number of goroutines that run searches")
+ index = flag.String("index", "twitter", "name of ES index to use")
+ errorlogfile = flag.String("errorlog", "", "error log file")
+ infologfile = flag.String("infolog", "", "info log file")
+ tracelogfile = flag.String("tracelog", "", "trace log file")
+ retries = flag.Int("retries", 0, "number of retries")
+ sniff = flag.Bool("sniff", elastic.DefaultSnifferEnabled, "enable or disable sniffer")
+ sniffer = flag.Duration("sniffer", elastic.DefaultSnifferInterval, "sniffer interval")
+ healthcheck = flag.Bool("healthcheck", elastic.DefaultHealthcheckEnabled, "enable or disable healthchecks")
+ healthchecker = flag.Duration("healthchecker", elastic.DefaultHealthcheckInterval, "healthcheck interval")
+)
+
+func main() {
+ flag.Parse()
+
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ if *nodes == "" {
+ log.Fatal("no nodes specified")
+ }
+ urls := strings.SplitN(*nodes, ",", -1)
+
+ testcase, err := NewTestCase(*index, urls)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ testcase.SetErrorLogFile(*errorlogfile)
+ testcase.SetInfoLogFile(*infologfile)
+ testcase.SetTraceLogFile(*tracelogfile)
+ testcase.SetMaxRetries(*retries)
+ testcase.SetHealthcheck(*healthcheck)
+ testcase.SetHealthcheckInterval(*healthchecker)
+ testcase.SetSniff(*sniff)
+ testcase.SetSnifferInterval(*sniffer)
+
+ if err := testcase.Run(*n); err != nil {
+ log.Fatal(err)
+ }
+
+ select {}
+}
+
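+// RunInfo reports the outcome of a single search run.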
+type RunInfo struct {
+ Success bool
+}
+
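+// TestCase drives concurrent search jobs against a cluster and records
+// their outcomes.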
+type TestCase struct {
+ nodes []string
+ client *elastic.Client
+ runs int64
+ failures int64
+ runCh chan RunInfo
+ index string
+ errorlogfile string
+ infologfile string
+ tracelogfile string
+ maxRetries int
+ healthcheck bool
+ healthcheckInterval time.Duration
+ sniff bool
+ snifferInterval time.Duration
+}
+
+func NewTestCase(index string, nodes []string) (*TestCase, error) {
+ if index == "" {
+ return nil, errors.New("no index name specified")
+ }
+
+ return &TestCase{
+ index: index,
+ nodes: nodes,
+ runCh: make(chan RunInfo),
+ }, nil
+}
+
+func (t *TestCase) SetIndex(name string) {
+ t.index = name
+}
+
+func (t *TestCase) SetErrorLogFile(name string) {
+ t.errorlogfile = name
+}
+
+func (t *TestCase) SetInfoLogFile(name string) {
+ t.infologfile = name
+}
+
+func (t *TestCase) SetTraceLogFile(name string) {
+ t.tracelogfile = name
+}
+
+func (t *TestCase) SetMaxRetries(n int) {
+ t.maxRetries = n
+}
+
+func (t *TestCase) SetSniff(enabled bool) {
+ t.sniff = enabled
+}
+
+func (t *TestCase) SetSnifferInterval(d time.Duration) {
+ t.snifferInterval = d
+}
+
+func (t *TestCase) SetHealthcheck(enabled bool) {
+ t.healthcheck = enabled
+}
+
+func (t *TestCase) SetHealthcheckInterval(d time.Duration) {
+ t.healthcheckInterval = d
+}
+
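+// Run sets up the test case, then starts the search goroutines and a
+// monitor goroutine.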
+func (t *TestCase) Run(n int) error {
+ if err := t.setup(); err != nil {
+ return err
+ }
+
+ for i := 1; i < n; i++ {
+ go t.search()
+ }
+
+ go t.monitor()
+
+ return nil
+}
+
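+// monitor consumes run results from runCh, updates the counters, and
+// prints progress statistics.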
+func (t *TestCase) monitor() {
+ print := func() {
+ fmt.Printf("\033[32m%5d\033[0m; \033[31m%5d\033[0m: %s%s\r", t.runs, t.failures, t.client.String(), " ")
+ }
+
+ for {
+ select {
+ case run := <-t.runCh:
+ atomic.AddInt64(&t.runs, 1)
+ if !run.Success {
+ atomic.AddInt64(&t.failures, 1)
+ fmt.Println()
+ }
+ print()
+ case <-time.After(5 * time.Second):
+ // Print stats after some inactivity
+ print()
+ }
+ }
+}
+
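+// setup creates the Elasticsearch client, recreates the index, and seeds
+// it with two sample tweets.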
+func (t *TestCase) setup() error {
+ var errorlogger *log.Logger
+ if t.errorlogfile != "" {
+ f, err := os.OpenFile(t.errorlogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ if err != nil {
+ return err
+ }
+ errorlogger = log.New(f, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
+ }
+
+ var infologger *log.Logger
+ if t.infologfile != "" {
+ f, err := os.OpenFile(t.infologfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ if err != nil {
+ return err
+ }
+ infologger = log.New(f, "", log.LstdFlags)
+ }
+
+ // Trace request and response details if a trace log file was given
+ var tracelogger *log.Logger
+ if t.tracelogfile != "" {
+ f, err := os.OpenFile(t.tracelogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ if err != nil {
+ return err
+ }
+ tracelogger = log.New(f, "", log.LstdFlags)
+ }
+
+ client, err := elastic.NewClient(
+ elastic.SetURL(t.nodes...),
+ elastic.SetErrorLog(errorlogger),
+ elastic.SetInfoLog(infologger),
+ elastic.SetTraceLog(tracelogger),
+ elastic.SetMaxRetries(t.maxRetries),
+ elastic.SetSniff(t.sniff),
+ elastic.SetSnifferInterval(t.snifferInterval),
+ elastic.SetHealthcheck(t.healthcheck),
+ elastic.SetHealthcheckInterval(t.healthcheckInterval))
+ if err != nil {
+ // Handle error
+ return err
+ }
+ t.client = client
+
+ ctx := context.Background()
+
+ // Use the IndexExists service to check if a specified index exists.
+ exists, err := t.client.IndexExists(t.index).Do(ctx)
+ if err != nil {
+ return err
+ }
+ if exists {
+ deleteIndex, err := t.client.DeleteIndex(t.index).Do(ctx)
+ if err != nil {
+ return err
+ }
+ if !deleteIndex.Acknowledged {
+ return errors.New("delete index not acknowledged")
+ }
+ }
+
+ // Create a new index.
+ createIndex, err := t.client.CreateIndex(t.index).Do(ctx)
+ if err != nil {
+ return err
+ }
+ if !createIndex.Acknowledged {
+ return errors.New("create index not acknowledged")
+ }
+
+ // Index a tweet (using JSON serialization)
+ tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
+ _, err = t.client.Index().
+ Index(t.index).
+ Type("tweet").
+ Id("1").
+ BodyJson(tweet1).
+ Do(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Index a second tweet (by string)
+ tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
+ _, err = t.client.Index().
+ Index(t.index).
+ Type("tweet").
+ Id("2").
+ BodyString(tweet2).
+ Do(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Flush to make sure the documents got written.
+ _, err = t.client.Flush().Index(t.index).Do(ctx)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
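+// search loops forever, fetching a document and running a term query,
+// reporting the success or failure of each run on runCh.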
+func (t *TestCase) search() {
+ ctx := context.Background()
+
+ // Loop forever to check for connection issues
+ for {
+ // Get tweet with specified ID
+ get1, err := t.client.Get().
+ Index(t.index).
+ Type("tweet").
+ Id("1").
+ Do(ctx)
+ if err != nil {
+ //failf("Get failed: %v", err)
+ t.runCh <- RunInfo{Success: false}
+ continue
+ }
+ if !get1.Found {
+ //log.Printf("Document %s not found\n", "1")
+ //fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
+ t.runCh <- RunInfo{Success: false}
+ continue
+ }
+
+ // Search with a term query
+ searchResult, err := t.client.Search().
+ Index(t.index). // search in index t.index
+ Query(elastic.NewTermQuery("user", "olivere")). // specify the query
+ Sort("user", true). // sort by "user" field, ascending
+ From(0).Size(10). // take documents 0-9
+ Pretty(true). // pretty print request and response JSON
+ Do(ctx) // execute
+ if err != nil {
+ //failf("Search failed: %v\n", err)
+ t.runCh <- RunInfo{Success: false}
+ continue
+ }
+
+ // searchResult is of type SearchResult and returns hits, suggestions,
+ // and all kinds of other information from Elasticsearch.
+ //fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+ // Number of hits
+ if searchResult.Hits.TotalHits > 0 {
+ //fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ deserializeFailed := false
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var tweet Tweet
+ if err := json.Unmarshal(*hit.Source, &tweet); err != nil {
+ // Deserialization failed. Report the failure once and stop processing
+ // this result set: a plain "continue" here would only skip this hit,
+ // and the success report after the loop would still be sent.
+ t.runCh <- RunInfo{Success: false}
+ deserializeFailed = true
+ break
+ }
+
+ // Work with tweet
+ //fmt.Printf("Tweet by %s: %s\n", tweet.User, tweet.Message)
+ }
+ if deserializeFailed {
+ continue
+ }
+ } else {
+ // No hits
+ //fmt.Print("Found no tweets\n")
+ }
+
+ t.runCh <- RunInfo{Success: true}
+
+ // Sleep some time
+ time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/cluster_health.go b/vendor/github.com/olivere/elastic/cluster_health.go
new file mode 100644
index 000000000..f960cfe8e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/cluster_health.go
@@ -0,0 +1,248 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// ClusterHealthService allows getting a very simple status on the health of the cluster.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-health.html
+// for details.
+type ClusterHealthService struct {
+ client *Client
+ pretty bool
+ indices []string
+ level string
+ local *bool
+ masterTimeout string
+ timeout string
+ waitForActiveShards *int
+ waitForNodes string
+ waitForNoRelocatingShards *bool
+ waitForStatus string
+}
+
+// NewClusterHealthService creates a new ClusterHealthService.
+func NewClusterHealthService(client *Client) *ClusterHealthService {
+ return &ClusterHealthService{
+ client: client,
+ indices: make([]string, 0),
+ }
+}
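+
+// A minimal usage sketch (not authoritative; assumes an initialized Client
+// named "client" and a context "ctx"):
+//
+//   res, err := client.ClusterHealth().Index("twitter").Do(ctx)
+//   if err != nil {
+//       // handle error
+//   }
+//   fmt.Printf("cluster status: %s\n", res.Status) // "green", "yellow", or "red"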
+
+// Index limits the information returned to specific indices.
+func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService {
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Level specifies the level of detail for returned information.
+func (s *ClusterHealthService) Level(level string) *ClusterHealthService {
+ s.level = level
+ return s
+}
+
+// Local indicates whether to return local information. If true, the state
+// is not retrieved from the master node (default: false).
+func (s *ClusterHealthService) Local(local bool) *ClusterHealthService {
+ s.local = &local
+ return s
+}
+
+// MasterTimeout specifies an explicit operation timeout for the connection to the master node.
+func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Timeout specifies an explicit operation timeout.
+func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
+ s.timeout = timeout
+ return s
+}
+
+// WaitForActiveShards can be used to wait until the specified number of shards are active.
+func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
+ s.waitForActiveShards = &waitForActiveShards
+ return s
+}
+
+// WaitForNodes can be used to wait until the specified number of nodes are available.
+// Example: "12" to wait for an exact number of nodes, ">12" and "<12" for ranges.
+func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
+ s.waitForNodes = waitForNodes
+ return s
+}
+
+// WaitForNoRelocatingShards can be used to wait until all shard relocations are finished.
+func (s *ClusterHealthService) WaitForNoRelocatingShards(waitForNoRelocatingShards bool) *ClusterHealthService {
+ s.waitForNoRelocatingShards = &waitForNoRelocatingShards
+ return s
+}
+
+// WaitForStatus can be used to wait until the cluster is in a specific state.
+// Valid values are: green, yellow, or red.
+func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
+ s.waitForStatus = waitForStatus
+ return s
+}
+
+// WaitForGreenStatus will wait for the "green" state.
+func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService {
+ return s.WaitForStatus("green")
+}
+
+// WaitForYellowStatus will wait for the "yellow" state.
+func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService {
+ return s.WaitForStatus("yellow")
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.indices) > 0 {
+ path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{
+ "index": strings.Join(s.indices, ","),
+ })
+ } else {
+ path = "/_cluster/health"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.level != "" {
+ params.Set("level", s.level)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.waitForActiveShards != nil {
+ params.Set("wait_for_active_shards", fmt.Sprintf("%v", s.waitForActiveShards))
+ }
+ if s.waitForNodes != "" {
+ params.Set("wait_for_nodes", s.waitForNodes)
+ }
+ if s.waitForNoRelocatingShards != nil {
+ params.Set("wait_for_no_relocating_shards", fmt.Sprintf("%v", *s.waitForNoRelocatingShards))
+ }
+ if s.waitForStatus != "" {
+ params.Set("wait_for_status", s.waitForStatus)
+ }
+ return path, params, nil
+}
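+
+// For example, a service configured with Index("twitter") and
+// WaitForStatus("yellow") builds the path "/_cluster/health/twitter" with the
+// query parameter wait_for_status=yellow (see cluster_health_test.go).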
+
+// Validate checks if the operation is valid.
+func (s *ClusterHealthService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClusterHealthService) Do(ctx context.Context) (*ClusterHealthResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ClusterHealthResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ClusterHealthResponse is the response of ClusterHealthService.Do.
+type ClusterHealthResponse struct {
+ ClusterName string `json:"cluster_name"`
+ Status string `json:"status"`
+ TimedOut bool `json:"timed_out"`
+ NumberOfNodes int `json:"number_of_nodes"`
+ NumberOfDataNodes int `json:"number_of_data_nodes"`
+ ActivePrimaryShards int `json:"active_primary_shards"`
+ ActiveShards int `json:"active_shards"`
+ RelocatingShards int `json:"relocating_shards"`
+ InitializingShards int `json:"initializing_shards"`
+ UnassignedShards int `json:"unassigned_shards"`
+ DelayedUnassignedShards int `json:"delayed_unassigned_shards"`
+ NumberOfPendingTasks int `json:"number_of_pending_tasks"`
+ NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"`
+ TaskMaxWaitTimeInQueueInMillis int `json:"task_max_waiting_in_queue_millis"`
+ ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"`
+
+ // Validation failures -> index name -> array of validation failures
+ ValidationFailures []map[string][]string `json:"validation_failures"`
+
+ // Index name -> index health
+ Indices map[string]*ClusterIndexHealth `json:"indices"`
+}
+
+// ClusterIndexHealth will be returned as part of ClusterHealthResponse.
+type ClusterIndexHealth struct {
+ Status string `json:"status"`
+ NumberOfShards int `json:"number_of_shards"`
+ NumberOfReplicas int `json:"number_of_replicas"`
+ ActivePrimaryShards int `json:"active_primary_shards"`
+ ActiveShards int `json:"active_shards"`
+ RelocatingShards int `json:"relocating_shards"`
+ InitializingShards int `json:"initializing_shards"`
+ UnassignedShards int `json:"unassigned_shards"`
+ // Validation failures
+ ValidationFailures []string `json:"validation_failures"`
+ // Shards by id, e.g. "0" or "1"
+ Shards map[string]*ClusterShardHealth `json:"shards"`
+}
+
+// ClusterShardHealth will be returned as part of ClusterHealthResponse.
+type ClusterShardHealth struct {
+ Status string `json:"status"`
+ PrimaryActive bool `json:"primary_active"`
+ ActiveShards int `json:"active_shards"`
+ RelocatingShards int `json:"relocating_shards"`
+ InitializingShards int `json:"initializing_shards"`
+ UnassignedShards int `json:"unassigned_shards"`
+}
diff --git a/vendor/github.com/olivere/elastic/cluster_health_test.go b/vendor/github.com/olivere/elastic/cluster_health_test.go
new file mode 100644
index 000000000..c2caee985
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/cluster_health_test.go
@@ -0,0 +1,119 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "net/url"
+ "testing"
+)
+
+func TestClusterHealth(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Get cluster health
+ res, err := client.ClusterHealth().Index(testIndexName).Level("shards").Pretty(true).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected res to be != nil; got: %v", res)
+ }
+ if res.Status != "green" && res.Status != "red" && res.Status != "yellow" {
+ t.Fatalf("expected status \"green\", \"red\", or \"yellow\"; got: %q", res.Status)
+ }
+}
+
+func TestClusterHealthURLs(t *testing.T) {
+ tests := []struct {
+ Service *ClusterHealthService
+ ExpectedPath string
+ ExpectedParams url.Values
+ }{
+ {
+ Service: &ClusterHealthService{
+ indices: []string{},
+ },
+ ExpectedPath: "/_cluster/health",
+ },
+ {
+ Service: &ClusterHealthService{
+ indices: []string{"twitter"},
+ },
+ ExpectedPath: "/_cluster/health/twitter",
+ },
+ {
+ Service: &ClusterHealthService{
+ indices: []string{"twitter", "gplus"},
+ },
+ ExpectedPath: "/_cluster/health/twitter%2Cgplus",
+ },
+ {
+ Service: &ClusterHealthService{
+ indices: []string{"twitter"},
+ waitForStatus: "yellow",
+ },
+ ExpectedPath: "/_cluster/health/twitter",
+ ExpectedParams: url.Values{"wait_for_status": []string{"yellow"}},
+ },
+ }
+
+ for _, test := range tests {
+ gotPath, gotParams, err := test.Service.buildURL()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if gotPath != test.ExpectedPath {
+ t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
+ }
+ if gotParams.Encode() != test.ExpectedParams.Encode() {
+ t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
+ }
+ }
+}
+
+func TestClusterHealthWaitForStatus(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ // Ensure preconditions are met: A green cluster.
+ health, err := client.ClusterHealth().Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := health.Status, "green"; got != want {
+ t.Skipf("precondition failed: expected cluster to be %q, not %q", want, got)
+ }
+
+ // Cluster health on an index that does not exist should never get to yellow
+ health, err = client.ClusterHealth().Index("no-such-index").WaitForStatus("yellow").Timeout("1s").Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected timeout error; got: %v", err)
+ }
+ if !IsTimeout(err) {
+ t.Fatalf("expected timeout error; got: %v", err)
+ }
+ if health != nil {
+ t.Fatalf("expected no response; got: %v", health)
+ }
+
+ // Cluster wide health
+ health, err = client.ClusterHealth().WaitForGreenStatus().Timeout("10s").Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if health.TimedOut != false {
+ t.Fatalf("expected no timeout; got: %v "+
+ "(does your local cluster contain unassigned shards?)", health.TimedOut)
+ }
+ if health.Status != "green" {
+ t.Fatalf("expected health = %q; got: %q", "green", health.Status)
+ }
+
+ // Cluster wide health via shortcut on client
+ err = client.WaitForGreenStatus("10s")
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/cluster_state.go b/vendor/github.com/olivere/elastic/cluster_state.go
new file mode 100644
index 000000000..54e9aa428
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/cluster_state.go
@@ -0,0 +1,288 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// ClusterStateService allows getting comprehensive state information of the whole cluster.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-state.html
+// for details.
+type ClusterStateService struct {
+ client *Client
+ pretty bool
+ indices []string
+ metrics []string
+ allowNoIndices *bool
+ expandWildcards string
+ flatSettings *bool
+ ignoreUnavailable *bool
+ local *bool
+ masterTimeout string
+}
+
+// NewClusterStateService creates a new ClusterStateService.
+func NewClusterStateService(client *Client) *ClusterStateService {
+ return &ClusterStateService{
+ client: client,
+ indices: make([]string, 0),
+ metrics: make([]string, 0),
+ }
+}
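+
+// A minimal usage sketch (assumes an initialized Client named "client" and a
+// context "ctx"):
+//
+//   res, err := client.ClusterState().Metric("metadata").Do(ctx)
+//   if err != nil {
+//       // handle error
+//   }
+//   fmt.Printf("cluster %s is at state version %d\n", res.ClusterName, res.Version)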
+
+// Index is a list of index names. Use _all or an empty string to
+// perform the operation on all indices.
+func (s *ClusterStateService) Index(indices ...string) *ClusterStateService {
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Metric limits the information returned to the specified metric.
+// It can be one of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService {
+ s.metrics = append(s.metrics, metrics...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore a wildcard indices
+// expression that resolves into no concrete indices.
+// (This includes the `_all` string and the case where no indices were specified.)
+func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed, or both.
+func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// FlatSettings, when set, returns settings in flat format (default: false).
+func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Local indicates whether to return local information. When set, it does not
+// retrieve the state from master node (default: false).
+func (s *ClusterStateService) Local(local bool) *ClusterStateService {
+ s.local = &local
+ return s
+}
+
+// MasterTimeout specifies timeout for connection to master.
+func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterStateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ metrics := strings.Join(s.metrics, ",")
+ if metrics == "" {
+ metrics = "_all"
+ }
+ indices := strings.Join(s.indices, ",")
+ if indices == "" {
+ indices = "_all"
+ }
+ path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{
+ "metrics": metrics,
+ "indices": indices,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
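+
+// For example, a service configured with Index("twitter"), Metric("nodes"),
+// and MasterTimeout("1s") builds the path "/_cluster/state/nodes/twitter" with
+// the query parameter master_timeout=1s (see cluster_state_test.go).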
+
+// Validate checks if the operation is valid.
+func (s *ClusterStateService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClusterStateService) Do(ctx context.Context) (*ClusterStateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ClusterStateResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ClusterStateResponse is the response of ClusterStateService.Do.
+type ClusterStateResponse struct {
+ ClusterName string `json:"cluster_name"`
+ Version int64 `json:"version"`
+ StateUUID string `json:"state_uuid"`
+ MasterNode string `json:"master_node"`
+ Blocks map[string]*clusterBlocks `json:"blocks"`
+ Nodes map[string]*discoveryNode `json:"nodes"`
+ Metadata *clusterStateMetadata `json:"metadata"`
+ RoutingTable map[string]*clusterStateRoutingTable `json:"routing_table"`
+ RoutingNodes *clusterStateRoutingNode `json:"routing_nodes"`
+ Customs map[string]interface{} `json:"customs"`
+}
+
+type clusterBlocks struct {
+ Global map[string]*clusterBlock `json:"global"` // id -> cluster block
+ Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block
+}
+
+type clusterBlock struct {
+ Description string `json:"description"`
+ Retryable bool `json:"retryable"`
+ DisableStatePersistence bool `json:"disable_state_persistence"`
+ Levels []string `json:"levels"`
+}
+
+type clusterStateMetadata struct {
+ ClusterUUID string `json:"cluster_uuid"`
+ Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata
+ Indices map[string]*indexMetaData `json:"indices"` // index name -> metadata
+ RoutingTable struct {
+ Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table
+ } `json:"routing_table"`
+ RoutingNodes struct {
+ Unassigned []*shardRouting `json:"unassigned"`
+ Nodes []*shardRouting `json:"nodes"`
+ } `json:"routing_nodes"`
+ Customs map[string]interface{} `json:"customs"`
+}
+
+type discoveryNode struct {
+ Name string `json:"name"` // server name, e.g. "es1"
+ TransportAddress string `json:"transport_address"` // e.g. inet[/1.2.3.4:9300]
+ Attributes map[string]interface{} `json:"attributes"` // e.g. { "data": true, "master": true }
+}
+
+type clusterStateRoutingTable struct {
+ Indices map[string]interface{} `json:"indices"`
+}
+
+type clusterStateRoutingNode struct {
+ Unassigned []*shardRouting `json:"unassigned"`
+ // Node Id -> shardRouting
+ Nodes map[string][]*shardRouting `json:"nodes"`
+}
+
+type indexTemplateMetaData struct {
+ IndexPatterns []string `json:"index_patterns"` // e.g. ["store-*"]
+ Order int `json:"order"`
+ Settings map[string]interface{} `json:"settings"` // index settings
+ Mappings map[string]interface{} `json:"mappings"` // type name -> mapping
+}
+
+type indexMetaData struct {
+ State string `json:"state"`
+ Settings map[string]interface{} `json:"settings"`
+ Mappings map[string]interface{} `json:"mappings"`
+ Aliases []string `json:"aliases"` // e.g. [ "alias1", "alias2" ]
+}
+
+type indexRoutingTable struct {
+ Shards map[string]*shardRouting `json:"shards"`
+}
+
+type shardRouting struct {
+ State string `json:"state"`
+ Primary bool `json:"primary"`
+ Node string `json:"node"`
+ RelocatingNode string `json:"relocating_node"`
+ Shard int `json:"shard"`
+ Index string `json:"index"`
+ Version int64 `json:"version"`
+ RestoreSource *RestoreSource `json:"restore_source"`
+ AllocationId *allocationId `json:"allocation_id"`
+ UnassignedInfo *unassignedInfo `json:"unassigned_info"`
+}
+
+type RestoreSource struct {
+ Repository string `json:"repository"`
+ Snapshot string `json:"snapshot"`
+ Version string `json:"version"`
+ Index string `json:"index"`
+}
+
+type allocationId struct {
+ Id string `json:"id"`
+ RelocationId string `json:"relocation_id"`
+}
+
+type unassignedInfo struct {
+ Reason string `json:"reason"`
+ At string `json:"at"`
+ Details string `json:"details"`
+}
diff --git a/vendor/github.com/olivere/elastic/cluster_state_test.go b/vendor/github.com/olivere/elastic/cluster_state_test.go
new file mode 100644
index 000000000..6eedb0c1b
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/cluster_state_test.go
@@ -0,0 +1,93 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "net/url"
+ "testing"
+)
+
+func TestClusterState(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Get cluster state
+ res, err := client.ClusterState().Index("_all").Metric("_all").Pretty(true).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected res to be != nil; got: %v", res)
+ }
+ if res.ClusterName == "" {
+ t.Fatalf("expected a cluster name; got: %q", res.ClusterName)
+ }
+}
+
+func TestClusterStateURLs(t *testing.T) {
+ tests := []struct {
+ Service *ClusterStateService
+ ExpectedPath string
+ ExpectedParams url.Values
+ }{
+ {
+ Service: &ClusterStateService{
+ indices: []string{},
+ metrics: []string{},
+ },
+ ExpectedPath: "/_cluster/state/_all/_all",
+ },
+ {
+ Service: &ClusterStateService{
+ indices: []string{"twitter"},
+ metrics: []string{},
+ },
+ ExpectedPath: "/_cluster/state/_all/twitter",
+ },
+ {
+ Service: &ClusterStateService{
+ indices: []string{"twitter", "gplus"},
+ metrics: []string{},
+ },
+ ExpectedPath: "/_cluster/state/_all/twitter%2Cgplus",
+ },
+ {
+ Service: &ClusterStateService{
+ indices: []string{},
+ metrics: []string{"nodes"},
+ },
+ ExpectedPath: "/_cluster/state/nodes/_all",
+ },
+ {
+ Service: &ClusterStateService{
+ indices: []string{"twitter"},
+ metrics: []string{"nodes"},
+ },
+ ExpectedPath: "/_cluster/state/nodes/twitter",
+ },
+ {
+ Service: &ClusterStateService{
+ indices: []string{"twitter"},
+ metrics: []string{"nodes"},
+ masterTimeout: "1s",
+ },
+ ExpectedPath: "/_cluster/state/nodes/twitter",
+ ExpectedParams: url.Values{"master_timeout": []string{"1s"}},
+ },
+ }
+
+ for _, test := range tests {
+ gotPath, gotParams, err := test.Service.buildURL()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if gotPath != test.ExpectedPath {
+ t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
+ }
+ if gotParams.Encode() != test.ExpectedParams.Encode() {
+ t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/cluster_stats.go b/vendor/github.com/olivere/elastic/cluster_stats.go
new file mode 100644
index 000000000..4d05c2e97
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/cluster_stats.go
@@ -0,0 +1,352 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// ClusterStatsService is documented at
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-stats.html.
+type ClusterStatsService struct {
+ client *Client
+ pretty bool
+ nodeId []string
+ flatSettings *bool
+ human *bool
+}
+
+// NewClusterStatsService creates a new ClusterStatsService.
+func NewClusterStatsService(client *Client) *ClusterStatsService {
+ return &ClusterStatsService{
+ client: client,
+ nodeId: make([]string, 0),
+ }
+}
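+
+// A minimal usage sketch (assumes an initialized Client named "client" and a
+// context "ctx"):
+//
+//   res, err := client.ClusterStats().Do(ctx)
+//   if err != nil {
+//       // handle error
+//   }
+//   fmt.Printf("%d nodes, %d indices\n", res.Nodes.Count.Total, res.Indices.Count)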
+
+// NodeId limits the returned information to the given node IDs or names.
+// Use `_local` to return information from the node you're connecting to;
+// leave empty to get information from all nodes.
+func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService {
+ s.nodeId = nodeId
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *ClusterStatsService) Human(human bool) *ClusterStatsService {
+ s.human = &human
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterStatsService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+
+ if len(s.nodeId) > 0 {
+ path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{
+ "node_id": strings.Join(s.nodeId, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+ } else {
+ path, err = uritemplates.Expand("/_cluster/stats", map[string]string{})
+ if err != nil {
+ return "", url.Values{}, err
+ }
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ return path, params, nil
+}
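+
+// For example, a service configured with NodeId([]string{"node1"}) builds the
+// path "/_cluster/stats/nodes/node1"; with no node IDs the path is
+// "/_cluster/stats" (see cluster_stats_test.go).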
+
+// Validate checks if the operation is valid.
+func (s *ClusterStatsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClusterStatsService) Do(ctx context.Context) (*ClusterStatsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ClusterStatsResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ClusterStatsResponse is the response of ClusterStatsService.Do.
+type ClusterStatsResponse struct {
+ Timestamp int64 `json:"timestamp"`
+ ClusterName string `json:"cluster_name"`
+ ClusterUUID string `json:"uuid"`
+ Status string `json:"status"`
+ Indices *ClusterStatsIndices `json:"indices"`
+ Nodes *ClusterStatsNodes `json:"nodes"`
+}
+
+type ClusterStatsIndices struct {
+ Count int `json:"count"`
+ Shards *ClusterStatsIndicesShards `json:"shards"`
+ Docs *ClusterStatsIndicesDocs `json:"docs"`
+ Store *ClusterStatsIndicesStore `json:"store"`
+ FieldData *ClusterStatsIndicesFieldData `json:"fielddata"`
+ FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"`
+ IdCache *ClusterStatsIndicesIdCache `json:"id_cache"`
+ Completion *ClusterStatsIndicesCompletion `json:"completion"`
+ Segments *ClusterStatsIndicesSegments `json:"segments"`
+ Percolate *ClusterStatsIndicesPercolate `json:"percolate"`
+}
+
+type ClusterStatsIndicesShards struct {
+ Total int `json:"total"`
+ Primaries int `json:"primaries"`
+ Replication float64 `json:"replication"`
+ Index *ClusterStatsIndicesShardsIndex `json:"index"`
+}
+
+type ClusterStatsIndicesShardsIndex struct {
+ Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"`
+ Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"`
+ Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"`
+}
+
+type ClusterStatsIndicesShardsIndexIntMinMax struct {
+ Min int `json:"min"`
+ Max int `json:"max"`
+ Avg float64 `json:"avg"`
+}
+
+type ClusterStatsIndicesShardsIndexFloat64MinMax struct {
+ Min float64 `json:"min"`
+ Max float64 `json:"max"`
+ Avg float64 `json:"avg"`
+}
+
+type ClusterStatsIndicesDocs struct {
+ Count int `json:"count"`
+ Deleted int `json:"deleted"`
+}
+
+type ClusterStatsIndicesStore struct {
+ Size string `json:"size"` // e.g. "5.3gb"
+ SizeInBytes int64 `json:"size_in_bytes"`
+}
+
+type ClusterStatsIndicesFieldData struct {
+ MemorySize string `json:"memory_size"` // e.g. "61.3kb"
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ Evictions int64 `json:"evictions"`
+ Fields map[string]struct {
+ MemorySize string `json:"memory_size"` // e.g. "61.3kb"
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ } `json:"fields"`
+}
+
+type ClusterStatsIndicesFilterCache struct {
+ MemorySize string `json:"memory_size"` // e.g. "61.3kb"
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ Evictions int64 `json:"evictions"`
+}
+
+type ClusterStatsIndicesIdCache struct {
+ MemorySize string `json:"memory_size"` // e.g. "61.3kb"
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+}
+
+type ClusterStatsIndicesCompletion struct {
+ Size string `json:"size"` // e.g. "61.3kb"
+ SizeInBytes int64 `json:"size_in_bytes"`
+ Fields map[string]struct {
+ Size string `json:"size"` // e.g. "61.3kb"
+ SizeInBytes int64 `json:"size_in_bytes"`
+ } `json:"fields"`
+}
+
+type ClusterStatsIndicesSegments struct {
+ Count int64 `json:"count"`
+ Memory string `json:"memory"` // e.g. "61.3kb"
+ MemoryInBytes int64 `json:"memory_in_bytes"`
+ IndexWriterMemory string `json:"index_writer_memory"` // e.g. "61.3kb"
+ IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"`
+ IndexWriterMaxMemory string `json:"index_writer_max_memory"` // e.g. "61.3kb"
+ IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"`
+ VersionMapMemory string `json:"version_map_memory"` // e.g. "61.3kb"
+ VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"`
+ FixedBitSet string `json:"fixed_bit_set"` // e.g. "61.3kb"
+ FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"`
+}
+
+type ClusterStatsIndicesPercolate struct {
+ Total int64 `json:"total"`
+ // TODO(oe) The JSON tag here seems to be wrong as of ES 1.5.2.
+ Time string `json:"get_time"` // e.g. "1s"
+ TimeInBytes int64 `json:"time_in_millis"`
+ Current int64 `json:"current"`
+ MemorySize string `json:"memory_size"` // e.g. "61.3kb"
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ Queries int64 `json:"queries"`
+}
+
+// ---
+
+type ClusterStatsNodes struct {
+ Count *ClusterStatsNodesCount `json:"count"`
+ Versions []string `json:"versions"`
+ OS *ClusterStatsNodesOsStats `json:"os"`
+ Process *ClusterStatsNodesProcessStats `json:"process"`
+ JVM *ClusterStatsNodesJvmStats `json:"jvm"`
+ FS *ClusterStatsNodesFsStats `json:"fs"`
+ Plugins []*ClusterStatsNodesPlugin `json:"plugins"`
+}
+
+type ClusterStatsNodesCount struct {
+ Total int `json:"total"`
+ Data int `json:"data"`
+ CoordinatingOnly int `json:"coordinating_only"`
+ Master int `json:"master"`
+ Ingest int `json:"ingest"`
+}
+
+type ClusterStatsNodesOsStats struct {
+ AvailableProcessors int `json:"available_processors"`
+ Mem *ClusterStatsNodesOsStatsMem `json:"mem"`
+ CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"`
+}
+
+type ClusterStatsNodesOsStatsMem struct {
+ Total string `json:"total"` // e.g. "16gb"
+ TotalInBytes int64 `json:"total_in_bytes"`
+}
+
+type ClusterStatsNodesOsStatsCPU struct {
+ Vendor string `json:"vendor"`
+ Model string `json:"model"`
+ MHz int `json:"mhz"`
+ TotalCores int `json:"total_cores"`
+ TotalSockets int `json:"total_sockets"`
+ CoresPerSocket int `json:"cores_per_socket"`
+ CacheSize string `json:"cache_size"` // e.g. "256b"
+ CacheSizeInBytes int64 `json:"cache_size_in_bytes"`
+ Count int `json:"count"`
+}
+
+type ClusterStatsNodesProcessStats struct {
+ CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"`
+ OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"`
+}
+
+type ClusterStatsNodesProcessStatsCPU struct {
+ Percent float64 `json:"percent"`
+}
+
+type ClusterStatsNodesProcessStatsOpenFileDescriptors struct {
+ Min int64 `json:"min"`
+ Max int64 `json:"max"`
+ Avg int64 `json:"avg"`
+}
+
+type ClusterStatsNodesJvmStats struct {
+ MaxUptime string `json:"max_uptime"` // e.g. "5h"
+ MaxUptimeInMillis int64 `json:"max_uptime_in_millis"`
+ Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"`
+ Mem *ClusterStatsNodesJvmStatsMem `json:"mem"`
+ Threads int64 `json:"threads"`
+}
+
+type ClusterStatsNodesJvmStatsVersion struct {
+ Version string `json:"version"` // e.g. "1.8.0_45"
+ VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM"
+ VMVersion string `json:"vm_version"` // e.g. "25.45-b02"
+ VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation"
+ Count int `json:"count"`
+}
+
+type ClusterStatsNodesJvmStatsMem struct {
+ HeapUsed string `json:"heap_used"`
+ HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
+ HeapMax string `json:"heap_max"`
+ HeapMaxInBytes int64 `json:"heap_max_in_bytes"`
+}
+
+type ClusterStatsNodesFsStats struct {
+ Path string `json:"path"`
+ Mount string `json:"mount"`
+ Dev string `json:"dev"`
+ Total string `json:"total"` // e.g. "930.7gb"
+ TotalInBytes int64 `json:"total_in_bytes"`
+ Free string `json:"free"` // e.g. "930.7gb"
+ FreeInBytes int64 `json:"free_in_bytes"`
+ Available string `json:"available"` // e.g. "930.7gb"
+ AvailableInBytes int64 `json:"available_in_bytes"`
+ DiskReads int64 `json:"disk_reads"`
+ DiskWrites int64 `json:"disk_writes"`
+ DiskIOOp int64 `json:"disk_io_op"`
+ DiskReadSize string `json:"disk_read_size"` // e.g. "0b"
+ DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"`
+ DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"
+ DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"`
+ DiskIOSize string `json:"disk_io_size"` // e.g. "0b"
+ DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"`
+ DiskQueue string `json:"disk_queue"`
+ DiskServiceTime string `json:"disk_service_time"`
+}
+
+type ClusterStatsNodesPlugin struct {
+ Name string `json:"name"`
+ Version string `json:"version"`
+ Description string `json:"description"`
+ URL string `json:"url"`
+ JVM bool `json:"jvm"`
+ Site bool `json:"site"`
+}
diff --git a/vendor/github.com/olivere/elastic/cluster_stats_test.go b/vendor/github.com/olivere/elastic/cluster_stats_test.go
new file mode 100644
index 000000000..fe6da4704
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/cluster_stats_test.go
@@ -0,0 +1,92 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "net/url"
+ "testing"
+)
+
+func TestClusterStats(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Get cluster stats
+ res, err := client.ClusterStats().Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected res to be != nil; got: %v", res)
+ }
+ if res.ClusterName == "" {
+ t.Fatalf("expected a cluster name; got: %q", res.ClusterName)
+ }
+ if res.Nodes == nil {
+ t.Fatalf("expected nodes; got: %v", res.Nodes)
+ }
+ if res.Nodes.Count == nil {
+ t.Fatalf("expected nodes count; got: %v", res.Nodes.Count)
+ }
+}
+
+func TestClusterStatsURLs(t *testing.T) {
+ fFlag := false
+ tFlag := true
+
+ tests := []struct {
+ Service *ClusterStatsService
+ ExpectedPath string
+ ExpectedParams url.Values
+ }{
+ {
+ Service: &ClusterStatsService{
+ nodeId: []string{},
+ },
+ ExpectedPath: "/_cluster/stats",
+ },
+ {
+ Service: &ClusterStatsService{
+ nodeId: []string{"node1"},
+ },
+ ExpectedPath: "/_cluster/stats/nodes/node1",
+ },
+ {
+ Service: &ClusterStatsService{
+ nodeId: []string{"node1", "node2"},
+ },
+ ExpectedPath: "/_cluster/stats/nodes/node1%2Cnode2",
+ },
+ {
+ Service: &ClusterStatsService{
+ nodeId: []string{},
+ flatSettings: &tFlag,
+ },
+ ExpectedPath: "/_cluster/stats",
+ ExpectedParams: url.Values{"flat_settings": []string{"true"}},
+ },
+ {
+ Service: &ClusterStatsService{
+ nodeId: []string{"node1"},
+ flatSettings: &fFlag,
+ },
+ ExpectedPath: "/_cluster/stats/nodes/node1",
+ ExpectedParams: url.Values{"flat_settings": []string{"false"}},
+ },
+ }
+
+ for _, test := range tests {
+ gotPath, gotParams, err := test.Service.buildURL()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if gotPath != test.ExpectedPath {
+ t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
+ }
+ if gotParams.Encode() != test.ExpectedParams.Encode() {
+ t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/config/config.go b/vendor/github.com/olivere/elastic/config/config.go
new file mode 100644
index 000000000..a511c4157
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/config/config.go
@@ -0,0 +1,90 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package config
+
+import (
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+// Config represents an Elasticsearch configuration.
+type Config struct {
+ URL string
+ Index string
+ Username string
+ Password string
+ Shards int
+ Replicas int
+ Sniff *bool
+ Infolog string
+ Errorlog string
+ Tracelog string
+}
+
+// Parse returns the Elasticsearch configuration by extracting it
+// from the URL, its path, and its query string.
+//
+// Example:
+// http://127.0.0.1:9200/store-blobs?shards=1&replicas=0&sniff=false&tracelog=elastic.trace.log
+//
+// The URL above will be parsed into a URL of http://127.0.0.1:9200, an index
+// name of store-blobs, and the related settings from the query string.
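+//
+// A minimal usage sketch of Parse (error handling elided; values shown follow
+// from the parsing rules below):
+//
+//   cfg, _ := config.Parse("http://user:pwd@127.0.0.1:9200/store-blobs?shards=1&replicas=0")
+//   // cfg.URL == "http://127.0.0.1:9200"
+//   // cfg.Index == "store-blobs"
+//   // cfg.Username == "user", cfg.Password == "pwd"
+//   // cfg.Shards == 1, cfg.Replicas == 0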
+func Parse(elasticURL string) (*Config, error) {
+ cfg := &Config{
+ Shards: 1,
+ Replicas: 0,
+ Sniff: nil,
+ }
+
+ uri, err := url.Parse(elasticURL)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing elastic parameter %q: %v", elasticURL, err)
+ }
+ index := uri.Path
+ if strings.HasPrefix(index, "/") {
+ index = index[1:]
+ }
+ if strings.HasSuffix(index, "/") {
+ index = index[:len(index)-1]
+ }
+ if index == "" {
+ return nil, fmt.Errorf("missing index in elastic parameter %q", elasticURL)
+ }
+ if uri.User != nil {
+ cfg.Username = uri.User.Username()
+ cfg.Password, _ = uri.User.Password()
+ }
+ uri.User = nil
+
+ if i, err := strconv.Atoi(uri.Query().Get("shards")); err == nil {
+ cfg.Shards = i
+ }
+ if i, err := strconv.Atoi(uri.Query().Get("replicas")); err == nil {
+ cfg.Replicas = i
+ }
+ if s := uri.Query().Get("sniff"); s != "" {
+ if b, err := strconv.ParseBool(s); err == nil {
+ cfg.Sniff = &b
+ }
+ }
+ if s := uri.Query().Get("infolog"); s != "" {
+ cfg.Infolog = s
+ }
+ if s := uri.Query().Get("errorlog"); s != "" {
+ cfg.Errorlog = s
+ }
+ if s := uri.Query().Get("tracelog"); s != "" {
+ cfg.Tracelog = s
+ }
+
+ uri.Path = ""
+ uri.RawQuery = ""
+ cfg.URL = uri.String()
+ cfg.Index = index
+
+ return cfg, nil
+}
diff --git a/vendor/github.com/olivere/elastic/config/config_test.go b/vendor/github.com/olivere/elastic/config/config_test.go
new file mode 100644
index 000000000..caa3bbadb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/config/config_test.go
@@ -0,0 +1,45 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package config
+
+import "testing"
+
+func TestParse(t *testing.T) {
+ urls := "http://user:pwd@elastic:19220/store-blobs?shards=5&replicas=2&sniff=true&errorlog=elastic.error.log&infolog=elastic.info.log&tracelog=elastic.trace.log"
+ cfg, err := Parse(urls)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, got := "http://elastic:19220", cfg.URL; want != got {
+ t.Fatalf("expected URL = %q, got %q", want, got)
+ }
+ if want, got := "store-blobs", cfg.Index; want != got {
+ t.Fatalf("expected Index = %q, got %q", want, got)
+ }
+ if want, got := "user", cfg.Username; want != got {
+ t.Fatalf("expected Username = %q, got %q", want, got)
+ }
+ if want, got := "pwd", cfg.Password; want != got {
+ t.Fatalf("expected Password = %q, got %q", want, got)
+ }
+ if want, got := 5, cfg.Shards; want != got {
+ t.Fatalf("expected Shards = %v, got %v", want, got)
+ }
+ if want, got := 2, cfg.Replicas; want != got {
+ t.Fatalf("expected Replicas = %v, got %v", want, got)
+ }
+ if want, got := true, *cfg.Sniff; want != got {
+ t.Fatalf("expected Sniff = %v, got %v", want, got)
+ }
+ if want, got := "elastic.error.log", cfg.Errorlog; want != got {
+ t.Fatalf("expected Errorlog = %q, got %q", want, got)
+ }
+ if want, got := "elastic.info.log", cfg.Infolog; want != got {
+ t.Fatalf("expected Infolog = %q, got %q", want, got)
+ }
+ if want, got := "elastic.trace.log", cfg.Tracelog; want != got {
+ t.Fatalf("expected Tracelog = %q, got %q", want, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/config/doc.go b/vendor/github.com/olivere/elastic/config/doc.go
new file mode 100644
index 000000000..c9acd5ff1
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/config/doc.go
@@ -0,0 +1,9 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+/*
+Package config allows parsing a configuration for Elasticsearch
+from a URL.
+*/
+package config
diff --git a/vendor/github.com/olivere/elastic/connection.go b/vendor/github.com/olivere/elastic/connection.go
new file mode 100644
index 000000000..0f27a8756
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/connection.go
@@ -0,0 +1,90 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+// conn represents a single connection to a node in a cluster.
+type conn struct {
+ sync.RWMutex
+ nodeID string // node ID
+ url string
+ failures int
+ dead bool
+ deadSince *time.Time
+}
+
+// newConn creates a new connection to the given URL.
+func newConn(nodeID, url string) *conn {
+ c := &conn{
+ nodeID: nodeID,
+ url: url,
+ }
+ return c
+}
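+
+// Illustration of the connection lifecycle (a sketch only; conn is unexported
+// and is managed internally by the client):
+//
+//   c := newConn("node-1", "http://127.0.0.1:9200")
+//   c.MarkAsDead()    // a request against c.URL() failed
+//   c.MarkAsAlive()   // the selector may hand out c again
+//   c.MarkAsHealthy() // a request succeeded; failure state is reset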
+
+// String returns a representation of the connection status.
+func (c *conn) String() string {
+ c.RLock()
+ defer c.RUnlock()
+ return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince)
+}
+
+// NodeID returns the ID of the node of this connection.
+func (c *conn) NodeID() string {
+ c.RLock()
+ defer c.RUnlock()
+ return c.nodeID
+}
+
+// URL returns the URL of this connection.
+func (c *conn) URL() string {
+ c.RLock()
+ defer c.RUnlock()
+ return c.url
+}
+
+// IsDead returns true if this connection is marked as dead, i.e. a previous
+// request to the URL has been unsuccessful.
+func (c *conn) IsDead() bool {
+ c.RLock()
+ defer c.RUnlock()
+ return c.dead
+}
+
+// MarkAsDead marks this connection as dead, increments the failures
+// counter, and records the time it died in deadSince.
+func (c *conn) MarkAsDead() {
+ c.Lock()
+ c.dead = true
+ if c.deadSince == nil {
+ utcNow := time.Now().UTC()
+ c.deadSince = &utcNow
+ }
+ c.failures++
+ c.Unlock()
+}
+
+// MarkAsAlive marks this connection as eligible to be returned from the
+// pool of connections by the selector.
+func (c *conn) MarkAsAlive() {
+ c.Lock()
+ c.dead = false
+ c.Unlock()
+}
+
+// MarkAsHealthy marks this connection as healthy, i.e. a request has been
+// successfully performed with it.
+func (c *conn) MarkAsHealthy() {
+ c.Lock()
+ c.dead = false
+ c.deadSince = nil
+ c.failures = 0
+ c.Unlock()
+}
diff --git a/vendor/github.com/olivere/elastic/count.go b/vendor/github.com/olivere/elastic/count.go
new file mode 100644
index 000000000..44416fab0
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/count.go
@@ -0,0 +1,315 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// CountService is a convenient service for determining the
+// number of documents in an index. Use SearchService with
+// a SearchType of count for counting with queries etc.
+type CountService struct {
+ client *Client
+ pretty bool
+ index []string
+ typ []string
+ allowNoIndices *bool
+ analyzeWildcard *bool
+ analyzer string
+ defaultOperator string
+ df string
+ expandWildcards string
+ ignoreUnavailable *bool
+ lenient *bool
+ lowercaseExpandedTerms *bool
+ minScore interface{}
+ preference string
+ q string
+ query Query
+ routing string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewCountService creates a new CountService.
+func NewCountService(client *Client) *CountService {
+ return &CountService{
+ client: client,
+ }
+}
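+
+// A minimal usage sketch (assumes an initialized Client named "client", a
+// context "ctx", and an index "twitter"):
+//
+//   n, err := client.Count("twitter").Query(elastic.NewTermQuery("user", "olivere")).Do(ctx)
+//   if err != nil {
+//       // handle error
+//   }
+//   fmt.Printf("found %d documents\n", n)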
+
+// Index sets the names of the indices to restrict the results.
+func (s *CountService) Index(index ...string) *CountService {
+ if s.index == nil {
+ s.index = make([]string, 0)
+ }
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Type sets the types to use to restrict the results.
+func (s *CountService) Type(typ ...string) *CountService {
+ if s.typ == nil {
+ s.typ = make([]string, 0)
+ }
+ s.typ = append(s.typ, typ...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore a wildcard indices
+// expression that resolves into no concrete indices. (This includes the
+// "_all" string and the case where no indices were specified.)
+func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// AnalyzeWildcard specifies whether wildcard and prefix queries should be
+// analyzed (default: false).
+func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService {
+ s.analyzeWildcard = &analyzeWildcard
+ return s
+}
+
+// Analyzer specifies the analyzer to use for the query string.
+func (s *CountService) Analyzer(analyzer string) *CountService {
+ s.analyzer = analyzer
+ return s
+}
+
+// DefaultOperator specifies the default operator for query string query (AND or OR).
+func (s *CountService) DefaultOperator(defaultOperator string) *CountService {
+ s.defaultOperator = defaultOperator
+ return s
+}
+
+// Df specifies the field to use as default where no field prefix is given
+// in the query string.
+func (s *CountService) Df(df string) *CountService {
+ s.df = df
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *CountService) ExpandWildcards(expandWildcards string) *CountService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Lenient specifies whether format-based query failures (such as
+// providing text to a numeric field) should be ignored.
+func (s *CountService) Lenient(lenient bool) *CountService {
+ s.lenient = &lenient
+ return s
+}
+
+// LowercaseExpandedTerms specifies whether query terms should be lowercased.
+func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService {
+ s.lowercaseExpandedTerms = &lowercaseExpandedTerms
+ return s
+}
+
+// MinScore includes only documents with a `_score` greater than or equal
+// to the given minimum value in the result.
+func (s *CountService) MinScore(minScore interface{}) *CountService {
+ s.minScore = minScore
+ return s
+}
+
+// Preference specifies the node or shard the operation should be
+// performed on (default: random).
+func (s *CountService) Preference(preference string) *CountService {
+ s.preference = preference
+ return s
+}
+
+// Q specifies the query in the Lucene query string syntax. You can also use
+// Query to pass a Query struct.
+func (s *CountService) Q(q string) *CountService {
+ s.q = q
+ return s
+}
+
+// Query specifies the query to pass. You can also pass a query string with Q.
+func (s *CountService) Query(query Query) *CountService {
+ s.query = query
+ return s
+}
+
+// Routing specifies the routing value.
+func (s *CountService) Routing(routing string) *CountService {
+ s.routing = routing
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *CountService) Pretty(pretty bool) *CountService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson specifies the query to restrict the results specified with the
+// Query DSL (optional). The interface{} will be serialized to a JSON document,
+// so use a map[string]interface{}.
+func (s *CountService) BodyJson(body interface{}) *CountService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString specifies a query to restrict the results, specified with
+// the Query DSL (optional).
+func (s *CountService) BodyString(body string) *CountService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *CountService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ if len(s.index) > 0 && len(s.typ) > 0 {
+ path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "type": strings.Join(s.typ, ","),
+ })
+ } else if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_count", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else if len(s.typ) > 0 {
+ path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{
+ "type": strings.Join(s.typ, ","),
+ })
+ } else {
+ path = "/_all/_count"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.analyzeWildcard != nil {
+ params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
+ }
+ if s.analyzer != "" {
+ params.Set("analyzer", s.analyzer)
+ }
+ if s.defaultOperator != "" {
+ params.Set("default_operator", s.defaultOperator)
+ }
+ if s.df != "" {
+ params.Set("df", s.df)
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.lenient != nil {
+ params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
+ }
+ if s.lowercaseExpandedTerms != nil {
+ params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
+ }
+ if s.minScore != nil {
+ params.Set("min_score", fmt.Sprintf("%v", s.minScore))
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.q != "" {
+ params.Set("q", s.q)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ return path, params, nil
+}
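+
+// For example, Index("twitter-2014", "twitter-2015") combined with
+// Type("tweet", "follower") builds the URL-escaped path
+// "/twitter-2014%2Ctwitter-2015/tweet%2Cfollower/_count"; with no indices and
+// no types the path is "/_all/_count" (see count_test.go).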
+
+// Validate checks if the operation is valid.
+func (s *CountService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *CountService) Do(ctx context.Context) (int64, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return 0, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return 0, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.query != nil {
+ src, err := s.query.Source()
+ if err != nil {
+ return 0, err
+ }
+ query := make(map[string]interface{})
+ query["query"] = src
+ body = query
+ } else if s.bodyJson != nil {
+ body = s.bodyJson
+ } else if s.bodyString != "" {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return 0, err
+ }
+
+ // Return result. ret is guaranteed to be non-nil after new, so no nil
+ // check is needed before dereferencing it.
+ ret := new(CountResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return 0, err
+ }
+ return ret.Count, nil
+}
+
+// CountResponse is the response of using the Count API.
+type CountResponse struct {
+ Count int64 `json:"count"`
+ Shards shardsInfo `json:"_shards,omitempty"`
+}
diff --git a/vendor/github.com/olivere/elastic/count_test.go b/vendor/github.com/olivere/elastic/count_test.go
new file mode 100644
index 000000000..a0ee52112
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/count_test.go
@@ -0,0 +1,127 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestCountURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Types []string
+ Expected string
+ }{
+ {
+ []string{},
+ []string{},
+ "/_all/_count",
+ },
+ {
+ []string{},
+ []string{"tweet"},
+ "/_all/tweet/_count",
+ },
+ {
+ []string{"twitter-*"},
+ []string{"tweet", "follower"},
+ "/twitter-%2A/tweet%2Cfollower/_count",
+ },
+ {
+ []string{"twitter-2014", "twitter-2015"},
+ []string{"tweet", "follower"},
+ "/twitter-2014%2Ctwitter-2015/tweet%2Cfollower/_count",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.Count().Index(test.Indices...).Type(test.Types...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
+
+func TestCount(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Count documents
+ count, err := client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 3 {
+ t.Errorf("expected Count = %d; got %d", 3, count)
+ }
+
+ // Count documents
+ count, err = client.Count(testIndexName).Type("doc").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 3 {
+ t.Errorf("expected Count = %d; got %d", 3, count)
+ }
+
+ // Count documents
+ count, err = client.Count(testIndexName).Type("gezwitscher").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 0 {
+ t.Errorf("expected Count = %d; got %d", 0, count)
+ }
+
+ // Count with query
+ query := NewTermQuery("user", "olivere")
+ count, err = client.Count(testIndexName).Query(query).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 2 {
+ t.Errorf("expected Count = %d; got %d", 2, count)
+ }
+
+ // Count with query and type
+ query = NewTermQuery("user", "olivere")
+ count, err = client.Count(testIndexName).Type("doc").Query(query).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 2 {
+ t.Errorf("expected Count = %d; got %d", 2, count)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/decoder.go b/vendor/github.com/olivere/elastic/decoder.go
new file mode 100644
index 000000000..9cd2cf720
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/decoder.go
@@ -0,0 +1,26 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+)
+
+// Decoder is used to decode responses from Elasticsearch.
+// Users of elastic can implement their own decoder for advanced purposes
+// and set it per Client (see SetDecoder). If none is specified,
+// DefaultDecoder is used.
+type Decoder interface {
+ Decode(data []byte, v interface{}) error
+}
+
+// DefaultDecoder uses json.Unmarshal from the Go standard library
+// to decode JSON data.
+type DefaultDecoder struct{}
+
+// Decode decodes with json.Unmarshal from the Go standard library.
+func (u *DefaultDecoder) Decode(data []byte, v interface{}) error {
+ return json.Unmarshal(data, v)
+}
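+
+// A custom Decoder can be installed per client via SetDecoder. The sketch
+// below is illustrative only ("numberDecoder" is hypothetical, not part of
+// this package); it preserves numeric precision by decoding into json.Number
+// instead of float64:
+//
+//	type numberDecoder struct{}
+//
+//	func (d *numberDecoder) Decode(data []byte, v interface{}) error {
+//		dec := json.NewDecoder(bytes.NewReader(data))
+//		dec.UseNumber() // numbers become json.Number, not float64
+//		return dec.Decode(v)
+//	}
+//
+//	client, err := elastic.NewClient(elastic.SetDecoder(&numberDecoder{}))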
diff --git a/vendor/github.com/olivere/elastic/decoder_test.go b/vendor/github.com/olivere/elastic/decoder_test.go
new file mode 100644
index 000000000..2c3dde8ca
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/decoder_test.go
@@ -0,0 +1,50 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "sync/atomic"
+ "testing"
+)
+
+type decoder struct {
+ dec json.Decoder
+
+ N int64
+}
+
+func (d *decoder) Decode(data []byte, v interface{}) error {
+ atomic.AddInt64(&d.N, 1)
+ dec := json.NewDecoder(bytes.NewReader(data))
+ dec.UseNumber()
+ return dec.Decode(v)
+}
+
+func TestDecoder(t *testing.T) {
+ dec := &decoder{}
+ client := setupTestClientAndCreateIndex(t, SetDecoder(dec), SetMaxRetries(0))
+
+ tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ // Add a document
+ indexResult, err := client.Index().
+ Index(testIndexName).
+ Type("doc").
+ Id("1").
+ BodyJson(&tweet).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", indexResult)
+ }
+ if dec.N == 0 {
+ t.Errorf("expected at least 1 call of decoder; got: %d", dec.N)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/delete.go b/vendor/github.com/olivere/elastic/delete.go
new file mode 100644
index 000000000..1e20de11f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/delete.go
@@ -0,0 +1,226 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// DeleteService deletes a typed JSON document from a specified
+// index based on its id.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-delete.html
+// for details.
+type DeleteService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ routing string
+ timeout string
+ version interface{}
+ versionType string
+ waitForActiveShards string
+ parent string
+ refresh string
+}
+
+// NewDeleteService creates a new DeleteService.
+func NewDeleteService(client *Client) *DeleteService {
+ return &DeleteService{
+ client: client,
+ }
+}
+
+// Type is the type of the document.
+func (s *DeleteService) Type(typ string) *DeleteService {
+ s.typ = typ
+ return s
+}
+
+// Id is the document ID.
+func (s *DeleteService) Id(id string) *DeleteService {
+ s.id = id
+ return s
+}
+
+// Index is the name of the index.
+func (s *DeleteService) Index(index string) *DeleteService {
+ s.index = index
+ return s
+}
+
+// Routing is a specific routing value.
+func (s *DeleteService) Routing(routing string) *DeleteService {
+ s.routing = routing
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *DeleteService) Timeout(timeout string) *DeleteService {
+ s.timeout = timeout
+ return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *DeleteService) Version(version interface{}) *DeleteService {
+ s.version = version
+ return s
+}
+
+// VersionType is a specific version type.
+func (s *DeleteService) VersionType(versionType string) *DeleteService {
+ s.versionType = versionType
+ return s
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active
+// before proceeding with the delete operation. Defaults to 1, meaning the
+// primary shard only. Set to `all` for all shard copies, otherwise set to
+// any non-negative value less than or equal to the total number of copies
+// for the shard (number of replicas + 1).
+func (s *DeleteService) WaitForActiveShards(waitForActiveShards string) *DeleteService {
+ s.waitForActiveShards = waitForActiveShards
+ return s
+}
+
+// Parent is the ID of parent document.
+func (s *DeleteService) Parent(parent string) *DeleteService {
+ s.parent = parent
+ return s
+}
+
+// Refresh the index after performing the operation.
+func (s *DeleteService) Refresh(refresh string) *DeleteService {
+ s.refresh = refresh
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human-readable.
+func (s *DeleteService) Pretty(pretty bool) *DeleteService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *DeleteService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+ "index": s.index,
+ "type": s.typ,
+ "id": s.id,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.refresh != "" {
+ params.Set("refresh", s.refresh)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+ if s.waitForActiveShards != "" {
+ params.Set("wait_for_active_shards", s.waitForActiveShards)
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *DeleteService) Validate() error {
+ var invalid []string
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation. If the document is not found (404), Elasticsearch will
+// still return a response. This response is serialized and returned as well. In other
+// words, for HTTP status code 404, both an error and a response might be returned.
+func (s *DeleteService) Do(ctx context.Context) (*DeleteResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ IgnoreErrors: []int{http.StatusNotFound},
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(DeleteResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+
+ // If we have a 404, we return both a result and an error, just like ES does
+ if res.StatusCode == http.StatusNotFound {
+ return ret, &Error{Status: http.StatusNotFound}
+ }
+
+ return ret, nil
+}
+
+// -- Result of a delete request.
+
+// DeleteResponse is the outcome of running DeleteService.Do.
+type DeleteResponse struct {
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Version int64 `json:"_version,omitempty"`
+ Result string `json:"result,omitempty"`
+ Shards *shardsInfo `json:"_shards,omitempty"`
+ SeqNo int64 `json:"_seq_no,omitempty"`
+ PrimaryTerm int64 `json:"_primary_term,omitempty"`
+ Status int `json:"status,omitempty"`
+ ForcedRefresh bool `json:"forced_refresh,omitempty"`
+}
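+
+// Example usage (a minimal sketch assuming a configured client): a delete can
+// return both a response and an error when the document is missing (404), so
+// check IsNotFound before treating the error as fatal:
+//
+//	res, err := client.Delete().
+//		Index("twitter").
+//		Type("doc").
+//		Id("1").
+//		Do(context.Background())
+//	if IsNotFound(err) {
+//		// res is still decoded; res.Result == "not_found"
+//	} else if err != nil {
+//		// Handle transport or Elasticsearch error
+//	}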
diff --git a/vendor/github.com/olivere/elastic/delete_by_query.go b/vendor/github.com/olivere/elastic/delete_by_query.go
new file mode 100644
index 000000000..694d81c2a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/delete_by_query.go
@@ -0,0 +1,654 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// DeleteByQueryService deletes documents that match a query.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-delete-by-query.html.
+type DeleteByQueryService struct {
+ client *Client
+ index []string
+ typ []string
+ query Query
+ body interface{}
+ xSource []string
+ xSourceExclude []string
+ xSourceInclude []string
+ analyzer string
+ analyzeWildcard *bool
+ allowNoIndices *bool
+ conflicts string
+ defaultOperator string
+ df string
+ docvalueFields []string
+ expandWildcards string
+ explain *bool
+ from *int
+ ignoreUnavailable *bool
+ lenient *bool
+ lowercaseExpandedTerms *bool
+ preference string
+ q string
+ refresh string
+ requestCache *bool
+ requestsPerSecond *int
+ routing []string
+ scroll string
+ scrollSize *int
+ searchTimeout string
+ searchType string
+ size *int
+ sort []string
+ stats []string
+ storedFields []string
+ suggestField string
+ suggestMode string
+ suggestSize *int
+ suggestText string
+ terminateAfter *int
+ timeout string
+ trackScores *bool
+ version *bool
+ waitForActiveShards string
+ waitForCompletion *bool
+ pretty bool
+}
+
+// NewDeleteByQueryService creates a new DeleteByQueryService.
+// You typically use the client's DeleteByQuery to get a reference to
+// the service.
+func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
+ builder := &DeleteByQueryService{
+ client: client,
+ }
+ return builder
+}
+
+// Index sets the indices on which to perform the delete operation.
+func (s *DeleteByQueryService) Index(index ...string) *DeleteByQueryService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Type limits the delete operation to the given types.
+func (s *DeleteByQueryService) Type(typ ...string) *DeleteByQueryService {
+ s.typ = append(s.typ, typ...)
+ return s
+}
+
+// XSource indicates whether to return the _source field (true or false),
+// or a list of fields to return.
+func (s *DeleteByQueryService) XSource(xSource ...string) *DeleteByQueryService {
+ s.xSource = append(s.xSource, xSource...)
+ return s
+}
+
+// XSourceExclude represents a list of fields to exclude from the returned _source field.
+func (s *DeleteByQueryService) XSourceExclude(xSourceExclude ...string) *DeleteByQueryService {
+ s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
+ return s
+}
+
+// XSourceInclude represents a list of fields to extract and return from the _source field.
+func (s *DeleteByQueryService) XSourceInclude(xSourceInclude ...string) *DeleteByQueryService {
+ s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
+ return s
+}
+
+// Analyzer to use for the query string.
+func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
+ s.analyzer = analyzer
+ return s
+}
+
+// AnalyzeWildcard specifies whether wildcard and prefix queries should be
+// analyzed (default: false).
+func (s *DeleteByQueryService) AnalyzeWildcard(analyzeWildcard bool) *DeleteByQueryService {
+ s.analyzeWildcard = &analyzeWildcard
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices (including the _all string
+// or when no indices have been specified).
+func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
+ s.allowNoIndices = &allow
+ return s
+}
+
+// Conflicts indicates what to do when the process detects version conflicts.
+// Possible values are "proceed" and "abort".
+func (s *DeleteByQueryService) Conflicts(conflicts string) *DeleteByQueryService {
+ s.conflicts = conflicts
+ return s
+}
+
+// AbortOnVersionConflict aborts the request on version conflicts.
+// It is an alias for setting Conflicts("abort").
+func (s *DeleteByQueryService) AbortOnVersionConflict() *DeleteByQueryService {
+ s.conflicts = "abort"
+ return s
+}
+
+// ProceedOnVersionConflict allows the request to proceed despite version conflicts.
+// It is an alias for setting Conflicts("proceed").
+func (s *DeleteByQueryService) ProceedOnVersionConflict() *DeleteByQueryService {
+ s.conflicts = "proceed"
+ return s
+}
+
+// DefaultOperator for query string query (AND or OR).
+func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
+ s.defaultOperator = defaultOperator
+ return s
+}
+
+// DF is the field to use as default where no field prefix is given in the query string.
+func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
+ s.df = defaultField
+ return s
+}
+
+// DefaultField is the field to use as default where no field prefix is given in the query string.
+// It is an alias to the DF func.
+func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
+ s.df = defaultField
+ return s
+}
+
+// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit.
+func (s *DeleteByQueryService) DocvalueFields(docvalueFields ...string) *DeleteByQueryService {
+ s.docvalueFields = docvalueFields
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed, or both. It can be "open" or "closed".
+func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
+ s.expandWildcards = expand
+ return s
+}
+
+// Explain specifies whether to return detailed information about score
+// computation as part of a hit.
+func (s *DeleteByQueryService) Explain(explain bool) *DeleteByQueryService {
+ s.explain = &explain
+ return s
+}
+
+// From is the starting offset (default: 0).
+func (s *DeleteByQueryService) From(from int) *DeleteByQueryService {
+ s.from = &from
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
+ s.ignoreUnavailable = &ignore
+ return s
+}
+
+// Lenient specifies whether format-based query failures
+// (such as providing text to a numeric field) should be ignored.
+func (s *DeleteByQueryService) Lenient(lenient bool) *DeleteByQueryService {
+ s.lenient = &lenient
+ return s
+}
+
+// LowercaseExpandedTerms specifies whether query terms should be lowercased.
+func (s *DeleteByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *DeleteByQueryService {
+ s.lowercaseExpandedTerms = &lowercaseExpandedTerms
+ return s
+}
+
+// Preference specifies the node or shard the operation should be performed on
+// (default: random).
+func (s *DeleteByQueryService) Preference(preference string) *DeleteByQueryService {
+ s.preference = preference
+ return s
+}
+
+// Q specifies the query in Lucene query string syntax. You can also use
+// Query to programmatically specify the query.
+func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
+ s.q = query
+ return s
+}
+
+// QueryString is an alias to Q. Notice that you can also use Query to
+// programmatically set the query.
+func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
+ s.q = query
+ return s
+}
+
+// Query sets the query programmatically.
+func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
+ s.query = query
+ return s
+}
+
+// Refresh indicates whether the affected indices should be refreshed.
+func (s *DeleteByQueryService) Refresh(refresh string) *DeleteByQueryService {
+ s.refresh = refresh
+ return s
+}
+
+// RequestCache specifies if the request cache should be used for this request
+// or not. Defaults to the index-level setting.
+func (s *DeleteByQueryService) RequestCache(requestCache bool) *DeleteByQueryService {
+ s.requestCache = &requestCache
+ return s
+}
+
+// RequestsPerSecond sets the throttle for this request in sub-requests per second.
+// -1 means no throttle.
+func (s *DeleteByQueryService) RequestsPerSecond(requestsPerSecond int) *DeleteByQueryService {
+ s.requestsPerSecond = &requestsPerSecond
+ return s
+}
+
+// Routing is a list of specific routing values.
+func (s *DeleteByQueryService) Routing(routing ...string) *DeleteByQueryService {
+ s.routing = append(s.routing, routing...)
+ return s
+}
+
+// Scroll specifies how long a consistent view of the index should be maintained
+// for scrolled search.
+func (s *DeleteByQueryService) Scroll(scroll string) *DeleteByQueryService {
+ s.scroll = scroll
+ return s
+}
+
+// ScrollSize is the size of the scroll request powering the delete_by_query.
+func (s *DeleteByQueryService) ScrollSize(scrollSize int) *DeleteByQueryService {
+ s.scrollSize = &scrollSize
+ return s
+}
+
+// SearchTimeout defines an explicit timeout for each search request.
+// Defaults to no timeout.
+func (s *DeleteByQueryService) SearchTimeout(searchTimeout string) *DeleteByQueryService {
+ s.searchTimeout = searchTimeout
+ return s
+}
+
+// SearchType is the search operation type. Possible values are
+// "query_then_fetch" and "dfs_query_then_fetch".
+func (s *DeleteByQueryService) SearchType(searchType string) *DeleteByQueryService {
+ s.searchType = searchType
+ return s
+}
+
+// Size represents the number of hits to return (default: 10).
+func (s *DeleteByQueryService) Size(size int) *DeleteByQueryService {
+ s.size = &size
+ return s
+}
+
+// Sort is a list of <field>:<direction> pairs.
+func (s *DeleteByQueryService) Sort(sort ...string) *DeleteByQueryService {
+ s.sort = append(s.sort, sort...)
+ return s
+}
+
+// SortByField adds a sort order.
+func (s *DeleteByQueryService) SortByField(field string, ascending bool) *DeleteByQueryService {
+ if ascending {
+ s.sort = append(s.sort, fmt.Sprintf("%s:asc", field))
+ } else {
+ s.sort = append(s.sort, fmt.Sprintf("%s:desc", field))
+ }
+ return s
+}
+
+// Stats specifies the tag(s) of the request for logging and statistical purposes.
+func (s *DeleteByQueryService) Stats(stats ...string) *DeleteByQueryService {
+ s.stats = append(s.stats, stats...)
+ return s
+}
+
+// StoredFields specifies the list of stored fields to return as part of a hit.
+func (s *DeleteByQueryService) StoredFields(storedFields ...string) *DeleteByQueryService {
+ s.storedFields = storedFields
+ return s
+}
+
+// SuggestField specifies which field to use for suggestions.
+func (s *DeleteByQueryService) SuggestField(suggestField string) *DeleteByQueryService {
+ s.suggestField = suggestField
+ return s
+}
+
+// SuggestMode specifies the suggest mode. Possible values are
+// "missing", "popular", and "always".
+func (s *DeleteByQueryService) SuggestMode(suggestMode string) *DeleteByQueryService {
+ s.suggestMode = suggestMode
+ return s
+}
+
+// SuggestSize specifies how many suggestions to return in response.
+func (s *DeleteByQueryService) SuggestSize(suggestSize int) *DeleteByQueryService {
+ s.suggestSize = &suggestSize
+ return s
+}
+
+// SuggestText specifies the source text for which the suggestions should be returned.
+func (s *DeleteByQueryService) SuggestText(suggestText string) *DeleteByQueryService {
+ s.suggestText = suggestText
+ return s
+}
+
+// TerminateAfter indicates the maximum number of documents to collect
+// for each shard, upon reaching which the query execution will terminate early.
+func (s *DeleteByQueryService) TerminateAfter(terminateAfter int) *DeleteByQueryService {
+ s.terminateAfter = &terminateAfter
+ return s
+}
+
+// Timeout is the time each individual bulk request should wait for shards
+// that are unavailable.
+func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
+ s.timeout = timeout
+ return s
+}
+
+// TimeoutInMillis sets the timeout in milliseconds.
+func (s *DeleteByQueryService) TimeoutInMillis(timeoutInMillis int) *DeleteByQueryService {
+ s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
+ return s
+}
+
+// TrackScores indicates whether to calculate and return scores even if
+// they are not used for sorting.
+func (s *DeleteByQueryService) TrackScores(trackScores bool) *DeleteByQueryService {
+ s.trackScores = &trackScores
+ return s
+}
+
+// Version specifies whether to return document version as part of a hit.
+func (s *DeleteByQueryService) Version(version bool) *DeleteByQueryService {
+ s.version = &version
+ return s
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active before proceeding
+// with the delete by query operation. Defaults to 1, meaning the primary shard only.
+// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal
+// to the total number of copies for the shard (number of replicas + 1).
+func (s *DeleteByQueryService) WaitForActiveShards(waitForActiveShards string) *DeleteByQueryService {
+ s.waitForActiveShards = waitForActiveShards
+ return s
+}
+
+// WaitForCompletion indicates if the request should block until the delete by query is complete.
+func (s *DeleteByQueryService) WaitForCompletion(waitForCompletion bool) *DeleteByQueryService {
+ s.waitForCompletion = &waitForCompletion
+ return s
+}
+
+// Pretty indents the JSON output from Elasticsearch.
+func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
+ s.pretty = pretty
+ return s
+}
+
+// Body specifies the body of the request. It overrides a query set via Query.
+func (s *DeleteByQueryService) Body(body string) *DeleteByQueryService {
+ s.body = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *DeleteByQueryService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.typ) > 0 {
+ path, err = uritemplates.Expand("/{index}/{type}/_delete_by_query", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "type": strings.Join(s.typ, ","),
+ })
+ } else {
+ path, err = uritemplates.Expand("/{index}/_delete_by_query", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if len(s.xSource) > 0 {
+ params.Set("_source", strings.Join(s.xSource, ","))
+ }
+ if len(s.xSourceExclude) > 0 {
+ params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
+ }
+ if len(s.xSourceInclude) > 0 {
+ params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
+ }
+ if s.analyzer != "" {
+ params.Set("analyzer", s.analyzer)
+ }
+ if s.analyzeWildcard != nil {
+ params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
+ }
+ if s.defaultOperator != "" {
+ params.Set("default_operator", s.defaultOperator)
+ }
+ if s.df != "" {
+ params.Set("df", s.df)
+ }
+ if s.explain != nil {
+ params.Set("explain", fmt.Sprintf("%v", *s.explain))
+ }
+ if len(s.storedFields) > 0 {
+ params.Set("stored_fields", strings.Join(s.storedFields, ","))
+ }
+ if len(s.docvalueFields) > 0 {
+ params.Set("docvalue_fields", strings.Join(s.docvalueFields, ","))
+ }
+ if s.from != nil {
+ params.Set("from", fmt.Sprintf("%d", *s.from))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.conflicts != "" {
+ params.Set("conflicts", s.conflicts)
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.lenient != nil {
+ params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
+ }
+ if s.lowercaseExpandedTerms != nil {
+ params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.q != "" {
+ params.Set("q", s.q)
+ }
+ if len(s.routing) > 0 {
+ params.Set("routing", strings.Join(s.routing, ","))
+ }
+ if s.scroll != "" {
+ params.Set("scroll", s.scroll)
+ }
+ if s.searchType != "" {
+ params.Set("search_type", s.searchType)
+ }
+ if s.searchTimeout != "" {
+ params.Set("search_timeout", s.searchTimeout)
+ }
+ if s.size != nil {
+ params.Set("size", fmt.Sprintf("%d", *s.size))
+ }
+ if len(s.sort) > 0 {
+ params.Set("sort", strings.Join(s.sort, ","))
+ }
+ if s.terminateAfter != nil {
+ params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter))
+ }
+ if len(s.stats) > 0 {
+ params.Set("stats", strings.Join(s.stats, ","))
+ }
+ if s.suggestField != "" {
+ params.Set("suggest_field", s.suggestField)
+ }
+ if s.suggestMode != "" {
+ params.Set("suggest_mode", s.suggestMode)
+ }
+ if s.suggestSize != nil {
+ params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize))
+ }
+ if s.suggestText != "" {
+ params.Set("suggest_text", s.suggestText)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.trackScores != nil {
+ params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores))
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", *s.version))
+ }
+ if s.requestCache != nil {
+ params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache))
+ }
+ if s.refresh != "" {
+ params.Set("refresh", s.refresh)
+ }
+ if s.waitForActiveShards != "" {
+ params.Set("wait_for_active_shards", s.waitForActiveShards)
+ }
+ if s.scrollSize != nil {
+ params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize))
+ }
+ if s.waitForCompletion != nil {
+ params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
+ }
+ if s.requestsPerSecond != nil {
+ params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond))
+ }
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *DeleteByQueryService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the delete-by-query operation.
+func (s *DeleteByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Set body if there is a query set
+ var body interface{}
+ if s.body != nil {
+ body = s.body
+ } else if s.query != nil {
+ src, err := s.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ body = map[string]interface{}{
+ "query": src,
+ }
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(BulkIndexByScrollResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// BulkIndexByScrollResponse is the outcome of executing Do with
+// DeleteByQueryService and UpdateByQueryService.
+type BulkIndexByScrollResponse struct {
+ Took int64 `json:"took"`
+ SliceId *int64 `json:"slice_id,omitempty"`
+ TimedOut bool `json:"timed_out"`
+ Total int64 `json:"total"`
+ Updated int64 `json:"updated,omitempty"`
+ Created int64 `json:"created,omitempty"`
+ Deleted int64 `json:"deleted"`
+ Batches int64 `json:"batches"`
+ VersionConflicts int64 `json:"version_conflicts"`
+ Noops int64 `json:"noops"`
+ Retries struct {
+ Bulk int64 `json:"bulk"`
+ Search int64 `json:"search"`
+ } `json:"retries,omitempty"`
+ Throttled string `json:"throttled"`
+ ThrottledMillis int64 `json:"throttled_millis"`
+ RequestsPerSecond float64 `json:"requests_per_second"`
+ Canceled string `json:"canceled,omitempty"`
+ ThrottledUntil string `json:"throttled_until"`
+ ThrottledUntilMillis int64 `json:"throttled_until_millis"`
+ Failures []bulkIndexByScrollResponseFailure `json:"failures"`
+}
+
+type bulkIndexByScrollResponseFailure struct {
+ Index string `json:"index,omitempty"`
+ Type string `json:"type,omitempty"`
+ Id string `json:"id,omitempty"`
+ Status int `json:"status,omitempty"`
+ Shard int `json:"shard,omitempty"`
+ Node int `json:"node,omitempty"`
+	// TODO "cause" contains exception details
+	// TODO "reason" contains exception details
+}
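+
+// Example usage (a minimal sketch assuming a configured client): delete every
+// document matching a term query and inspect the scroll-based response:
+//
+//	res, err := client.DeleteByQuery().
+//		Index("twitter").
+//		Query(NewTermQuery("user", "sandrae")).
+//		ProceedOnVersionConflict().
+//		Do(context.Background())
+//	if err != nil {
+//		// Handle error
+//	}
+//	fmt.Printf("deleted %d documents in %d batches\n", res.Deleted, res.Batches)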
diff --git a/vendor/github.com/olivere/elastic/delete_by_query_test.go b/vendor/github.com/olivere/elastic/delete_by_query_test.go
new file mode 100644
index 000000000..40e45b871
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/delete_by_query_test.go
@@ -0,0 +1,146 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestDeleteByQueryBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Indices []string
+ Types []string
+ Expected string
+ ExpectErr bool
+ }{
+ {
+ []string{},
+ []string{},
+ "",
+ true,
+ },
+ {
+ []string{"index1"},
+ []string{},
+ "/index1/_delete_by_query",
+ false,
+ },
+ {
+ []string{"index1", "index2"},
+ []string{},
+ "/index1%2Cindex2/_delete_by_query",
+ false,
+ },
+ {
+ []string{},
+ []string{"type1"},
+ "",
+ true,
+ },
+ {
+ []string{"index1"},
+ []string{"type1"},
+ "/index1/type1/_delete_by_query",
+ false,
+ },
+ {
+ []string{"index1", "index2"},
+ []string{"type1", "type2"},
+ "/index1%2Cindex2/type1%2Ctype2/_delete_by_query",
+ false,
+ },
+ }
+
+ for i, test := range tests {
+ builder := client.DeleteByQuery().Index(test.Indices...).Type(test.Types...)
+ err := builder.Validate()
+ if err != nil {
+ if !test.ExpectErr {
+ t.Errorf("case #%d: %v", i+1, err)
+ continue
+ }
+ } else {
+ // err == nil
+ if test.ExpectErr {
+ t.Errorf("case #%d: expected error", i+1)
+ continue
+ }
+ path, _, _ := builder.buildURL()
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+ }
+}
+
+func TestDeleteByQuery(t *testing.T) {
+ // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Count documents
+ count, err := client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 3 {
+ t.Fatalf("expected count = %d; got: %d", 3, count)
+ }
+
+ // Delete all documents by sandrae
+ q := NewTermQuery("user", "sandrae")
+ res, err := client.DeleteByQuery().
+ Index(testIndexName).
+ Type("doc").
+ Query(q).
+ Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected response != nil; got: %v", res)
+ }
+
+ // Flush and check count
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ count, err = client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 2 {
+ t.Fatalf("expected Count = %d; got: %d", 2, count)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/delete_test.go b/vendor/github.com/olivere/elastic/delete_test.go
new file mode 100644
index 000000000..571fcf589
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/delete_test.go
@@ -0,0 +1,134 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestDelete(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Count documents
+ count, err := client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 3 {
+ t.Errorf("expected Count = %d; got %d", 3, count)
+ }
+
+ // Delete document 1
+ res, err := client.Delete().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, have := "deleted", res.Result; want != have {
+ t.Errorf("expected Result = %q; got %q", want, have)
+ }
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ count, err = client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 2 {
+ t.Errorf("expected Count = %d; got %d", 2, count)
+ }
+
+ // Delete non existent document 99
+ res, err = client.Delete().Index(testIndexName).Type("doc").Id("99").Refresh("true").Do(context.TODO())
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if !IsNotFound(err) {
+ t.Fatalf("expected 404, got: %v", err)
+ }
+ if _, ok := err.(*Error); !ok {
+ t.Fatalf("expected error type *Error, got: %T", err)
+ }
+ if res == nil {
+ t.Fatal("expected response")
+ }
+ if have, want := res.Id, "99"; have != want {
+ t.Errorf("expected _id = %q, got %q", have, want)
+ }
+ if have, want := res.Index, testIndexName; have != want {
+ t.Errorf("expected _index = %q, got %q", have, want)
+ }
+ if have, want := res.Type, "doc"; have != want {
+ t.Errorf("expected _type = %q, got %q", have, want)
+ }
+ if have, want := res.Result, "not_found"; have != want {
+ t.Errorf("expected Result = %q, got %q", have, want)
+ }
+
+ count, err = client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 2 {
+ t.Errorf("expected Count = %d; got %d", 2, count)
+ }
+}
+
+func TestDeleteValidate(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // No index name -> fail with error
+ res, err := NewDeleteService(client).Type("doc").Id("1").Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected Delete to fail without index name")
+ }
+ if res != nil {
+ t.Fatalf("expected result to be == nil; got: %v", res)
+ }
+
+ // No type -> fail with error
+ res, err = NewDeleteService(client).Index(testIndexName).Id("1").Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected Delete to fail without type")
+ }
+ if res != nil {
+ t.Fatalf("expected result to be == nil; got: %v", res)
+ }
+
+ // No id -> fail with error
+ res, err = NewDeleteService(client).Index(testIndexName).Type("doc").Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected Delete to fail without id")
+ }
+ if res != nil {
+ t.Fatalf("expected result to be == nil; got: %v", res)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/doc.go b/vendor/github.com/olivere/elastic/doc.go
new file mode 100644
index 000000000..ea16d6698
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/doc.go
@@ -0,0 +1,51 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+/*
+Package elastic provides an interface to the Elasticsearch server
+(https://www.elastic.co/products/elasticsearch).
+
+The first thing you do is to create a Client. If you have Elasticsearch
+installed and running with its default settings
+(i.e. available at http://127.0.0.1:9200), all you need to do is:
+
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ }
+
+If your Elasticsearch server is running on a different IP and/or port,
+just provide a URL to NewClient:
+
+ // Create a client and connect to http://192.168.2.10:9201
+ client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201"))
+ if err != nil {
+ // Handle error
+ }
+
+You can pass many more configuration parameters to NewClient. Review the
+documentation of NewClient for more information.
+
+If no Elasticsearch server is available, services will fail when creating
+a new request and will return ErrNoClient.
+
+A Client provides services. The services usually come with a variety of
+methods to prepare the query and a Do function to execute it against the
+Elasticsearch REST interface and return a response. Here is an example
+of the IndexExists service that checks if a given index already exists.
+
+ exists, err := client.IndexExists("twitter").Do(context.Background())
+ if err != nil {
+ // Handle error
+ }
+ if !exists {
+ // Index does not exist yet.
+ }
+
+Look up the documentation for Client to get an idea of the services provided
+and what kinds of responses you get when executing the Do function of a service.
+Also see the wiki on GitHub for more details.
+
+*/
+package elastic
diff --git a/vendor/github.com/olivere/elastic/errors.go b/vendor/github.com/olivere/elastic/errors.go
new file mode 100644
index 000000000..00a936621
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/errors.go
@@ -0,0 +1,147 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+)
+
+// checkResponse will return an error if the request/response indicates
+// an error returned from Elasticsearch.
+//
+// HTTP status codes in the range [200..299] are considered successful.
+// All other status codes are considered errors unless they are specified in
+// ignoreErrors. This is necessary because for some services, HTTP status 404
+// is a valid response from Elasticsearch (e.g. the Exists service).
+//
+// The func tries to parse error details as returned from Elasticsearch
+// and encapsulates them in type elastic.Error.
+func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error {
+ // 200-299 are valid status codes
+ if res.StatusCode >= 200 && res.StatusCode <= 299 {
+ return nil
+ }
+ // Ignore certain errors?
+ for _, code := range ignoreErrors {
+ if code == res.StatusCode {
+ return nil
+ }
+ }
+ return createResponseError(res)
+}
+
+// createResponseError creates an Error structure from the HTTP response,
+// its status code and the error information sent by Elasticsearch.
+func createResponseError(res *http.Response) error {
+ if res.Body == nil {
+ return &Error{Status: res.StatusCode}
+ }
+ data, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return &Error{Status: res.StatusCode}
+ }
+ errReply := new(Error)
+ err = json.Unmarshal(data, errReply)
+ if err != nil {
+ return &Error{Status: res.StatusCode}
+ }
+ if errReply != nil {
+ if errReply.Status == 0 {
+ errReply.Status = res.StatusCode
+ }
+ return errReply
+ }
+ return &Error{Status: res.StatusCode}
+}
+
+// Error encapsulates error details as returned from Elasticsearch.
+type Error struct {
+ Status int `json:"status"`
+ Details *ErrorDetails `json:"error,omitempty"`
+}
+
+// ErrorDetails encapsulates error details from Elasticsearch.
+// It is used in e.g. elastic.Error and elastic.BulkResponseItem.
+type ErrorDetails struct {
+ Type string `json:"type"`
+ Reason string `json:"reason"`
+ ResourceType string `json:"resource.type,omitempty"`
+ ResourceId string `json:"resource.id,omitempty"`
+ Index string `json:"index,omitempty"`
+ Phase string `json:"phase,omitempty"`
+ Grouped bool `json:"grouped,omitempty"`
+ CausedBy map[string]interface{} `json:"caused_by,omitempty"`
+ RootCause []*ErrorDetails `json:"root_cause,omitempty"`
+ FailedShards []map[string]interface{} `json:"failed_shards,omitempty"`
+}
+
+// Error returns a string representation of the error.
+func (e *Error) Error() string {
+ if e.Details != nil && e.Details.Reason != "" {
+ return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type)
+ } else {
+ return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
+ }
+}
+
+// IsNotFound returns true if the given error indicates that Elasticsearch
+// returned HTTP status 404. The err parameter can be of type *elastic.Error,
+// elastic.Error, *http.Response or int (indicating the HTTP status code).
+func IsNotFound(err interface{}) bool {
+ return IsStatusCode(err, http.StatusNotFound)
+}
+
+// IsTimeout returns true if the given error indicates that Elasticsearch
+// returned HTTP status 408. The err parameter can be of type *elastic.Error,
+// elastic.Error, *http.Response or int (indicating the HTTP status code).
+func IsTimeout(err interface{}) bool {
+ return IsStatusCode(err, http.StatusRequestTimeout)
+}
+
+// IsConflict returns true if the given error indicates that the Elasticsearch
+// operation resulted in a version conflict. This can occur in operations like
+// `update` or `index` with `op_type=create`. The err parameter can be of
+// type *elastic.Error, elastic.Error, *http.Response or int (indicating the
+// HTTP status code).
+func IsConflict(err interface{}) bool {
+ return IsStatusCode(err, http.StatusConflict)
+}
+
+// IsStatusCode returns true if the given error indicates that the Elasticsearch
+// operation returned the specified HTTP status code. The err parameter can be of
+// type *http.Response, *Error, Error, or int (indicating the HTTP status code).
+func IsStatusCode(err interface{}, code int) bool {
+ switch e := err.(type) {
+ case *http.Response:
+ return e.StatusCode == code
+ case *Error:
+ return e.Status == code
+ case Error:
+ return e.Status == code
+ case int:
+ return e == code
+ }
+ return false
+}
+
+// -- General errors --
+
+// shardsInfo represents information from a shard.
+type shardsInfo struct {
+ Total int `json:"total"`
+ Successful int `json:"successful"`
+ Failed int `json:"failed"`
+}
+
+// shardOperationFailure represents a shard failure.
+type shardOperationFailure struct {
+ Shard int `json:"shard"`
+ Index string `json:"index"`
+ Status string `json:"status"`
+ // "reason"
+}
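+
+// Example usage (a minimal sketch assuming a configured client): a failed
+// service call can be inspected by asserting the error to *Error, or by
+// using the helper predicates above:
+//
+//	_, err := client.Delete().Index("twitter").Type("doc").Id("no-such-id").Do(context.Background())
+//	if e, ok := err.(*Error); ok {
+//		log.Printf("Elasticsearch returned status %d", e.Status)
+//		if e.Details != nil {
+//			log.Printf("type=%s reason=%s", e.Details.Type, e.Details.Reason)
+//		}
+//	}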
diff --git a/vendor/github.com/olivere/elastic/errors_test.go b/vendor/github.com/olivere/elastic/errors_test.go
new file mode 100644
index 000000000..75d3949e5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/errors_test.go
@@ -0,0 +1,295 @@
+package elastic
+
+import (
+ "bufio"
+ "fmt"
+ "net/http"
+ "strings"
+ "testing"
+)
+
+func TestResponseError(t *testing.T) {
+ raw := "HTTP/1.1 404 Not Found\r\n" +
+ "\r\n" +
+ `{"error":{"root_cause":[{"type":"index_missing_exception","reason":"no such index","index":"elastic-test"}],"type":"index_missing_exception","reason":"no such index","index":"elastic-test"},"status":404}` + "\r\n"
+ r := bufio.NewReader(strings.NewReader(raw))
+
+ req, err := http.NewRequest("GET", "/", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := http.ReadResponse(r, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = checkResponse(req, resp)
+ if err == nil {
+ t.Fatalf("expected error; got: %v", err)
+ }
+
+ // Check for correct error message
+ expected := fmt.Sprintf("elastic: Error %d (%s): no such index [type=index_missing_exception]", resp.StatusCode, http.StatusText(resp.StatusCode))
+ got := err.Error()
+ if got != expected {
+ t.Fatalf("expected %q; got: %q", expected, got)
+ }
+
+ // Check that error is of type *elastic.Error, which contains additional information
+ e, ok := err.(*Error)
+ if !ok {
+ t.Fatal("expected error to be of type *elastic.Error")
+ }
+ if e.Status != resp.StatusCode {
+ t.Fatalf("expected status code %d; got: %d", resp.StatusCode, e.Status)
+ }
+ if e.Details == nil {
+ t.Fatalf("expected error details; got: %v", e.Details)
+ }
+ if got, want := e.Details.Index, "elastic-test"; got != want {
+ t.Fatalf("expected error details index %q; got: %q", want, got)
+ }
+ if got, want := e.Details.Type, "index_missing_exception"; got != want {
+ t.Fatalf("expected error details type %q; got: %q", want, got)
+ }
+ if got, want := e.Details.Reason, "no such index"; got != want {
+ t.Fatalf("expected error details reason %q; got: %q", want, got)
+ }
+ if got, want := len(e.Details.RootCause), 1; got != want {
+ t.Fatalf("expected %d error details root causes; got: %d", want, got)
+ }
+
+ if got, want := e.Details.RootCause[0].Index, "elastic-test"; got != want {
+ t.Fatalf("expected root cause index %q; got: %q", want, got)
+ }
+ if got, want := e.Details.RootCause[0].Type, "index_missing_exception"; got != want {
+ t.Fatalf("expected root cause type %q; got: %q", want, got)
+ }
+ if got, want := e.Details.RootCause[0].Reason, "no such index"; got != want {
+ t.Fatalf("expected root cause reason %q; got: %q", want, got)
+ }
+}
+
+func TestResponseErrorHTML(t *testing.T) {
+ raw := "HTTP/1.1 413 Request Entity Too Large\r\n" +
+ "\r\n" +
+ `<html>
+<head><title>413 Request Entity Too Large</title></head>
+<body bgcolor="white">
+<center><h1>413 Request Entity Too Large</h1></center>
+<hr><center>nginx/1.6.2</center>
+</body>
+</html>` + "\r\n"
+ r := bufio.NewReader(strings.NewReader(raw))
+
+ req, err := http.NewRequest("GET", "/", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := http.ReadResponse(r, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = checkResponse(req, resp)
+ if err == nil {
+ t.Fatalf("expected error; got: %v", err)
+ }
+
+ // Check for correct error message
+ expected := fmt.Sprintf("elastic: Error %d (%s)", http.StatusRequestEntityTooLarge, http.StatusText(http.StatusRequestEntityTooLarge))
+ got := err.Error()
+ if got != expected {
+ t.Fatalf("expected %q; got: %q", expected, got)
+ }
+}
+
+func TestResponseErrorWithIgnore(t *testing.T) {
+ raw := "HTTP/1.1 404 Not Found\r\n" +
+ "\r\n" +
+ `{"some":"response"}` + "\r\n"
+ r := bufio.NewReader(strings.NewReader(raw))
+
+ req, err := http.NewRequest("HEAD", "/", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := http.ReadResponse(r, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = checkResponse(req, resp)
+ if err == nil {
+ t.Fatalf("expected error; got: %v", err)
+ }
+ err = checkResponse(req, resp, 404) // ignore 404 errors
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+}
+
+func TestIsNotFound(t *testing.T) {
+ if got, want := IsNotFound(nil), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsNotFound(""), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsNotFound(200), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsNotFound(404), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+
+ if got, want := IsNotFound(&Error{Status: 404}), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsNotFound(&Error{Status: 200}), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+
+ if got, want := IsNotFound(Error{Status: 404}), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsNotFound(Error{Status: 200}), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+
+ if got, want := IsNotFound(&http.Response{StatusCode: 404}), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsNotFound(&http.Response{StatusCode: 200}), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+}
+
+func TestIsTimeout(t *testing.T) {
+ if got, want := IsTimeout(nil), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsTimeout(""), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsTimeout(200), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsTimeout(408), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+
+ if got, want := IsTimeout(&Error{Status: 408}), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsTimeout(&Error{Status: 200}), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+
+ if got, want := IsTimeout(Error{Status: 408}), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsTimeout(Error{Status: 200}), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+
+ if got, want := IsTimeout(&http.Response{StatusCode: 408}), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsTimeout(&http.Response{StatusCode: 200}), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+}
+
+func TestIsConflict(t *testing.T) {
+ if got, want := IsConflict(nil), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsConflict(""), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsConflict(200), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsConflict(http.StatusConflict), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+
+ if got, want := IsConflict(&Error{Status: 409}), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsConflict(&Error{Status: 200}), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+
+ if got, want := IsConflict(Error{Status: 409}), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsConflict(Error{Status: 200}), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+
+ if got, want := IsConflict(&http.Response{StatusCode: 409}), true; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+ if got, want := IsConflict(&http.Response{StatusCode: 200}), false; got != want {
+ t.Errorf("expected %v; got: %v", want, got)
+ }
+}
+
+func TestIsStatusCode(t *testing.T) {
+ tests := []struct {
+ Error interface{}
+ Code int
+ Want bool
+ }{
+ // #0
+ {
+ Error: nil,
+ Code: 200,
+ Want: false,
+ },
+ // #1
+ {
+ Error: "",
+ Code: 200,
+ Want: false,
+ },
+ // #2
+ {
+ Error: http.StatusConflict,
+ Code: 409,
+ Want: true,
+ },
+ // #3
+ {
+ Error: http.StatusConflict,
+ Code: http.StatusInternalServerError,
+ Want: false,
+ },
+ // #4
+ {
+ Error: &Error{Status: http.StatusConflict},
+ Code: 409,
+ Want: true,
+ },
+ // #5
+ {
+ Error: Error{Status: http.StatusConflict},
+ Code: 409,
+ Want: true,
+ },
+ // #6
+ {
+ Error: &http.Response{StatusCode: http.StatusConflict},
+ Code: 409,
+ Want: true,
+ },
+ }
+
+ for i, tt := range tests {
+ if have, want := IsStatusCode(tt.Error, tt.Code), tt.Want; have != want {
+ t.Errorf("#%d: have %v, want %v", i, have, want)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/etc/elasticsearch.yml b/vendor/github.com/olivere/elastic/etc/elasticsearch.yml
new file mode 100644
index 000000000..9923cfe4f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/etc/elasticsearch.yml
@@ -0,0 +1,15 @@
+# bootstrap.ignore_system_bootstrap_checks: true
+
+discovery.zen.minimum_master_nodes: 1
+
+network.host:
+- _local_
+- _site_
+
+network.publish_host: _local_
+
+
+# Enable scripting as described here: https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html
+script.inline: true
+script.stored: true
+script.file: true
diff --git a/vendor/github.com/olivere/elastic/etc/ingest-geoip/.gitkeep b/vendor/github.com/olivere/elastic/etc/ingest-geoip/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/etc/ingest-geoip/.gitkeep
diff --git a/vendor/github.com/olivere/elastic/etc/jvm.options b/vendor/github.com/olivere/elastic/etc/jvm.options
new file mode 100644
index 000000000..d97fbc9ec
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/etc/jvm.options
@@ -0,0 +1,100 @@
+## JVM configuration
+
+################################################################
+## IMPORTANT: JVM heap size
+################################################################
+##
+## You should always set the min and max JVM heap
+## size to the same value. For example, to set
+## the heap to 4 GB, set:
+##
+## -Xms4g
+## -Xmx4g
+##
+## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
+## for more information
+##
+################################################################
+
+# Xms represents the initial size of total heap space
+# Xmx represents the maximum size of total heap space
+
+-Xms2g
+-Xmx2g
+
+################################################################
+## Expert settings
+################################################################
+##
+## All settings below this section are considered
+## expert settings. Don't tamper with them unless
+## you understand what you are doing
+##
+################################################################
+
+## GC configuration
+-XX:+UseConcMarkSweepGC
+-XX:CMSInitiatingOccupancyFraction=75
+-XX:+UseCMSInitiatingOccupancyOnly
+
+## optimizations
+
+# disable calls to System#gc
+-XX:+DisableExplicitGC
+
+# pre-touch memory pages used by the JVM during initialization
+-XX:+AlwaysPreTouch
+
+## basic
+
+# force the server VM
+-server
+
+# set to headless, just in case
+-Djava.awt.headless=true
+
+# ensure UTF-8 encoding by default (e.g. filenames)
+-Dfile.encoding=UTF-8
+
+# use our provided JNA always versus the system one
+-Djna.nosys=true
+
+# flags to keep Netty from being unsafe
+-Dio.netty.noUnsafe=true
+-Dio.netty.noKeySetOptimization=true
+
+# log4j 2
+-Dlog4j.shutdownHookEnabled=false
+-Dlog4j2.disable.jmx=true
+-Dlog4j.skipJansi=true
+
+## heap dumps
+
+# generate a heap dump when an allocation from the Java heap fails
+# heap dumps are created in the working directory of the JVM
+-XX:+HeapDumpOnOutOfMemoryError
+
+# specify an alternative path for heap dumps
+# ensure the directory exists and has sufficient space
+#-XX:HeapDumpPath=${heap.dump.path}
+
+## GC logging
+
+#-XX:+PrintGCDetails
+#-XX:+PrintGCTimeStamps
+#-XX:+PrintGCDateStamps
+#-XX:+PrintClassHistogram
+#-XX:+PrintTenuringDistribution
+#-XX:+PrintGCApplicationStoppedTime
+
+# log GC status to a file with time stamps
+# ensure the directory exists
+#-Xloggc:${loggc}
+
+# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
+# If documents were already indexed with unquoted fields in a previous version
+# of Elasticsearch, some operations may throw errors.
+#
+# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
+# only for migration purposes.
+#-Delasticsearch.json.allow_unquoted_field_names=true
diff --git a/vendor/github.com/olivere/elastic/etc/log4j2.properties b/vendor/github.com/olivere/elastic/etc/log4j2.properties
new file mode 100644
index 000000000..9a3147f5a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/etc/log4j2.properties
@@ -0,0 +1,74 @@
+status = error
+
+# log action execution errors for easier debugging
+logger.action.name = org.elasticsearch.action
+logger.action.level = debug
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
+
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs}.log
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
+appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.rolling.ref = rolling
+
+appender.deprecation_rolling.type = RollingFile
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
+appender.deprecation_rolling.layout.type = PatternLayout
+appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
+appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
+appender.deprecation_rolling.policies.type = Policies
+appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.size.size = 1GB
+appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling.strategy.max = 4
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = warn
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.additivity = false
+
+appender.index_search_slowlog_rolling.type = RollingFile
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
+appender.index_search_slowlog_rolling.layout.type = PatternLayout
+appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
+appender.index_search_slowlog_rolling.policies.type = Policies
+appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling.policies.time.interval = 1
+appender.index_search_slowlog_rolling.policies.time.modulate = true
+
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
+
+appender.index_indexing_slowlog_rolling.type = RollingFile
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
+appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
+appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
+appender.index_indexing_slowlog_rolling.policies.type = Policies
+appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling.policies.time.interval = 1
+appender.index_indexing_slowlog_rolling.policies.time.modulate = true
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
diff --git a/vendor/github.com/olivere/elastic/etc/scripts/.gitkeep b/vendor/github.com/olivere/elastic/etc/scripts/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/etc/scripts/.gitkeep
diff --git a/vendor/github.com/olivere/elastic/example_test.go b/vendor/github.com/olivere/elastic/example_test.go
new file mode 100644
index 000000000..62dc15d89
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/example_test.go
@@ -0,0 +1,530 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic_test
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "time"
+
+ elastic "github.com/olivere/elastic"
+)
+
+type Tweet struct {
+ User string `json:"user"`
+ Message string `json:"message"`
+ Retweets int `json:"retweets"`
+ Image string `json:"image,omitempty"`
+ Created time.Time `json:"created,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Location string `json:"location,omitempty"`
+ Suggest *elastic.SuggestField `json:"suggest_field,omitempty"`
+}
+
+func Example() {
+ errorlog := log.New(os.Stdout, "APP ", log.LstdFlags)
+
+ // Obtain a client. You can also provide your own HTTP client here.
+ client, err := elastic.NewClient(elastic.SetErrorLog(errorlog))
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Trace request and response details like this
+ //client.SetTracer(log.New(os.Stdout, "", 0))
+
+ // Ping the Elasticsearch server to get e.g. the version number
+ info, code, err := client.Ping("http://127.0.0.1:9200").Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ fmt.Printf("Elasticsearch returned with code %d and version %s\n", code, info.Version.Number)
+
+ // Getting the ES version number is quite common, so there's a shortcut
+ esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200")
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ fmt.Printf("Elasticsearch version %s\n", esversion)
+
+ // Use the IndexExists service to check if a specified index exists.
+ exists, err := client.IndexExists("twitter").Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if !exists {
+ // Create a new index.
+ mapping := `
+{
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
+ "properties":{
+ "user":{
+ "type":"keyword"
+ },
+ "message":{
+ "type":"text",
+ "store": true,
+ "fielddata": true
+ },
+ "retweets":{
+ "type":"long"
+ },
+ "tags":{
+ "type":"keyword"
+ },
+ "location":{
+ "type":"geo_point"
+ },
+ "suggest_field":{
+ "type":"completion"
+ }
+ }
+ }
+ }
+}
+`
+ createIndex, err := client.CreateIndex("twitter").Body(mapping).Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if !createIndex.Acknowledged {
+ // Not acknowledged
+ }
+ }
+
+ // Index a tweet (using JSON serialization)
+ tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
+ put1, err := client.Index().
+ Index("twitter").
+ Type("doc").
+ Id("1").
+ BodyJson(tweet1).
+ Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type)
+
+ // Index a second tweet (by string)
+ tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
+ put2, err := client.Index().
+ Index("twitter").
+ Type("doc").
+ Id("2").
+ BodyString(tweet2).
+ Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type)
+
+ // Get tweet with specified ID
+ get1, err := client.Get().
+ Index("twitter").
+ Type("doc").
+ Id("1").
+ Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if get1.Found {
+ fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
+ }
+
+ // Flush to make sure the documents got written.
+ _, err = client.Flush().Index("twitter").Do(context.Background())
+ if err != nil {
+ panic(err)
+ }
+
+ // Search with a term query
+ termQuery := elastic.NewTermQuery("user", "olivere")
+ searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(termQuery). // specify the query
+ Sort("user", true). // sort by "user" field, ascending
+ From(0).Size(10). // take documents 0-9
+ Pretty(true). // pretty print request and response JSON
+ Do(context.Background()) // execute
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+	// searchResult is of type SearchResult and contains hits, suggestions,
+	// and all kinds of other information from Elasticsearch.
+ fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+ // Each is a convenience function that iterates over hits in a search result.
+ // It makes sure you don't need to check for nil values in the response.
+ // However, it ignores errors in serialization. If you want full control
+ // over iterating the hits, see below.
+ var ttyp Tweet
+ for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+ t := item.(Tweet)
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ // TotalHits is another convenience function that works even when something goes wrong.
+ fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+ // Here's how you iterate through results with full control over each step.
+ if searchResult.Hits.TotalHits > 0 {
+ fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var t Tweet
+ err := json.Unmarshal(*hit.Source, &t)
+ if err != nil {
+ // Deserialization failed
+ }
+
+ // Work with tweet
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ } else {
+ // No hits
+ fmt.Print("Found no tweets\n")
+ }
+
+ // Update a tweet by the update API of Elasticsearch.
+ // We just increment the number of retweets.
+ script := elastic.NewScript("ctx._source.retweets += params.num").Param("num", 1)
+ update, err := client.Update().Index("twitter").Type("doc").Id("1").
+ Script(script).
+ Upsert(map[string]interface{}{"retweets": 0}).
+ Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version)
+
+ // ...
+
+ // Delete an index.
+ deleteIndex, err := client.DeleteIndex("twitter").Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if !deleteIndex.Acknowledged {
+ // Not acknowledged
+ }
+}
+
+func ExampleClient_NewClient_default() {
+ // Obtain a client to the Elasticsearch instance on http://127.0.0.1:9200.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ fmt.Printf("connection failed: %v\n", err)
+ } else {
+ fmt.Println("connected")
+ }
+ _ = client
+ // Output:
+ // connected
+}
+
+func ExampleClient_NewClient_cluster() {
+ // Obtain a client for an Elasticsearch cluster of two nodes,
+ // running on 10.0.1.1 and 10.0.1.2.
+ client, err := elastic.NewClient(elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"))
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ _ = client
+}
+
+func ExampleClient_NewClient_manyOptions() {
+ // Obtain a client for an Elasticsearch cluster of two nodes,
+ // running on 10.0.1.1 and 10.0.1.2. Do not run the sniffer.
+ // Set the healthcheck interval to 10s. When requests fail,
+ // retry 5 times. Print error messages to os.Stderr and informational
+ // messages to os.Stdout.
+ client, err := elastic.NewClient(
+ elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"),
+ elastic.SetSniff(false),
+ elastic.SetHealthcheckInterval(10*time.Second),
+ elastic.SetMaxRetries(5),
+ elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)),
+ elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags)))
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ _ = client
+}
+
+func ExampleIndexExistsService() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ // Use the IndexExists service to check if the index "twitter" exists.
+ exists, err := client.IndexExists("twitter").Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if exists {
+ // ...
+ }
+}
+
+func ExampleCreateIndexService() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ // Create a new index.
+ createIndex, err := client.CreateIndex("twitter").Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if !createIndex.Acknowledged {
+ // Not acknowledged
+ }
+}
+
+func ExampleDeleteIndexService() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ // Delete an index.
+ deleteIndex, err := client.DeleteIndex("twitter").Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ if !deleteIndex.Acknowledged {
+ // Not acknowledged
+ }
+}
+
+func ExampleSearchService() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Search with a term query
+ termQuery := elastic.NewTermQuery("user", "olivere")
+ searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(termQuery). // specify the query
+ Sort("user", true). // sort by "user" field, ascending
+ From(0).Size(10). // take documents 0-9
+ Pretty(true). // pretty print request and response JSON
+ Do(context.Background()) // execute
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+	// searchResult is of type SearchResult and contains hits, suggestions,
+	// and all kinds of other information from Elasticsearch.
+ fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+ // Number of hits
+ if searchResult.Hits.TotalHits > 0 {
+ fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var t Tweet
+ err := json.Unmarshal(*hit.Source, &t)
+ if err != nil {
+ // Deserialization failed
+ }
+
+ // Work with tweet
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ } else {
+ // No hits
+ fmt.Print("Found no tweets\n")
+ }
+}
+
+func ExampleAggregations() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year).
+ timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
+ histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year")
+ timeline = timeline.SubAggregation("history", histogram)
+
+ // Search with a term query
+ searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(elastic.NewMatchAllQuery()). // return all results, but ...
+ SearchType("count"). // ... do not return hits, just the count
+ Aggregation("timeline", timeline). // add our aggregation to the query
+ Pretty(true). // pretty print request and response JSON
+ Do(context.Background()) // execute
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Access "timeline" aggregate in search result.
+ agg, found := searchResult.Aggregations.Terms("timeline")
+ if !found {
+ log.Fatalf("we should have a terms aggregation called %q", "timeline")
+ }
+ for _, userBucket := range agg.Buckets {
+ // Every bucket should have the user field as key.
+ user := userBucket.Key
+
+ // The sub-aggregation history should have the number of tweets per year.
+ histogram, found := userBucket.DateHistogram("history")
+ if found {
+ for _, year := range histogram.Buckets {
+ fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, year.KeyAsString)
+ }
+ }
+ }
+}
+
+func ExampleSearchResult() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Do a search
+ searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do(context.Background())
+ if err != nil {
+ panic(err)
+ }
+
+	// searchResult is of type SearchResult and contains hits, suggestions,
+	// and all kinds of other information from Elasticsearch.
+ fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+ // Each is a utility function that iterates over hits in a search result.
+ // It makes sure you don't need to check for nil values in the response.
+ // However, it ignores errors in serialization. If you want full control
+ // over iterating the hits, see below.
+ var ttyp Tweet
+ for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+ t := item.(Tweet)
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+ // Here's how you iterate hits with full control.
+ if searchResult.Hits.TotalHits > 0 {
+ fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var t Tweet
+ err := json.Unmarshal(*hit.Source, &t)
+ if err != nil {
+ // Deserialization failed
+ }
+
+ // Work with tweet
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+ } else {
+ // No hits
+ fmt.Print("Found no tweets\n")
+ }
+}
+
+func ExampleClusterHealthService() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Get cluster health
+ res, err := client.ClusterHealth().Index("twitter").Do(context.Background())
+ if err != nil {
+ panic(err)
+ }
+ if res == nil {
+		panic("expected cluster health response")
+ }
+ fmt.Printf("Cluster status is %q\n", res.Status)
+}
+
+func ExampleClusterHealthService_WaitForGreen() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Wait for status green
+ res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do(context.Background())
+ if err != nil {
+ panic(err)
+ }
+ if res.TimedOut {
+ fmt.Printf("time out waiting for cluster status %q\n", "green")
+ } else {
+ fmt.Printf("cluster status is %q\n", res.Status)
+ }
+}
+
+func ExampleClusterStateService() {
+ client, err := elastic.NewClient()
+ if err != nil {
+ panic(err)
+ }
+
+ // Get cluster state
+ res, err := client.ClusterState().Metric("version").Do(context.Background())
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("Cluster %q has version %d", res.ClusterName, res.Version)
+}
diff --git a/vendor/github.com/olivere/elastic/exists.go b/vendor/github.com/olivere/elastic/exists.go
new file mode 100644
index 000000000..ae5a88fa7
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/exists.go
@@ -0,0 +1,181 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// ExistsService checks for the existence of a document using HEAD.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-get.html
+// for details.
+type ExistsService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ preference string
+ realtime *bool
+ refresh string
+ routing string
+ parent string
+}
+
+// NewExistsService creates a new ExistsService.
+func NewExistsService(client *Client) *ExistsService {
+ return &ExistsService{
+ client: client,
+ }
+}
+
+// Id is the document ID.
+func (s *ExistsService) Id(id string) *ExistsService {
+ s.id = id
+ return s
+}
+
+// Index is the name of the index.
+func (s *ExistsService) Index(index string) *ExistsService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document (use `_all` to fetch the first document
+// matching the ID across all types).
+func (s *ExistsService) Type(typ string) *ExistsService {
+ s.typ = typ
+ return s
+}
+
+// Preference specifies the node or shard the operation should be performed on (default: random).
+func (s *ExistsService) Preference(preference string) *ExistsService {
+ s.preference = preference
+ return s
+}
+
+// Realtime specifies whether to perform the operation in realtime or search mode.
+func (s *ExistsService) Realtime(realtime bool) *ExistsService {
+ s.realtime = &realtime
+ return s
+}
+
+// Refresh the shard containing the document before performing the operation.
+func (s *ExistsService) Refresh(refresh string) *ExistsService {
+ s.refresh = refresh
+ return s
+}
+
+// Routing is a specific routing value.
+func (s *ExistsService) Routing(routing string) *ExistsService {
+ s.routing = routing
+ return s
+}
+
+// Parent is the ID of the parent document.
+func (s *ExistsService) Parent(parent string) *ExistsService {
+ s.parent = parent
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ExistsService) Pretty(pretty bool) *ExistsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ExistsService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+ "id": s.id,
+ "index": s.index,
+ "type": s.typ,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.realtime != nil {
+ params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
+ }
+ if s.refresh != "" {
+ params.Set("refresh", s.refresh)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ExistsService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *ExistsService) Do(ctx context.Context) (bool, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return false, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return false, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "HEAD",
+ Path: path,
+ Params: params,
+ IgnoreErrors: []int{404},
+ })
+ if err != nil {
+ return false, err
+ }
+
+ // Return operation response
+ switch res.StatusCode {
+ case http.StatusOK:
+ return true, nil
+ case http.StatusNotFound:
+ return false, nil
+ default:
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+ }
+}
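+
+// Usage sketch (illustrative only, not part of the upstream file): assuming
+// a configured *Client named client, a HEAD-based existence check looks like:
+//
+//	exists, err := client.Exists().Index("twitter").Type("doc").Id("1").Do(context.Background())
+//	if err != nil {
+//		// Handle error
+//	}
+//	if exists {
+//		// The document exists
+//	}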
diff --git a/vendor/github.com/olivere/elastic/exists_test.go b/vendor/github.com/olivere/elastic/exists_test.go
new file mode 100644
index 000000000..9b834223d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/exists_test.go
@@ -0,0 +1,53 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestExists(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Fatal("expected document to exist")
+ }
+}
+
+func TestExistsValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ // No index -> fail with error
+ res, err := NewExistsService(client).Type("doc").Id("1").Do(context.TODO())
+ if err == nil {
+		t.Fatalf("expected Exists to fail without index name")
+	}
+	if res {
+ t.Fatalf("expected result to be false; got: %v", res)
+ }
+
+ // No type -> fail with error
+ res, err = NewExistsService(client).Index(testIndexName).Id("1").Do(context.TODO())
+ if err == nil {
+		t.Fatalf("expected Exists to fail without type")
+	}
+	if res {
+ t.Fatalf("expected result to be false; got: %v", res)
+ }
+
+ // No id -> fail with error
+ res, err = NewExistsService(client).Index(testIndexName).Type("doc").Do(context.TODO())
+ if err == nil {
+		t.Fatalf("expected Exists to fail without id")
+	}
+	if res {
+ t.Fatalf("expected result to be false; got: %v", res)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/explain.go b/vendor/github.com/olivere/elastic/explain.go
new file mode 100644
index 000000000..2b975ad5d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/explain.go
@@ -0,0 +1,326 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// ExplainService computes a score explanation for a query and
+// a specific document.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-explain.html.
+type ExplainService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ q string
+ routing string
+ lenient *bool
+ analyzer string
+ df string
+ fields []string
+ lowercaseExpandedTerms *bool
+ xSourceInclude []string
+ analyzeWildcard *bool
+ parent string
+ preference string
+ xSource []string
+ defaultOperator string
+ xSourceExclude []string
+ source string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewExplainService creates a new ExplainService.
+func NewExplainService(client *Client) *ExplainService {
+ return &ExplainService{
+ client: client,
+ xSource: make([]string, 0),
+ xSourceExclude: make([]string, 0),
+ fields: make([]string, 0),
+ xSourceInclude: make([]string, 0),
+ }
+}
+
+// Id is the document ID.
+func (s *ExplainService) Id(id string) *ExplainService {
+ s.id = id
+ return s
+}
+
+// Index is the name of the index.
+func (s *ExplainService) Index(index string) *ExplainService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document.
+func (s *ExplainService) Type(typ string) *ExplainService {
+ s.typ = typ
+ return s
+}
+
+// Source is the URL-encoded query definition (instead of using the request body).
+func (s *ExplainService) Source(source string) *ExplainService {
+ s.source = source
+ return s
+}
+
+// XSourceExclude is a list of fields to exclude from the returned _source field.
+func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService {
+ s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
+ return s
+}
+
+// Lenient specifies whether format-based query failures
+// (such as providing text to a numeric field) should be ignored.
+func (s *ExplainService) Lenient(lenient bool) *ExplainService {
+ s.lenient = &lenient
+ return s
+}
+
+// Q sets the query in the Lucene query string syntax.
+func (s *ExplainService) Q(q string) *ExplainService {
+ s.q = q
+ return s
+}
+
+// Routing sets a specific routing value.
+func (s *ExplainService) Routing(routing string) *ExplainService {
+ s.routing = routing
+ return s
+}
+
+// AnalyzeWildcard specifies whether wildcards and prefix queries
+// in the query string query should be analyzed (default: false).
+func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService {
+ s.analyzeWildcard = &analyzeWildcard
+ return s
+}
+
+// Analyzer is the analyzer for the query string query.
+func (s *ExplainService) Analyzer(analyzer string) *ExplainService {
+ s.analyzer = analyzer
+ return s
+}
+
+// Df is the default field for query string query (default: _all).
+func (s *ExplainService) Df(df string) *ExplainService {
+ s.df = df
+ return s
+}
+
+// Fields is a list of fields to return in the response.
+func (s *ExplainService) Fields(fields ...string) *ExplainService {
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// LowercaseExpandedTerms specifies whether query terms should be lowercased.
+func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService {
+ s.lowercaseExpandedTerms = &lowercaseExpandedTerms
+ return s
+}
+
+// XSourceInclude is a list of fields to extract and return from the _source field.
+func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService {
+ s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
+ return s
+}
+
+// DefaultOperator is the default operator for query string query (AND or OR).
+func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService {
+ s.defaultOperator = defaultOperator
+ return s
+}
+
+// Parent is the ID of the parent document.
+func (s *ExplainService) Parent(parent string) *ExplainService {
+ s.parent = parent
+ return s
+}
+
+// Preference specifies the node or shard the operation should be performed on (default: random).
+func (s *ExplainService) Preference(preference string) *ExplainService {
+ s.preference = preference
+ return s
+}
+
+// XSource sets the `_source` parameter: pass "true" or "false" to control
+// whether the _source field is returned, or a list of fields to return.
+func (s *ExplainService) XSource(xSource ...string) *ExplainService {
+ s.xSource = append(s.xSource, xSource...)
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ExplainService) Pretty(pretty bool) *ExplainService {
+ s.pretty = pretty
+ return s
+}
+
+// Query sets a query definition using the Query DSL.
+func (s *ExplainService) Query(query Query) *ExplainService {
+ src, err := query.Source()
+ if err != nil {
+ // Do nothing in case of an error
+ return s
+ }
+ body := make(map[string]interface{})
+ body["query"] = src
+ s.bodyJson = body
+ return s
+}
+
+// BodyJson sets the query definition using the Query DSL.
+func (s *ExplainService) BodyJson(body interface{}) *ExplainService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString sets the query definition using the Query DSL as a string.
+func (s *ExplainService) BodyString(body string) *ExplainService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ExplainService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{
+ "id": s.id,
+ "index": s.index,
+ "type": s.typ,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if len(s.xSource) > 0 {
+ params.Set("_source", strings.Join(s.xSource, ","))
+ }
+ if s.defaultOperator != "" {
+ params.Set("default_operator", s.defaultOperator)
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.source != "" {
+ params.Set("source", s.source)
+ }
+ if len(s.xSourceExclude) > 0 {
+ params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
+ }
+ if s.lenient != nil {
+ params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
+ }
+ if s.q != "" {
+ params.Set("q", s.q)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ if s.lowercaseExpandedTerms != nil {
+ params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
+ }
+ if len(s.xSourceInclude) > 0 {
+ params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
+ }
+ if s.analyzeWildcard != nil {
+ params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
+ }
+ if s.analyzer != "" {
+ params.Set("analyzer", s.analyzer)
+ }
+ if s.df != "" {
+ params.Set("df", s.df)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ExplainService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *ExplainService) Do(ctx context.Context) (*ExplainResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ExplainResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ExplainResponse is the response of ExplainService.Do.
+type ExplainResponse struct {
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id"`
+ Matched bool `json:"matched"`
+ Explanation map[string]interface{} `json:"explanation"`
+}
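+
+// Usage sketch (illustrative only, not part of the upstream file): assuming
+// a configured *Client named client, a score explanation for a single
+// document can be requested like this:
+//
+//	query := NewTermQuery("user", "olivere")
+//	expl, err := client.Explain("twitter", "doc", "1").Query(query).Do(context.Background())
+//	if err != nil {
+//		// Handle error
+//	}
+//	if expl.Matched {
+//		// expl.Explanation holds the scoring details
+//	}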
diff --git a/vendor/github.com/olivere/elastic/explain_test.go b/vendor/github.com/olivere/elastic/explain_test.go
new file mode 100644
index 000000000..22cb9668a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/explain_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestExplain(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ // Add a document
+ indexResult, err := client.Index().
+ Index(testIndexName).
+ Type("doc").
+ Id("1").
+ BodyJson(&tweet1).
+ Refresh("true").
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", indexResult)
+ }
+
+ // Explain
+ query := NewTermQuery("user", "olivere")
+ expl, err := client.Explain(testIndexName, "doc", "1").Query(query).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if expl == nil {
+ t.Fatal("expected to return an explanation")
+ }
+ if !expl.Matched {
+ t.Errorf("expected matched to be %v; got: %v", true, expl.Matched)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/fetch_source_context.go b/vendor/github.com/olivere/elastic/fetch_source_context.go
new file mode 100644
index 000000000..874c4c1da
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/fetch_source_context.go
@@ -0,0 +1,90 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "net/url"
+ "strings"
+)
+
+// FetchSourceContext enables source filtering, i.e. it allows control
+// over how the _source field is returned with every hit. It is used
+// with various endpoints, e.g. when searching for documents, retrieving
+// individual documents, or even updating documents.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-source-filtering.html
+// for details.
+type FetchSourceContext struct {
+ fetchSource bool
+ includes []string
+ excludes []string
+}
+
+// NewFetchSourceContext returns a new FetchSourceContext.
+func NewFetchSourceContext(fetchSource bool) *FetchSourceContext {
+ return &FetchSourceContext{
+ fetchSource: fetchSource,
+ includes: make([]string, 0),
+ excludes: make([]string, 0),
+ }
+}
+
+// FetchSource indicates whether to return the _source.
+func (fsc *FetchSourceContext) FetchSource() bool {
+ return fsc.fetchSource
+}
+
+// SetFetchSource specifies whether to return the _source.
+func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) {
+ fsc.fetchSource = fetchSource
+}
+
+// Include specifies parts of the _source to return.
+// Wildcards are allowed here.
+func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext {
+ fsc.includes = append(fsc.includes, includes...)
+ return fsc
+}
+
+// Exclude specifies parts of the _source to exclude from the response.
+// Wildcards are allowed here.
+func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext {
+ fsc.excludes = append(fsc.excludes, excludes...)
+ return fsc
+}
+
+// Source returns the JSON-serializable data to be used in a body.
+func (fsc *FetchSourceContext) Source() (interface{}, error) {
+ if !fsc.fetchSource {
+ return false, nil
+ }
+ if len(fsc.includes) == 0 && len(fsc.excludes) == 0 {
+ return true, nil
+ }
+ src := make(map[string]interface{})
+ if len(fsc.includes) > 0 {
+ src["includes"] = fsc.includes
+ }
+ if len(fsc.excludes) > 0 {
+ src["excludes"] = fsc.excludes
+ }
+ return src, nil
+}
+
+// Query returns the parameters in a form suitable for a URL query string.
+func (fsc *FetchSourceContext) Query() url.Values {
+ params := url.Values{}
+ if fsc.fetchSource {
+ if len(fsc.includes) > 0 {
+ params.Add("_source_include", strings.Join(fsc.includes, ","))
+ }
+ if len(fsc.excludes) > 0 {
+ params.Add("_source_exclude", strings.Join(fsc.excludes, ","))
+ }
+ } else {
+ params.Add("_source", "false")
+ }
+ return params
+}
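+
+// Usage sketch (illustrative only, not part of the upstream file): assuming
+// a configured *Client named client, source filtering restricts the _source
+// returned by e.g. a Get request:
+//
+//	fsc := NewFetchSourceContext(true).Include("user", "message").Exclude("retweets")
+//	doc, err := client.Get().
+//		Index("twitter").Type("doc").Id("1").
+//		FetchSourceContext(fsc).
+//		Do(context.Background())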
diff --git a/vendor/github.com/olivere/elastic/fetch_source_context_test.go b/vendor/github.com/olivere/elastic/fetch_source_context_test.go
new file mode 100644
index 000000000..b98549036
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/fetch_source_context_test.go
@@ -0,0 +1,125 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFetchSourceContextNoFetchSource(t *testing.T) {
+ builder := NewFetchSourceContext(false)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `false`
+ if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestFetchSourceContextNoFetchSourceIgnoreIncludesAndExcludes(t *testing.T) {
+ builder := NewFetchSourceContext(false).Include("a", "b").Exclude("c")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `false`
+ if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestFetchSourceContextFetchSource(t *testing.T) {
+ builder := NewFetchSourceContext(true)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `true`
+ if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestFetchSourceContextFetchSourceWithIncludesOnly(t *testing.T) {
+ builder := NewFetchSourceContext(true).Include("a", "b")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"includes":["a","b"]}`
+ if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestFetchSourceContextFetchSourceWithIncludesAndExcludes(t *testing.T) {
+ builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"excludes":["c"],"includes":["a","b"]}`
+ if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestFetchSourceContextQueryDefaults(t *testing.T) {
+ builder := NewFetchSourceContext(true)
+ values := builder.Query()
+ got := values.Encode()
+ expected := ""
+ if got != expected {
+ t.Errorf("expected %q; got: %q", expected, got)
+ }
+}
+
+func TestFetchSourceContextQueryNoFetchSource(t *testing.T) {
+ builder := NewFetchSourceContext(false)
+ values := builder.Query()
+ got := values.Encode()
+ expected := "_source=false"
+ if got != expected {
+ t.Errorf("expected %q; got: %q", expected, got)
+ }
+}
+
+func TestFetchSourceContextQueryFetchSourceWithIncludesAndExcludes(t *testing.T) {
+ builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
+ values := builder.Query()
+ got := values.Encode()
+ expected := "_source_exclude=c&_source_include=a%2Cb"
+ if got != expected {
+ t.Errorf("expected %q; got: %q", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/field_caps.go b/vendor/github.com/olivere/elastic/field_caps.go
new file mode 100644
index 000000000..393cd3ce8
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/field_caps.go
@@ -0,0 +1,202 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// FieldCapsService allows retrieving the capabilities of fields among multiple indices.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-field-caps.html
+// for details.
+type FieldCapsService struct {
+ client *Client
+ pretty bool
+ index []string
+ allowNoIndices *bool
+ expandWildcards string
+ fields []string
+ ignoreUnavailable *bool
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewFieldCapsService creates a new FieldCapsService
+func NewFieldCapsService(client *Client) *FieldCapsService {
+ return &FieldCapsService{
+ client: client,
+ }
+}
+
+// Index is a list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *FieldCapsService) Index(index ...string) *FieldCapsService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices expression
+// resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *FieldCapsService) AllowNoIndices(allowNoIndices bool) *FieldCapsService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *FieldCapsService) ExpandWildcards(expandWildcards string) *FieldCapsService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Fields is a list of fields to retrieve the capabilities of.
+func (s *FieldCapsService) Fields(fields ...string) *FieldCapsService {
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *FieldCapsService) IgnoreUnavailable(ignoreUnavailable bool) *FieldCapsService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *FieldCapsService) Pretty(pretty bool) *FieldCapsService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson sets the request body: field JSON objects containing the name and
+// optionally a range to filter out indices whose results lie outside the
+// defined bounds.
+func (s *FieldCapsService) BodyJson(body interface{}) *FieldCapsService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString sets the request body as a string: field JSON objects containing
+// the name and optionally a range to filter out indices whose results lie
+// outside the defined bounds.
+func (s *FieldCapsService) BodyString(body string) *FieldCapsService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *FieldCapsService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_field_caps", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else {
+ path = "/_field_caps"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *FieldCapsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *FieldCapsService) Do(ctx context.Context) (*FieldCapsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ IgnoreErrors: []int{http.StatusNotFound},
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(oe): Is 404 really a valid response here?
+ if res.StatusCode == http.StatusNotFound {
+ return &FieldCapsResponse{}, nil
+ }
+
+ // Return operation response
+ ret := new(FieldCapsResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Request --
+
+// FieldCapsRequest represents the request body for the Field Capabilities API.
+type FieldCapsRequest struct {
+ Fields []string `json:"fields"`
+}
+
+// -- Response --
+
+// FieldCapsResponse contains field capabilities.
+type FieldCapsResponse struct {
+ Fields map[string]FieldCaps `json:"fields,omitempty"`
+}
+
+// FieldCaps contains capabilities of an individual field.
+type FieldCaps struct {
+ Type string `json:"type"`
+ Searchable bool `json:"searchable"`
+ Aggregatable bool `json:"aggregatable"`
+ Indices []string `json:"indices,omitempty"`
+ NonSearchableIndices []string `json:"non_searchable_indices,omitempty"`
+ NonAggregatableIndices []string `json:"non_aggregatable_indices,omitempty"`
+}
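+
+// Usage sketch (illustrative only, not part of the upstream file): assuming
+// a configured *Client named client, field capabilities can be retrieved
+// across all indices like this:
+//
+//	res, err := client.FieldCaps("_all").
+//		Fields("user", "message", "retweets", "created").
+//		Do(context.Background())
+//	if err != nil {
+//		// Handle error
+//	}
+//	if caps, ok := res.Fields["user"]; ok && caps.Aggregatable {
+//		// The "user" field can be used in aggregations
+//	}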
diff --git a/vendor/github.com/olivere/elastic/field_caps_test.go b/vendor/github.com/olivere/elastic/field_caps_test.go
new file mode 100644
index 000000000..e299fd516
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/field_caps_test.go
@@ -0,0 +1,146 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestFieldCapsURLs(t *testing.T) {
+ tests := []struct {
+ Service *FieldCapsService
+ ExpectedPath string
+ ExpectedParams url.Values
+ }{
+ {
+ Service: &FieldCapsService{},
+ ExpectedPath: "/_field_caps",
+ ExpectedParams: url.Values{},
+ },
+ {
+ Service: &FieldCapsService{
+ index: []string{"index1", "index2"},
+ },
+ ExpectedPath: "/index1%2Cindex2/_field_caps",
+ ExpectedParams: url.Values{},
+ },
+ {
+ Service: &FieldCapsService{
+ index: []string{"index_*"},
+ pretty: true,
+ },
+ ExpectedPath: "/index_%2A/_field_caps",
+ ExpectedParams: url.Values{"pretty": []string{"true"}},
+ },
+ }
+
+ for _, test := range tests {
+ gotPath, gotParams, err := test.Service.buildURL()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if gotPath != test.ExpectedPath {
+ t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
+ }
+ if gotParams.Encode() != test.ExpectedParams.Encode() {
+ t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
+ }
+ }
+}
+
+func TestFieldCapsRequestSerialize(t *testing.T) {
+ req := &FieldCapsRequest{
+ Fields: []string{"creation_date", "answer_count"},
+ }
+ data, err := json.Marshal(req)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fields":["creation_date","answer_count"]}`
+ if got != expected {
+		t.Fatalf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestFieldCapsRequestDeserialize(t *testing.T) {
+ body := `{
+ "fields" : ["creation_date", "answer_count"]
+ }`
+
+ var request FieldCapsRequest
+ if err := json.Unmarshal([]byte(body), &request); err != nil {
+ t.Fatalf("unexpected error during unmarshalling: %v", err)
+ }
+
+ sort.Sort(lexicographically{request.Fields})
+
+ expectedFields := []string{"answer_count", "creation_date"}
+ if !reflect.DeepEqual(request.Fields, expectedFields) {
+ t.Fatalf("expected fields to be %v, got %v", expectedFields, request.Fields)
+ }
+}
+
+func TestFieldCapsResponseUnmarshalling(t *testing.T) {
+ clusterStats := `{
+ "_shards": {
+ "total": 1,
+ "successful": 1,
+ "failed": 0
+ },
+ "fields": {
+ "creation_date": {
+ "type": "date",
+ "searchable": true,
+ "aggregatable": true,
+ "indices": ["index1", "index2"],
+ "non_searchable_indices": null,
+ "non_aggregatable_indices": null
+ },
+ "answer": {
+ "type": "keyword",
+ "searchable": true,
+ "aggregatable": true
+ }
+ }
+ }`
+
+ var resp FieldCapsResponse
+ if err := json.Unmarshal([]byte(clusterStats), &resp); err != nil {
+ t.Errorf("unexpected error during unmarshalling: %v", err)
+ }
+
+ caps, ok := resp.Fields["creation_date"]
+ if !ok {
+ t.Errorf("expected creation_date to be in the fields map, didn't find it")
+ }
+ if want, have := true, caps.Searchable; want != have {
+ t.Errorf("expected creation_date searchable to be %v, got %v", want, have)
+ }
+ if want, have := true, caps.Aggregatable; want != have {
+ t.Errorf("expected creation_date aggregatable to be %v, got %v", want, have)
+ }
+ if want, have := []string{"index1", "index2"}, caps.Indices; !reflect.DeepEqual(want, have) {
+ t.Errorf("expected creation_date indices to be %v, got %v", want, have)
+ }
+}
+
+func TestFieldCaps123(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+ // client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ res, err := client.FieldCaps("_all").Fields("user", "message", "retweets", "created").Pretty(true).Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if res == nil {
+ t.Fatalf("expected response; got: %v", res)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/geo_point.go b/vendor/github.com/olivere/elastic/geo_point.go
new file mode 100644
index 000000000..fb243671d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/geo_point.go
@@ -0,0 +1,48 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// GeoPoint is a geographic position described via latitude and longitude.
+type GeoPoint struct {
+ Lat float64 `json:"lat"`
+ Lon float64 `json:"lon"`
+}
+
+// Source returns the object to be serialized in Elasticsearch DSL.
+func (pt *GeoPoint) Source() map[string]float64 {
+ return map[string]float64{
+ "lat": pt.Lat,
+ "lon": pt.Lon,
+ }
+}
+
+// GeoPointFromLatLon initializes a new GeoPoint from latitude and longitude.
+func GeoPointFromLatLon(lat, lon float64) *GeoPoint {
+ return &GeoPoint{Lat: lat, Lon: lon}
+}
+
+// GeoPointFromString initializes a new GeoPoint from a string formatted as
+// "{latitude},{longitude}", e.g. "40.10210,-70.12091".
+func GeoPointFromString(latLon string) (*GeoPoint, error) {
+ latlon := strings.SplitN(latLon, ",", 2)
+ if len(latlon) != 2 {
+ return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
+ }
+ lat, err := strconv.ParseFloat(latlon[0], 64)
+ if err != nil {
+ return nil, err
+ }
+ lon, err := strconv.ParseFloat(latlon[1], 64)
+ if err != nil {
+ return nil, err
+ }
+ return &GeoPoint{Lat: lat, Lon: lon}, nil
+}
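+
+// Usage sketch (illustrative only, not part of the upstream file):
+//
+//	pt, err := GeoPointFromString("40.10210,-70.12091")
+//	if err != nil {
+//		// Handle error: not a valid "{lat},{lon}" string
+//	}
+//	src := pt.Source() // map[string]float64{"lat": 40.1021, "lon": -70.12091}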
diff --git a/vendor/github.com/olivere/elastic/geo_point_test.go b/vendor/github.com/olivere/elastic/geo_point_test.go
new file mode 100644
index 000000000..1d085cd38
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/geo_point_test.go
@@ -0,0 +1,24 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoPointSource(t *testing.T) {
+ pt := GeoPoint{Lat: 40, Lon: -70}
+
+ data, err := json.Marshal(pt.Source())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"lat":40,"lon":-70}`
+ if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/get.go b/vendor/github.com/olivere/elastic/get.go
new file mode 100644
index 000000000..efcc748bb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/get.go
@@ -0,0 +1,260 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// GetService retrieves a typed JSON document from the index by ID.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-get.html
+// for details.
+type GetService struct {
+ client *Client
+ pretty bool
+ index string
+ typ string
+ id string
+ routing string
+ preference string
+ storedFields []string
+ refresh string
+ realtime *bool
+ fsc *FetchSourceContext
+ version interface{}
+ versionType string
+ parent string
+ ignoreErrorsOnGeneratedFields *bool
+}
+
+// NewGetService creates a new GetService.
+func NewGetService(client *Client) *GetService {
+ return &GetService{
+ client: client,
+ typ: "_all",
+ }
+}
+
+// Index is the name of the index.
+func (s *GetService) Index(index string) *GetService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document (use `_all` to fetch the first document
+// matching the ID across all types).
+func (s *GetService) Type(typ string) *GetService {
+ s.typ = typ
+ return s
+}
+
+// Id is the document ID.
+func (s *GetService) Id(id string) *GetService {
+ s.id = id
+ return s
+}
+
+// Parent is the ID of the parent document.
+func (s *GetService) Parent(parent string) *GetService {
+ s.parent = parent
+ return s
+}
+
+// Routing is the specific routing value.
+func (s *GetService) Routing(routing string) *GetService {
+ s.routing = routing
+ return s
+}
+
+// Preference specifies the node or shard the operation should be performed on (default: random).
+func (s *GetService) Preference(preference string) *GetService {
+ s.preference = preference
+ return s
+}
+
+// StoredFields is a list of fields to return in the response.
+func (s *GetService) StoredFields(storedFields ...string) *GetService {
+ s.storedFields = append(s.storedFields, storedFields...)
+ return s
+}
+
+// FetchSource indicates whether the document's _source field should be
+// returned with the response.
+func (s *GetService) FetchSource(fetchSource bool) *GetService {
+ if s.fsc == nil {
+ s.fsc = NewFetchSourceContext(fetchSource)
+ } else {
+ s.fsc.SetFetchSource(fetchSource)
+ }
+ return s
+}
+
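+// FetchSourceContext sets a fine-grained source filtering context. A sketch
+// mirroring the tests below (index, type, and id are placeholders):
+//
+//    fsc := NewFetchSourceContext(true).Exclude("message")
+//    res, err := client.Get().Index("twitter").Type("doc").Id("1").
+//        FetchSourceContext(fsc).Do(ctx)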
+func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService {
+ s.fsc = fetchSourceContext
+ return s
+}
+
+// Refresh the shard containing the document before performing the operation.
+func (s *GetService) Refresh(refresh string) *GetService {
+ s.refresh = refresh
+ return s
+}
+
+// Realtime specifies whether to perform the operation in realtime or search mode.
+func (s *GetService) Realtime(realtime bool) *GetService {
+ s.realtime = &realtime
+ return s
+}
+
+// VersionType is the specific version type.
+func (s *GetService) VersionType(versionType string) *GetService {
+ s.versionType = versionType
+ return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *GetService) Version(version interface{}) *GetService {
+ s.version = version
+ return s
+}
+
+// IgnoreErrorsOnGeneratedFields indicates whether to ignore errors on
+// fields that are generated when the transaction log is accessed.
+func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService {
+ s.ignoreErrorsOnGeneratedFields = &ignore
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *GetService) Pretty(pretty bool) *GetService {
+ s.pretty = pretty
+ return s
+}
+
+// Validate checks if the operation is valid.
+func (s *GetService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// buildURL builds the URL for the operation.
+func (s *GetService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+ "id": s.id,
+ "index": s.index,
+ "type": s.typ,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if len(s.storedFields) > 0 {
+ params.Set("stored_fields", strings.Join(s.storedFields, ","))
+ }
+ if s.refresh != "" {
+ params.Set("refresh", s.refresh)
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+ if s.realtime != nil {
+ params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
+ }
+ if s.ignoreErrorsOnGeneratedFields != nil {
+ params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields))
+ }
+ if s.fsc != nil {
+ for k, values := range s.fsc.Query() {
+ params.Add(k, strings.Join(values, ","))
+ }
+ }
+ return path, params, nil
+}
+
+// Do executes the operation.
+func (s *GetService) Do(ctx context.Context) (*GetResult, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(GetResult)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a get request.
+
+// GetResult is the outcome of GetService.Do.
+type GetResult struct {
+ Index string `json:"_index"` // index meta field
+ Type string `json:"_type"` // type meta field
+ Id string `json:"_id"` // id meta field
+ Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields)
+ Routing string `json:"_routing"` // routing meta field
+ Parent string `json:"_parent"` // parent meta field
+ Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService
+ Source *json.RawMessage `json:"_source,omitempty"`
+ Found bool `json:"found,omitempty"`
+ Fields map[string]interface{} `json:"fields,omitempty"`
+ // TODO double-check that MultiGet now returns detailed error information
+ Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet
+}
diff --git a/vendor/github.com/olivere/elastic/get_test.go b/vendor/github.com/olivere/elastic/get_test.go
new file mode 100644
index 000000000..f9504bdbf
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/get_test.go
@@ -0,0 +1,166 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+func TestGet(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Get document 1
+ res, err := client.Get().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !res.Found {
+ t.Errorf("expected Found = true; got %v", res.Found)
+ }
+ if res.Source == nil {
+ t.Errorf("expected Source != nil; got %v", res.Source)
+ }
+
+ // Get non existent document 99
+ res, err = client.Get().Index(testIndexName).Type("doc").Id("99").Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected error; got: %v", err)
+ }
+ if !IsNotFound(err) {
+ t.Errorf("expected NotFound error; got: %v", err)
+ }
+ if res != nil {
+ t.Errorf("expected no response; got: %v", res)
+ }
+}
+
+func TestGetWithSourceFiltering(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Get document 1, without source
+ res, err := client.Get().Index(testIndexName).Type("doc").Id("1").FetchSource(false).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !res.Found {
+ t.Errorf("expected Found = true; got %v", res.Found)
+ }
+ if res.Source != nil {
+ t.Errorf("expected Source == nil; got %v", res.Source)
+ }
+
+ // Get document 1, exclude Message field
+ fsc := NewFetchSourceContext(true).Exclude("message")
+ res, err = client.Get().Index(testIndexName).Type("doc").Id("1").FetchSourceContext(fsc).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !res.Found {
+ t.Errorf("expected Found = true; got %v", res.Found)
+ }
+ if res.Source == nil {
+ t.Errorf("expected Source != nil; got %v", res.Source)
+ }
+ var tw tweet
+ err = json.Unmarshal(*res.Source, &tw)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tw.User != "olivere" {
+ t.Errorf("expected user %q; got: %q", "olivere", tw.User)
+ }
+ if tw.Message != "" {
+ t.Errorf("expected message %q; got: %q", "", tw.Message)
+ }
+}
+
+func TestGetWithFields(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Get document 1, specifying fields
+ res, err := client.Get().Index(testIndexName).Type("doc").Id("1").StoredFields("message").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !res.Found {
+ t.Errorf("expected Found = true; got: %v", res.Found)
+ }
+
+ // We must NOT have the "user" field
+ _, ok := res.Fields["user"]
+ if ok {
+ t.Fatalf("expected no field %q in document", "user")
+ }
+
+ // We must have the "message" field
+ messageField, ok := res.Fields["message"]
+ if !ok {
+ t.Fatalf("expected field %q in document", "message")
+ }
+
+ // Depending on the version of Elasticsearch, the message field is returned
+ // as a string or as a slice of strings. This test handles both cases.
+
+ messageString, ok := messageField.(string)
+ if !ok {
+ messageArray, ok := messageField.([]interface{})
+ if !ok {
+ t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField)
+ } else {
+ messageString, ok = messageArray[0].(string)
+ if !ok {
+ t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField)
+ }
+ }
+ }
+
+ if messageString != tweet1.Message {
+ t.Errorf("expected message %q; got: %q", tweet1.Message, messageString)
+ }
+}
+
+func TestGetValidate(t *testing.T) {
+ // Mitigate against http://stackoverflow.com/questions/27491738/elasticsearch-go-index-failures-no-feature-for-name
+ client := setupTestClientAndCreateIndex(t)
+
+ if _, err := client.Get().Do(context.TODO()); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+ if _, err := client.Get().Index(testIndexName).Do(context.TODO()); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+ if _, err := client.Get().Type("doc").Do(context.TODO()); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+ if _, err := client.Get().Id("1").Do(context.TODO()); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+ if _, err := client.Get().Index(testIndexName).Type("doc").Do(context.TODO()); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+ if _, err := client.Get().Type("doc").Id("1").Do(context.TODO()); err == nil {
+ t.Fatal("expected Get to fail")
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/highlight.go b/vendor/github.com/olivere/elastic/highlight.go
new file mode 100644
index 000000000..6d8d2ba63
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/highlight.go
@@ -0,0 +1,469 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Highlight allows highlighting search results on one or more fields.
+// For details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-highlighting.html
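+//
+// A typical setup, mirroring the tests in this package (the field name and
+// tag strings are illustrative):
+//
+//    hl := NewHighlight().
+//        Field("message").
+//        PreTags("<em>").
+//        PostTags("</em>")
+//    // pass hl to a search via client.Search().Highlight(hl)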
+type Highlight struct {
+ fields []*HighlighterField
+ tagsSchema *string
+ highlightFilter *bool
+ fragmentSize *int
+ numOfFragments *int
+ preTags []string
+ postTags []string
+ order *string
+ encoder *string
+ requireFieldMatch *bool
+ boundaryMaxScan *int
+ boundaryChars *string
+ boundaryScannerType *string
+ boundaryScannerLocale *string
+ highlighterType *string
+ fragmenter *string
+ highlightQuery Query
+ noMatchSize *int
+ phraseLimit *int
+ options map[string]interface{}
+ forceSource *bool
+ useExplicitFieldOrder bool
+}
+
+// NewHighlight creates a builder for highlighting search results.
+func NewHighlight() *Highlight {
+ hl := &Highlight{
+ options: make(map[string]interface{}),
+ }
+ return hl
+}
+
+func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight {
+ hl.fields = append(hl.fields, fields...)
+ return hl
+}
+
+func (hl *Highlight) Field(name string) *Highlight {
+ field := NewHighlighterField(name)
+ hl.fields = append(hl.fields, field)
+ return hl
+}
+
+func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
+ hl.tagsSchema = &schemaName
+ return hl
+}
+
+func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
+ hl.highlightFilter = &highlightFilter
+ return hl
+}
+
+func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
+ hl.fragmentSize = &fragmentSize
+ return hl
+}
+
+func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
+ hl.numOfFragments = &numOfFragments
+ return hl
+}
+
+func (hl *Highlight) Encoder(encoder string) *Highlight {
+ hl.encoder = &encoder
+ return hl
+}
+
+func (hl *Highlight) PreTags(preTags ...string) *Highlight {
+ hl.preTags = append(hl.preTags, preTags...)
+ return hl
+}
+
+func (hl *Highlight) PostTags(postTags ...string) *Highlight {
+ hl.postTags = append(hl.postTags, postTags...)
+ return hl
+}
+
+func (hl *Highlight) Order(order string) *Highlight {
+ hl.order = &order
+ return hl
+}
+
+func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
+ hl.requireFieldMatch = &requireFieldMatch
+ return hl
+}
+
+func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
+ hl.boundaryMaxScan = &boundaryMaxScan
+ return hl
+}
+
+func (hl *Highlight) BoundaryChars(boundaryChars string) *Highlight {
+ hl.boundaryChars = &boundaryChars
+ return hl
+}
+
+func (hl *Highlight) BoundaryScannerType(boundaryScannerType string) *Highlight {
+ hl.boundaryScannerType = &boundaryScannerType
+ return hl
+}
+
+func (hl *Highlight) BoundaryScannerLocale(boundaryScannerLocale string) *Highlight {
+ hl.boundaryScannerLocale = &boundaryScannerLocale
+ return hl
+}
+
+func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
+ hl.highlighterType = &highlighterType
+ return hl
+}
+
+func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
+ hl.fragmenter = &fragmenter
+ return hl
+}
+
+func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight {
+ hl.highlightQuery = highlightQuery
+ return hl
+}
+
+func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
+ hl.noMatchSize = &noMatchSize
+ return hl
+}
+
+func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
+ hl.options = options
+ return hl
+}
+
+func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
+ hl.forceSource = &forceSource
+ return hl
+}
+
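+// UseExplicitFieldOrder, when set to true, causes Source to emit the
+// highlighted fields as a JSON array (preserving insertion order) instead
+// of an object. A sketch of the two shapes, with illustrative field names:
+//
+//    {"fields":{"color":{},"grade":{}}}     // default: object, unordered
+//    {"fields":[{"grade":{}},{"color":{}}]} // explicit field order: array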
+func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
+ hl.useExplicitFieldOrder = useExplicitFieldOrder
+ return hl
+}
+
+// Source creates the query source for the highlight section of a search request.
+func (hl *Highlight) Source() (interface{}, error) {
+ // Returns the map inside of "highlight":
+ // "highlight":{
+ // ... this ...
+ // }
+ source := make(map[string]interface{})
+ if hl.tagsSchema != nil {
+ source["tags_schema"] = *hl.tagsSchema
+ }
+ if len(hl.preTags) > 0 {
+ source["pre_tags"] = hl.preTags
+ }
+ if len(hl.postTags) > 0 {
+ source["post_tags"] = hl.postTags
+ }
+ if hl.order != nil {
+ source["order"] = *hl.order
+ }
+ if hl.highlightFilter != nil {
+ source["highlight_filter"] = *hl.highlightFilter
+ }
+ if hl.fragmentSize != nil {
+ source["fragment_size"] = *hl.fragmentSize
+ }
+ if hl.numOfFragments != nil {
+ source["number_of_fragments"] = *hl.numOfFragments
+ }
+ if hl.encoder != nil {
+ source["encoder"] = *hl.encoder
+ }
+ if hl.requireFieldMatch != nil {
+ source["require_field_match"] = *hl.requireFieldMatch
+ }
+ if hl.boundaryMaxScan != nil {
+ source["boundary_max_scan"] = *hl.boundaryMaxScan
+ }
+ if hl.boundaryChars != nil {
+ source["boundary_chars"] = *hl.boundaryChars
+ }
+ if hl.boundaryScannerType != nil {
+ source["boundary_scanner"] = *hl.boundaryScannerType
+ }
+ if hl.boundaryScannerLocale != nil {
+ source["boundary_scanner_locale"] = *hl.boundaryScannerLocale
+ }
+ if hl.highlighterType != nil {
+ source["type"] = *hl.highlighterType
+ }
+ if hl.fragmenter != nil {
+ source["fragmenter"] = *hl.fragmenter
+ }
+ if hl.highlightQuery != nil {
+ src, err := hl.highlightQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["highlight_query"] = src
+ }
+ if hl.noMatchSize != nil {
+ source["no_match_size"] = *hl.noMatchSize
+ }
+ if hl.phraseLimit != nil {
+ source["phrase_limit"] = *hl.phraseLimit
+ }
+ if len(hl.options) > 0 {
+ source["options"] = hl.options
+ }
+ if hl.forceSource != nil {
+ source["force_source"] = *hl.forceSource
+ }
+
+ if len(hl.fields) > 0 {
+ if hl.useExplicitFieldOrder {
+ // Use a slice for the fields
+ var fields []map[string]interface{}
+ for _, field := range hl.fields {
+ src, err := field.Source()
+ if err != nil {
+ return nil, err
+ }
+ fmap := make(map[string]interface{})
+ fmap[field.Name] = src
+ fields = append(fields, fmap)
+ }
+ source["fields"] = fields
+ } else {
+ // Use a map for the fields
+ fields := make(map[string]interface{})
+ for _, field := range hl.fields {
+ src, err := field.Source()
+ if err != nil {
+ return nil, err
+ }
+ fields[field.Name] = src
+ }
+ source["fields"] = fields
+ }
+ }
+
+ return source, nil
+}
+
+// HighlighterField specifies a highlighted field.
+type HighlighterField struct {
+ Name string
+
+ preTags []string
+ postTags []string
+ fragmentSize int
+ fragmentOffset int
+ numOfFragments int
+ highlightFilter *bool
+ order *string
+ requireFieldMatch *bool
+ boundaryMaxScan int
+ boundaryChars []rune
+ highlighterType *string
+ fragmenter *string
+ highlightQuery Query
+ noMatchSize *int
+ matchedFields []string
+ phraseLimit *int
+ options map[string]interface{}
+ forceSource *bool
+}
+
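+// NewHighlighterField creates a new field for highlighting with sensible
+// defaults. A sketch from the tests (field name and options are examples):
+//
+//    field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1)
+//    // field.Source() marshals to {"fragment_size":2,"number_of_fragments":1}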
+func NewHighlighterField(name string) *HighlighterField {
+ return &HighlighterField{
+ Name: name,
+ preTags: make([]string, 0),
+ postTags: make([]string, 0),
+ fragmentSize: -1,
+ fragmentOffset: -1,
+ numOfFragments: -1,
+ boundaryMaxScan: -1,
+ boundaryChars: make([]rune, 0),
+ matchedFields: make([]string, 0),
+ options: make(map[string]interface{}),
+ }
+}
+
+func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField {
+ f.preTags = append(f.preTags, preTags...)
+ return f
+}
+
+func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField {
+ f.postTags = append(f.postTags, postTags...)
+ return f
+}
+
+func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField {
+ f.fragmentSize = fragmentSize
+ return f
+}
+
+func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField {
+ f.fragmentOffset = fragmentOffset
+ return f
+}
+
+func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField {
+ f.numOfFragments = numOfFragments
+ return f
+}
+
+func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField {
+ f.highlightFilter = &highlightFilter
+ return f
+}
+
+func (f *HighlighterField) Order(order string) *HighlighterField {
+ f.order = &order
+ return f
+}
+
+func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField {
+ f.requireFieldMatch = &requireFieldMatch
+ return f
+}
+
+func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField {
+ f.boundaryMaxScan = boundaryMaxScan
+ return f
+}
+
+func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField {
+ f.boundaryChars = append(f.boundaryChars, boundaryChars...)
+ return f
+}
+
+func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField {
+ f.highlighterType = &highlighterType
+ return f
+}
+
+func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField {
+ f.fragmenter = &fragmenter
+ return f
+}
+
+func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField {
+ f.highlightQuery = highlightQuery
+ return f
+}
+
+func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField {
+ f.noMatchSize = &noMatchSize
+ return f
+}
+
+func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField {
+ f.options = options
+ return f
+}
+
+func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField {
+ f.matchedFields = append(f.matchedFields, matchedFields...)
+ return f
+}
+
+func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField {
+ f.phraseLimit = &phraseLimit
+ return f
+}
+
+func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField {
+ f.forceSource = &forceSource
+ return f
+}
+
+// Source returns the JSON-serializable settings of the highlighted field.
+func (f *HighlighterField) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ if len(f.preTags) > 0 {
+ source["pre_tags"] = f.preTags
+ }
+ if len(f.postTags) > 0 {
+ source["post_tags"] = f.postTags
+ }
+ if f.fragmentSize != -1 {
+ source["fragment_size"] = f.fragmentSize
+ }
+ if f.numOfFragments != -1 {
+ source["number_of_fragments"] = f.numOfFragments
+ }
+ if f.fragmentOffset != -1 {
+ source["fragment_offset"] = f.fragmentOffset
+ }
+ if f.highlightFilter != nil {
+ source["highlight_filter"] = *f.highlightFilter
+ }
+ if f.order != nil {
+ source["order"] = *f.order
+ }
+ if f.requireFieldMatch != nil {
+ source["require_field_match"] = *f.requireFieldMatch
+ }
+ if f.boundaryMaxScan != -1 {
+ source["boundary_max_scan"] = f.boundaryMaxScan
+ }
+ if len(f.boundaryChars) > 0 {
+ source["boundary_chars"] = f.boundaryChars
+ }
+ if f.highlighterType != nil {
+ source["type"] = *f.highlighterType
+ }
+ if f.fragmenter != nil {
+ source["fragmenter"] = *f.fragmenter
+ }
+ if f.highlightQuery != nil {
+ src, err := f.highlightQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["highlight_query"] = src
+ }
+ if f.noMatchSize != nil {
+ source["no_match_size"] = *f.noMatchSize
+ }
+ if len(f.matchedFields) > 0 {
+ source["matched_fields"] = f.matchedFields
+ }
+ if f.phraseLimit != nil {
+ source["phrase_limit"] = *f.phraseLimit
+ }
+ if len(f.options) > 0 {
+ source["options"] = f.options
+ }
+ if f.forceSource != nil {
+ source["force_source"] = *f.forceSource
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/highlight_test.go b/vendor/github.com/olivere/elastic/highlight_test.go
new file mode 100644
index 000000000..c7b972c44
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/highlight_test.go
@@ -0,0 +1,211 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+func TestHighlighterField(t *testing.T) {
+ field := NewHighlighterField("grade")
+ src, err := field.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHighlighterFieldWithOptions(t *testing.T) {
+ field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1)
+ src, err := field.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fragment_size":2,"number_of_fragments":1}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHighlightWithStringField(t *testing.T) {
+ builder := NewHighlight().Field("grade")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fields":{"grade":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHighlightWithFields(t *testing.T) {
+ gradeField := NewHighlighterField("grade")
+ builder := NewHighlight().Fields(gradeField)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fields":{"grade":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHighlightWithMultipleFields(t *testing.T) {
+ gradeField := NewHighlighterField("grade")
+ colorField := NewHighlighterField("color")
+ builder := NewHighlight().Fields(gradeField, colorField)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fields":{"color":{},"grade":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHighlighterWithExplicitFieldOrder(t *testing.T) {
+ gradeField := NewHighlighterField("grade").FragmentSize(2)
+ colorField := NewHighlighterField("color").FragmentSize(2).NumOfFragments(1)
+ builder := NewHighlight().Fields(gradeField, colorField).UseExplicitFieldOrder(true)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fields":[{"grade":{"fragment_size":2}},{"color":{"fragment_size":2,"number_of_fragments":1}}]}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHighlightWithBoundarySettings(t *testing.T) {
+ builder := NewHighlight().
+ BoundaryChars(" \t\r").
+ BoundaryScannerType("word")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"boundary_chars":" \t\r","boundary_scanner":"word"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHighlightWithTermQuery(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Specify highlighter
+ hl := NewHighlight()
+ hl = hl.Fields(NewHighlighterField("message"))
+ hl = hl.PreTags("<em>").PostTags("</em>")
+
+ // The prefix query below should match exactly one document
+ query := NewPrefixQuery("message", "golang")
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Highlight(hl).
+ Query(query).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Fatalf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 1 {
+ t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 1 {
+ t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+ }
+
+ hit := searchResult.Hits.Hits[0]
+ var tw tweet
+ if err := json.Unmarshal(*hit.Source, &tw); err != nil {
+ t.Fatal(err)
+ }
+ if len(hit.Highlight) == 0 {
+ t.Fatal("expected hit to have a highlight; got nil")
+ }
+ if hl, found := hit.Highlight["message"]; found {
+ if len(hl) != 1 {
+ t.Fatalf("expected to have one highlight for field \"message\"; got %d", len(hl))
+ }
+ expected := "Welcome to <em>Golang</em> and Elasticsearch."
+ if hl[0] != expected {
+ t.Errorf("expected to have highlight \"%s\"; got \"%s\"", expected, hl[0])
+ }
+ } else {
+ t.Fatal("expected to have a highlight on field \"message\"; got none")
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/index.go b/vendor/github.com/olivere/elastic/index.go
new file mode 100644
index 000000000..4a4c3278e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/index.go
@@ -0,0 +1,297 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndexService adds or updates a typed JSON document in a specified index,
+// making it searchable.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html
+// for details.
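+//
+// A minimal usage sketch (the index name, type, id, and the document are
+// illustrative placeholders, not part of this file):
+//
+//    doc := map[string]interface{}{"user": "olivere", "message": "Hello"}
+//    res, err := client.Index().
+//        Index("twitter").
+//        Type("doc").
+//        Id("1").
+//        BodyJson(doc).
+//        Do(ctx)
+//    // on success, res.Result reports e.g. "created" or "updated"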
+type IndexService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ parent string
+ routing string
+ timeout string
+ timestamp string
+ ttl string
+ version interface{}
+ opType string
+ versionType string
+ refresh string
+ waitForActiveShards string
+ pipeline string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewIndexService creates a new IndexService.
+func NewIndexService(client *Client) *IndexService {
+ return &IndexService{
+ client: client,
+ }
+}
+
+// Id is the document ID.
+func (s *IndexService) Id(id string) *IndexService {
+ s.id = id
+ return s
+}
+
+// Index is the name of the index.
+func (s *IndexService) Index(index string) *IndexService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document.
+func (s *IndexService) Type(typ string) *IndexService {
+ s.typ = typ
+ return s
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active
+// before proceeding with the index operation. Defaults to 1, meaning the
+// primary shard only. Set to `all` for all shard copies, otherwise set to
+// any non-negative value less than or equal to the total number of copies
+// for the shard (number of replicas + 1).
+func (s *IndexService) WaitForActiveShards(waitForActiveShards string) *IndexService {
+ s.waitForActiveShards = waitForActiveShards
+ return s
+}
+
+// Pipeline specifies the pipeline id to preprocess incoming documents with.
+func (s *IndexService) Pipeline(pipeline string) *IndexService {
+ s.pipeline = pipeline
+ return s
+}
+
+// Refresh the index after performing the operation.
+func (s *IndexService) Refresh(refresh string) *IndexService {
+ s.refresh = refresh
+ return s
+}
+
+// Ttl is an expiration time for the document.
+func (s *IndexService) Ttl(ttl string) *IndexService {
+ s.ttl = ttl
+ return s
+}
+
+// TTL is an expiration time for the document (alias for Ttl).
+func (s *IndexService) TTL(ttl string) *IndexService {
+ s.ttl = ttl
+ return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *IndexService) Version(version interface{}) *IndexService {
+ s.version = version
+ return s
+}
+
+// OpType is an explicit operation type, i.e. "create" or "index" (default).
+func (s *IndexService) OpType(opType string) *IndexService {
+ s.opType = opType
+ return s
+}
+
+// Parent is the ID of the parent document.
+func (s *IndexService) Parent(parent string) *IndexService {
+ s.parent = parent
+ return s
+}
+
+// Routing is a specific routing value.
+func (s *IndexService) Routing(routing string) *IndexService {
+ s.routing = routing
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndexService) Timeout(timeout string) *IndexService {
+ s.timeout = timeout
+ return s
+}
+
+// Timestamp is an explicit timestamp for the document.
+func (s *IndexService) Timestamp(timestamp string) *IndexService {
+ s.timestamp = timestamp
+ return s
+}
+
+// VersionType is a specific version type.
+func (s *IndexService) VersionType(versionType string) *IndexService {
+ s.versionType = versionType
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *IndexService) Pretty(pretty bool) *IndexService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson is the document as a serializable JSON interface.
+func (s *IndexService) BodyJson(body interface{}) *IndexService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is the document encoded as a string.
+func (s *IndexService) BodyString(body string) *IndexService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndexService) buildURL() (string, string, url.Values, error) {
+ var err error
+ var method, path string
+
+ if s.id != "" {
+ // Create document with manual id
+ method = "PUT"
+ path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+ "id": s.id,
+ "index": s.index,
+ "type": s.typ,
+ })
+ } else {
+ // Automatic ID generation
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#index-creation
+ method = "POST"
+ path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{
+ "index": s.index,
+ "type": s.typ,
+ })
+ }
+ if err != nil {
+ return "", "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.waitForActiveShards != "" {
+ params.Set("wait_for_active_shards", s.waitForActiveShards)
+ }
+ if s.refresh != "" {
+ params.Set("refresh", s.refresh)
+ }
+ if s.opType != "" {
+ params.Set("op_type", s.opType)
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.pipeline != "" {
+ params.Set("pipeline", s.pipeline)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.timestamp != "" {
+ params.Set("timestamp", s.timestamp)
+ }
+ if s.ttl != "" {
+ params.Set("ttl", s.ttl)
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+ return method, path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndexService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if s.bodyString == "" && s.bodyJson == nil {
+ invalid = append(invalid, "BodyJson")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndexService) Do(ctx context.Context) (*IndexResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ method, path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: method,
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndexResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndexResponse is the result of indexing a document in Elasticsearch.
+type IndexResponse struct {
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Version int64 `json:"_version,omitempty"`
+ Result string `json:"result,omitempty"`
+ Shards *shardsInfo `json:"_shards,omitempty"`
+ SeqNo int64 `json:"_seq_no,omitempty"`
+ PrimaryTerm int64 `json:"_primary_term,omitempty"`
+ Status int `json:"status,omitempty"`
+ ForcedRefresh bool `json:"forced_refresh,omitempty"`
+}
diff --git a/vendor/github.com/olivere/elastic/index_test.go b/vendor/github.com/olivere/elastic/index_test.go
new file mode 100644
index 000000000..1a0c38576
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/index_test.go
@@ -0,0 +1,280 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+func TestIndexLifecycle(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ // Add a document
+ indexResult, err := client.Index().
+ Index(testIndexName).
+ Type("doc").
+ Id("1").
+ BodyJson(&tweet1).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", indexResult)
+ }
+
+ // Exists
+ exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Errorf("expected exists %v; got %v", true, exists)
+ }
+
+ // Get document
+ getResult, err := client.Get().
+ Index(testIndexName).
+ Type("doc").
+ Id("1").
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if getResult.Index != testIndexName {
+ t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
+ }
+ if getResult.Type != "doc" {
+ t.Errorf("expected GetResult.Type %q; got %q", "doc", getResult.Type)
+ }
+ if getResult.Id != "1" {
+ t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id)
+ }
+ if getResult.Source == nil {
+ t.Errorf("expected GetResult.Source to be != nil; got nil")
+ }
+
+ // Decode the Source field
+ var tweetGot tweet
+ err = json.Unmarshal(*getResult.Source, &tweetGot)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tweetGot.User != tweet1.User {
+ t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
+ }
+ if tweetGot.Message != tweet1.Message {
+ t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
+ }
+
+ // Delete document again
+ deleteResult, err := client.Delete().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if deleteResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", deleteResult)
+ }
+
+ // Exists
+ exists, err = client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exists {
+ t.Errorf("expected exists %v; got %v", false, exists)
+ }
+}
+
+func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ // Add a document
+ indexResult, err := client.Index().
+ Index(testIndexName).
+ Type("doc").
+ BodyJson(&tweet1).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", indexResult)
+ }
+ if indexResult.Id == "" {
+ t.Fatalf("expected Es to generate an automatic ID, got: %v", indexResult.Id)
+ }
+ id := indexResult.Id
+
+ // Exists
+ exists, err := client.Exists().Index(testIndexName).Type("doc").Id(id).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Errorf("expected exists %v; got %v", true, exists)
+ }
+
+ // Get document
+ getResult, err := client.Get().
+ Index(testIndexName).
+ Type("doc").
+ Id(id).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if getResult.Index != testIndexName {
+ t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
+ }
+ if getResult.Type != "doc" {
+ t.Errorf("expected GetResult.Type %q; got %q", "doc", getResult.Type)
+ }
+ if getResult.Id != id {
+ t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id)
+ }
+ if getResult.Source == nil {
+ t.Errorf("expected GetResult.Source to be != nil; got nil")
+ }
+
+ // Decode the Source field
+ var tweetGot tweet
+ err = json.Unmarshal(*getResult.Source, &tweetGot)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tweetGot.User != tweet1.User {
+ t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
+ }
+ if tweetGot.Message != tweet1.Message {
+ t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
+ }
+
+ // Delete document again
+ deleteResult, err := client.Delete().Index(testIndexName).Type("doc").Id(id).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if deleteResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", deleteResult)
+ }
+
+ // Exists
+ exists, err = client.Exists().Index(testIndexName).Type("doc").Id(id).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exists {
+ t.Errorf("expected exists %v; got %v", false, exists)
+ }
+}
+
+func TestIndexValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ // No index name -> fail with error
+ res, err := NewIndexService(client).Type("doc").Id("1").BodyJson(&tweet).Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected Index to fail without index name")
+ }
+ if res != nil {
+ t.Fatalf("expected result to be == nil; got: %v", res)
+ }
+
+ // No type -> fail with error
+ res, err = NewIndexService(client).Index(testIndexName).Id("1").BodyJson(&tweet).Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected Index to fail without type")
+ }
+ if res != nil {
+ t.Fatalf("expected result to be == nil; got: %v", res)
+ }
+}
+
+func TestIndexCreateExistsOpenCloseDelete(t *testing.T) {
+ // TODO: Find out how to make these test robust
+ t.Skip("test fails regularly with 409 (Conflict): " +
+ "IndexPrimaryShardNotAllocatedException[[elastic-test] " +
+ "primary not allocated post api... skipping")
+
+ client := setupTestClient(t)
+
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex == nil {
+ t.Fatalf("expected response; got: %v", createIndex)
+ }
+ if !createIndex.Acknowledged {
+ t.Errorf("expected ack for creating index; got: %v", createIndex.Acknowledged)
+ }
+
+ // Exists
+ indexExists, err := client.IndexExists(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !indexExists {
+ t.Fatalf("expected index exists=%v; got %v", true, indexExists)
+ }
+
+ // Flush
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Close index
+ closeIndex, err := client.CloseIndex(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if closeIndex == nil {
+ t.Fatalf("expected response; got: %v", closeIndex)
+ }
+ if !closeIndex.Acknowledged {
+ t.Errorf("expected ack for closing index; got: %v", closeIndex.Acknowledged)
+ }
+
+ // Open index
+ openIndex, err := client.OpenIndex(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if openIndex == nil {
+ t.Fatalf("expected response; got: %v", openIndex)
+ }
+ if !openIndex.Acknowledged {
+ t.Errorf("expected ack for opening index; got: %v", openIndex.Acknowledged)
+ }
+
+ // Flush
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Delete index
+ deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if deleteIndex == nil {
+ t.Fatalf("expected response; got: %v", deleteIndex)
+ }
+ if !deleteIndex.Acknowledged {
+ t.Errorf("expected ack for deleting index; got %v", deleteIndex.Acknowledged)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_analyze.go b/vendor/github.com/olivere/elastic/indices_analyze.go
new file mode 100644
index 000000000..fb3a91234
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_analyze.go
@@ -0,0 +1,284 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesAnalyzeService performs the analysis process on a text and returns
+// the tokens breakdown of the text.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-analyze.html
+// for details.
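+//
+// A minimal usage sketch (the text is an arbitrary example):
+//
+//    res, err := client.IndexAnalyze().
+//        Text("hello hi guy").
+//        Do(ctx)
+//    // res.Tokens holds one entry per token produced by the analyzer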
+type IndicesAnalyzeService struct {
+ client *Client
+ pretty bool
+ index string
+ request *IndicesAnalyzeRequest
+ format string
+ preferLocal *bool
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewIndicesAnalyzeService creates a new IndicesAnalyzeService.
+func NewIndicesAnalyzeService(client *Client) *IndicesAnalyzeService {
+ return &IndicesAnalyzeService{
+ client: client,
+ request: new(IndicesAnalyzeRequest),
+ }
+}
+
+// Index is the name of the index to scope the operation.
+func (s *IndicesAnalyzeService) Index(index string) *IndicesAnalyzeService {
+ s.index = index
+ return s
+}
+
+// Format of the output.
+func (s *IndicesAnalyzeService) Format(format string) *IndicesAnalyzeService {
+ s.format = format
+ return s
+}
+
+// PreferLocal, when true, specifies that a local shard should be used
+// if available. When false, a random shard is used (default: true).
+func (s *IndicesAnalyzeService) PreferLocal(preferLocal bool) *IndicesAnalyzeService {
+ s.preferLocal = &preferLocal
+ return s
+}
+
+// Request passes the analyze request to use.
+func (s *IndicesAnalyzeService) Request(request *IndicesAnalyzeRequest) *IndicesAnalyzeService {
+ if request == nil {
+ s.request = new(IndicesAnalyzeRequest)
+ } else {
+ s.request = request
+ }
+ return s
+}
+
+// Analyzer is the name of the analyzer to use.
+func (s *IndicesAnalyzeService) Analyzer(analyzer string) *IndicesAnalyzeService {
+ s.request.Analyzer = analyzer
+ return s
+}
+
+// Attributes is a list of token attributes to output; this parameter works
+// only with explain=true.
+func (s *IndicesAnalyzeService) Attributes(attributes ...string) *IndicesAnalyzeService {
+ s.request.Attributes = attributes
+ return s
+}
+
+// CharFilter is a list of character filters to use for the analysis.
+func (s *IndicesAnalyzeService) CharFilter(charFilter ...string) *IndicesAnalyzeService {
+ s.request.CharFilter = charFilter
+ return s
+}
+
+// Explain, when true, outputs more advanced details (default: false).
+func (s *IndicesAnalyzeService) Explain(explain bool) *IndicesAnalyzeService {
+ s.request.Explain = explain
+ return s
+}
+
+// Field specifies the field whose configured analyzer is used for the
+// analysis (instead of passing an analyzer name).
+func (s *IndicesAnalyzeService) Field(field string) *IndicesAnalyzeService {
+ s.request.Field = field
+ return s
+}
+
+// Filter is a list of filters to use for the analysis.
+func (s *IndicesAnalyzeService) Filter(filter ...string) *IndicesAnalyzeService {
+ s.request.Filter = filter
+ return s
+}
+
+// Text is the text on which the analysis should be performed (when request body is not used).
+func (s *IndicesAnalyzeService) Text(text ...string) *IndicesAnalyzeService {
+ s.request.Text = text
+ return s
+}
+
+// Tokenizer is the name of the tokenizer to use for the analysis.
+func (s *IndicesAnalyzeService) Tokenizer(tokenizer string) *IndicesAnalyzeService {
+ s.request.Tokenizer = tokenizer
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *IndicesAnalyzeService) Pretty(pretty bool) *IndicesAnalyzeService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson is the text on which the analysis should be performed.
+func (s *IndicesAnalyzeService) BodyJson(body interface{}) *IndicesAnalyzeService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is the text on which the analysis should be performed.
+func (s *IndicesAnalyzeService) BodyString(body string) *IndicesAnalyzeService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesAnalyzeService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+
+ if s.index == "" {
+ path = "/_analyze"
+ } else {
+ path, err = uritemplates.Expand("/{index}/_analyze", map[string]string{
+ "index": s.index,
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.format != "" {
+ params.Set("format", s.format)
+ }
+ if s.preferLocal != nil {
+ params.Set("prefer_local", fmt.Sprintf("%v", *s.preferLocal))
+ }
+
+ return path, params, nil
+}
+
+// Do will execute the request with the given context.
+func (s *IndicesAnalyzeService) Do(ctx context.Context) (*IndicesAnalyzeResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else if s.bodyString != "" {
+ body = s.bodyString
+ } else {
+ // Request parameters are deprecated in 5.1.1, and we must use a JSON
+ // structure in the body to pass the parameters.
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-analyze.html
+ body = s.request
+ }
+
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ ret := new(IndicesAnalyzeResponse)
+ if err = s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+
+ return ret, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesAnalyzeService) Validate() error {
+ var invalid []string
+ if s.bodyJson == nil && s.bodyString == "" {
+ if len(s.request.Text) == 0 {
+ invalid = append(invalid, "Text")
+ }
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// IndicesAnalyzeRequest specifies the parameters of the analyze request.
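+//
+// For example (a sketch; the analyzer name is illustrative), a request with
+// one text and an explicit analyzer marshals to:
+//
+//    {"text":["hello hi guy"],"analyzer":"standard"}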
+type IndicesAnalyzeRequest struct {
+ Text []string `json:"text,omitempty"`
+ Analyzer string `json:"analyzer,omitempty"`
+ Tokenizer string `json:"tokenizer,omitempty"`
+ Filter []string `json:"filter,omitempty"`
+ CharFilter []string `json:"char_filter,omitempty"`
+ Field string `json:"field,omitempty"`
+ Explain bool `json:"explain,omitempty"`
+ Attributes []string `json:"attributes,omitempty"`
+}
+
+// IndicesAnalyzeResponse is the response of IndicesAnalyzeService.Do.
+type IndicesAnalyzeResponse struct {
+ Tokens []IndicesAnalyzeResponseToken `json:"tokens"` // json part for normal message
+ Detail IndicesAnalyzeResponseDetail `json:"detail"` // json part for verbose message of explain request
+}
+
+// IndicesAnalyzeResponseToken is a single token returned by the analysis.
+type IndicesAnalyzeResponseToken struct {
+ Token string `json:"token"`
+ StartOffset int `json:"start_offset"`
+ EndOffset int `json:"end_offset"`
+ Type string `json:"type"`
+ Position int `json:"position"`
+}
+
+// IndicesAnalyzeResponseDetail is the verbose part of the response,
+// returned when the request is run with explain=true.
+type IndicesAnalyzeResponseDetail struct {
+ CustomAnalyzer bool `json:"custom_analyzer"`
+ Charfilters []interface{} `json:"charfilters"`
+ Analyzer struct {
+ Name string `json:"name"`
+ Tokens []struct {
+ Token string `json:"token"`
+ StartOffset int `json:"start_offset"`
+ EndOffset int `json:"end_offset"`
+ Type string `json:"type"`
+ Position int `json:"position"`
+ Bytes string `json:"bytes"`
+ PositionLength int `json:"positionLength"`
+ } `json:"tokens"`
+ } `json:"analyzer"`
+ Tokenizer struct {
+ Name string `json:"name"`
+ Tokens []struct {
+ Token string `json:"token"`
+ StartOffset int `json:"start_offset"`
+ EndOffset int `json:"end_offset"`
+ Type string `json:"type"`
+ Position int `json:"position"`
+ } `json:"tokens"`
+ } `json:"tokenizer"`
+ Tokenfilters []struct {
+ Name string `json:"name"`
+ Tokens []struct {
+ Token string `json:"token"`
+ StartOffset int `json:"start_offset"`
+ EndOffset int `json:"end_offset"`
+ Type string `json:"type"`
+ Position int `json:"position"`
+ Keyword bool `json:"keyword"`
+ } `json:"tokens"`
+ } `json:"tokenfilters"`
+}
diff --git a/vendor/github.com/olivere/elastic/indices_analyze_test.go b/vendor/github.com/olivere/elastic/indices_analyze_test.go
new file mode 100644
index 000000000..90dbf1e73
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_analyze_test.go
@@ -0,0 +1,85 @@
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndicesAnalyzeURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Index string
+ Expected string
+ }{
+ {
+ "",
+ "/_analyze",
+ },
+ {
+ "tweets",
+ "/tweets/_analyze",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.IndexAnalyze().Index(test.Index).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
+
+func TestIndicesAnalyze(t *testing.T) {
+ client := setupTestClient(t)
+ // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ res, err := client.IndexAnalyze().Text("hello hi guy").Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+ if len(res.Tokens) != 3 {
+ t.Fatalf("expected %d, got %d (%+v)", 3, len(res.Tokens), res.Tokens)
+ }
+}
+
+func TestIndicesAnalyzeDetail(t *testing.T) {
+ client := setupTestClient(t)
+ // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ res, err := client.IndexAnalyze().Text("hello hi guy").Explain(true).Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if len(res.Detail.Analyzer.Tokens) != 3 {
+ t.Fatalf("expected %d tokens, got %d (%+v)", 3, len(res.Detail.Tokenizer.Tokens), res.Detail.Tokenizer.Tokens)
+ }
+}
+
+func TestIndicesAnalyzeWithIndex(t *testing.T) {
+ client := setupTestClient(t)
+
+ _, err := client.IndexAnalyze().Index("foo").Text("hello hi guy").Do(context.TODO())
+ if err == nil {
+ t.Fatal("expected error, got nil")
+ }
+ if want, have := "elastic: Error 404 (Not Found): no such index [type=index_not_found_exception]", err.Error(); want != have {
+ t.Fatalf("expected error %q, got %q", want, have)
+ }
+}
+
+func TestIndicesAnalyzeValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ _, err := client.IndexAnalyze().Do(context.TODO())
+ if err == nil {
+ t.Fatal("expected error, got nil")
+ }
+ if want, have := "missing required fields: [Text]", err.Error(); want != have {
+ t.Fatalf("expected error %q, got %q", want, have)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_close.go b/vendor/github.com/olivere/elastic/indices_close.go
new file mode 100644
index 000000000..00ecdf966
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_close.go
@@ -0,0 +1,159 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesCloseService closes an index.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-open-close.html
+// for details.
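+//
+// A minimal usage sketch (the index name is an illustrative placeholder):
+//
+//    res, err := client.CloseIndex("twitter").Do(ctx)
+//    if err == nil && res.Acknowledged {
+//        // the index is now closed
+//    }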
+type IndicesCloseService struct {
+ client *Client
+ pretty bool
+ index string
+ timeout string
+ masterTimeout string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+// NewIndicesCloseService creates and initializes a new IndicesCloseService.
+func NewIndicesCloseService(client *Client) *IndicesCloseService {
+ return &IndicesCloseService{client: client}
+}
+
+// Index is the name of the index to close.
+func (s *IndicesCloseService) Index(index string) *IndicesCloseService {
+ s.index = index
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices. (This includes the `_all` string or when no indices have been specified.)
+func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed or both.
+func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesCloseService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_close", map[string]string{
+ "index": s.index,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesCloseService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesCloseService) Do(ctx context.Context) (*IndicesCloseResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesCloseResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesCloseResponse is the response of IndicesCloseService.Do.
+type IndicesCloseResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
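
For orientation, a minimal usage sketch of the close API from application code follows. It assumes an initialized *elastic.Client, imports of context and fmt, and the CloseIndex helper exercised by the test below; the wrapper name is illustrative only.

    // closeIndex closes the named index and reports whether the cluster
    // acknowledged the operation.
    func closeIndex(ctx context.Context, client *elastic.Client, index string) error {
        res, err := client.CloseIndex(index).Timeout("10s").Do(ctx)
        if err != nil {
            return err
        }
        if !res.Acknowledged {
            return fmt.Errorf("close of index %q was not acknowledged", index)
        }
        return nil
    }
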
diff --git a/vendor/github.com/olivere/elastic/indices_close_test.go b/vendor/github.com/olivere/elastic/indices_close_test.go
new file mode 100644
index 000000000..e7a4d9e05
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_close_test.go
@@ -0,0 +1,84 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+// TODO(oe): Find out why this test fails on Travis CI.
+/*
+func TestIndicesOpenAndClose(t *testing.T) {
+ client := setupTestClient(t)
+
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !createIndex.Acknowledged {
+ t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+ }
+ defer func() {
+ // Delete index
+ deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !deleteIndex.Acknowledged {
+ t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+ }
+ }()
+
+ waitForYellow := func() {
+ // Wait for status yellow
+ res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res != nil && res.TimedOut {
+ t.Fatalf("cluster time out waiting for status %q", "yellow")
+ }
+ }
+
+ // Wait for cluster
+ waitForYellow()
+
+ // Close index
+ cresp, err := client.CloseIndex(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !cresp.Acknowledged {
+ t.Fatalf("expected close index of %q to be acknowledged\n", testIndexName)
+ }
+
+ // Wait for cluster
+ waitForYellow()
+
+ // Open index again
+ oresp, err := client.OpenIndex(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !oresp.Acknowledged {
+ t.Fatalf("expected open index of %q to be acknowledged\n", testIndexName)
+ }
+}
+*/
+
+func TestIndicesCloseValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ // No index name -> fail with error
+ res, err := NewIndicesCloseService(client).Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected IndicesClose to fail without index name")
+ }
+ if res != nil {
+ t.Fatalf("expected result to be == nil; got: %v", res)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_create.go b/vendor/github.com/olivere/elastic/indices_create.go
new file mode 100644
index 000000000..8d8e0c25e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_create.go
@@ -0,0 +1,136 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "errors"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesCreateService creates a new index.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-create-index.html
+// for details.
+type IndicesCreateService struct {
+ client *Client
+ pretty bool
+ index string
+ timeout string
+ masterTimeout string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewIndicesCreateService returns a new IndicesCreateService.
+func NewIndicesCreateService(client *Client) *IndicesCreateService {
+ return &IndicesCreateService{client: client}
+}
+
+// Index is the name of the index to create.
+func (b *IndicesCreateService) Index(index string) *IndicesCreateService {
+ b.index = index
+ return b
+}
+
+// Timeout sets the explicit operation timeout, e.g. "5s".
+func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Body specifies the configuration of the index as a string.
+// It is an alias for BodyString.
+func (b *IndicesCreateService) Body(body string) *IndicesCreateService {
+ b.bodyString = body
+ return b
+}
+
+// BodyString specifies the configuration of the index as a string.
+func (b *IndicesCreateService) BodyString(body string) *IndicesCreateService {
+ b.bodyString = body
+ return b
+}
+
+// BodyJson specifies the configuration of the index. The interface{} will
+// be serialized as a JSON document, so use a map[string]interface{}.
+func (b *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService {
+ b.bodyJson = body
+ return b
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService {
+ b.pretty = pretty
+ return b
+}
+
+// Do executes the operation.
+func (b *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, error) {
+ if b.index == "" {
+ return nil, errors.New("missing index name")
+ }
+
+ // Build url
+ path, err := uritemplates.Expand("/{index}", map[string]string{
+ "index": b.index,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ params := make(url.Values)
+ if b.pretty {
+ params.Set("pretty", "true")
+ }
+ if b.masterTimeout != "" {
+ params.Set("master_timeout", b.masterTimeout)
+ }
+ if b.timeout != "" {
+ params.Set("timeout", b.timeout)
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if b.bodyJson != nil {
+ body = b.bodyJson
+ } else {
+ body = b.bodyString
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ ret := new(IndicesCreateResult)
+ if err := b.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a create index request.
+
+// IndicesCreateResult is the outcome of creating a new index.
+type IndicesCreateResult struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
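
The BodyJson comment above suggests a map[string]interface{}; here is a hedged sketch of that pattern, assuming an initialized *elastic.Client and the CreateIndex helper used in the lifecycle test below (the function name is illustrative).

    // createIndex creates an index with one shard and no replicas.
    func createIndex(ctx context.Context, client *elastic.Client, name string) error {
        body := map[string]interface{}{
            "settings": map[string]interface{}{
                "number_of_shards":   1,
                "number_of_replicas": 0,
            },
        }
        res, err := client.CreateIndex(name).BodyJson(body).Do(ctx)
        if err != nil {
            return err
        }
        if !res.Acknowledged {
            return fmt.Errorf("create of index %q was not acknowledged", name)
        }
        return nil
    }
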
diff --git a/vendor/github.com/olivere/elastic/indices_create_test.go b/vendor/github.com/olivere/elastic/indices_create_test.go
new file mode 100644
index 000000000..f37df1c54
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_create_test.go
@@ -0,0 +1,63 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndicesLifecycle(t *testing.T) {
+ client := setupTestClient(t)
+
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !createIndex.Acknowledged {
+ t.Errorf("expected IndicesCreateResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+ }
+
+ // Check if index exists
+ indexExists, err := client.IndexExists(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !indexExists {
+ t.Fatalf("index %s should exist, but doesn't\n", testIndexName)
+ }
+
+ // Delete index
+ deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !deleteIndex.Acknowledged {
+ t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+ }
+
+ // Check if index exists
+ indexExists, err = client.IndexExists(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexExists {
+ t.Fatalf("index %s should not exist, but does\n", testIndexName)
+ }
+}
+
+func TestIndicesCreateValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ // No index name -> fail with error
+ res, err := NewIndicesCreateService(client).Body(testMapping).Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected IndicesCreate to fail without index name")
+ }
+ if res != nil {
+ t.Fatalf("expected result to be == nil; got: %v", res)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_delete.go b/vendor/github.com/olivere/elastic/indices_delete.go
new file mode 100644
index 000000000..2afeca978
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_delete.go
@@ -0,0 +1,133 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesDeleteService allows deleting existing indices.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-delete-index.html
+// for details.
+type IndicesDeleteService struct {
+ client *Client
+ pretty bool
+ index []string
+ timeout string
+ masterTimeout string
+}
+
+// NewIndicesDeleteService creates and initializes a new IndicesDeleteService.
+func NewIndicesDeleteService(client *Client) *IndicesDeleteService {
+ return &IndicesDeleteService{
+ client: client,
+ index: make([]string, 0),
+ }
+}
+
+// Index sets the list of indices to delete.
+// Use `_all` or `*` to delete all indices.
+func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService {
+ s.index = index
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesDeleteService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesDeleteService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesDeleteService) Do(ctx context.Context) (*IndicesDeleteResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesDeleteResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a delete index request.
+
+// IndicesDeleteResponse is the response of IndicesDeleteService.Do.
+type IndicesDeleteResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+}
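
Note that, unlike most sibling services, Index takes a slice rather than a variadic list. A minimal sketch, assuming an initialized *elastic.Client (helper name illustrative):

    // deleteIndices deletes the given indices in a single call.
    func deleteIndices(ctx context.Context, client *elastic.Client, names []string) error {
        res, err := elastic.NewIndicesDeleteService(client).Index(names).Do(ctx)
        if err != nil {
            return err
        }
        if !res.Acknowledged {
            return fmt.Errorf("delete of %v was not acknowledged", names)
        }
        return nil
    }
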
diff --git a/vendor/github.com/olivere/elastic/indices_delete_template.go b/vendor/github.com/olivere/elastic/indices_delete_template.go
new file mode 100644
index 000000000..0ea34cf89
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_delete_template.go
@@ -0,0 +1,128 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesDeleteTemplateService deletes index templates.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-templates.html.
+type IndicesDeleteTemplateService struct {
+ client *Client
+ pretty bool
+ name string
+ timeout string
+ masterTimeout string
+}
+
+// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService.
+func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService {
+ return &IndicesDeleteTemplateService{
+ client: client,
+ }
+}
+
+// Name is the name of the template.
+func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService {
+ s.name = name
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": s.name,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesDeleteTemplateService) Validate() error {
+ var invalid []string
+ if s.name == "" {
+ invalid = append(invalid, "Name")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesDeleteTemplateService) Do(ctx context.Context) (*IndicesDeleteTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesDeleteTemplateResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do.
+type IndicesDeleteTemplateResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
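
A short sketch of deleting a template via the IndexDeleteTemplate helper that the template test later in this diff relies on; it assumes an initialized *elastic.Client and an illustrative wrapper name.

    // deleteTemplate removes the named index template.
    func deleteTemplate(ctx context.Context, client *elastic.Client, name string) error {
        res, err := client.IndexDeleteTemplate(name).Do(ctx)
        if err != nil {
            return err
        }
        if !res.Acknowledged {
            return fmt.Errorf("delete of template %q was not acknowledged", name)
        }
        return nil
    }
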
diff --git a/vendor/github.com/olivere/elastic/indices_delete_test.go b/vendor/github.com/olivere/elastic/indices_delete_test.go
new file mode 100644
index 000000000..db77c7a25
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_delete_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndicesDeleteValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ // No index name -> fail with error
+ res, err := NewIndicesDeleteService(client).Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected IndicesDelete to fail without index name")
+ }
+ if res != nil {
+ t.Fatalf("expected result to be == nil; got: %v", res)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_exists.go b/vendor/github.com/olivere/elastic/indices_exists.go
new file mode 100644
index 000000000..aa9391039
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_exists.go
@@ -0,0 +1,155 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesExistsService checks whether one or more indices exist.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-exists.html
+// for details.
+type IndicesExistsService struct {
+ client *Client
+ pretty bool
+ index []string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ local *bool
+}
+
+// NewIndicesExistsService creates and initializes a new IndicesExistsService.
+func NewIndicesExistsService(client *Client) *IndicesExistsService {
+ return &IndicesExistsService{
+ client: client,
+ index: make([]string, 0),
+ }
+}
+
+// Index is a list of one or more indices to check.
+func (s *IndicesExistsService) Index(index []string) *IndicesExistsService {
+ s.index = index
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices expression
+// resolves into no concrete indices. (This includes the `_all` string or
+// when no indices have been specified.)
+func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed or both.
+func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Local, when set, returns local information and does not retrieve the state
+// from master node (default: false).
+func (s *IndicesExistsService) Local(local bool) *IndicesExistsService {
+ s.local = &local
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesExistsService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesExistsService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesExistsService) Do(ctx context.Context) (bool, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return false, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return false, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "HEAD",
+ Path: path,
+ Params: params,
+ IgnoreErrors: []int{404},
+ })
+ if err != nil {
+ return false, err
+ }
+
+ // Return operation response
+ switch res.StatusCode {
+ case http.StatusOK:
+ return true, nil
+ case http.StatusNotFound:
+ return false, nil
+ default:
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+ }
+}
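
Because the service issues a HEAD request and maps 200/404 to true/false, a missing index is not an error. A minimal sketch under the same assumptions as the earlier examples:

    // indexExists reports whether the named index exists.
    func indexExists(ctx context.Context, client *elastic.Client, name string) (bool, error) {
        return elastic.NewIndicesExistsService(client).Index([]string{name}).Do(ctx)
    }
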
diff --git a/vendor/github.com/olivere/elastic/indices_exists_template.go b/vendor/github.com/olivere/elastic/indices_exists_template.go
new file mode 100644
index 000000000..40b06e895
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_exists_template.go
@@ -0,0 +1,118 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesExistsTemplateService checks if a given template exists.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html#indices-templates-exists
+// for documentation.
+type IndicesExistsTemplateService struct {
+ client *Client
+ pretty bool
+ name string
+ local *bool
+}
+
+// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService.
+func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService {
+ return &IndicesExistsTemplateService{
+ client: client,
+ }
+}
+
+// Name is the name of the template.
+func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService {
+ s.name = name
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": s.name,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesExistsTemplateService) Validate() error {
+ var invalid []string
+ if s.name == "" {
+ invalid = append(invalid, "Name")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesExistsTemplateService) Do(ctx context.Context) (bool, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return false, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return false, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "HEAD",
+ Path: path,
+ Params: params,
+ IgnoreErrors: []int{404},
+ })
+ if err != nil {
+ return false, err
+ }
+
+ // Return operation response
+ switch res.StatusCode {
+ case http.StatusOK:
+ return true, nil
+ case http.StatusNotFound:
+ return false, nil
+ default:
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+ }
+}
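
A sketch using the IndexTemplateExists helper shown in the test that follows; Local(true) answers from the node's local cluster state instead of asking the master node.

    // templateExists reports whether the named index template exists.
    func templateExists(ctx context.Context, client *elastic.Client, name string) (bool, error) {
        return client.IndexTemplateExists(name).Local(true).Do(ctx)
    }
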
diff --git a/vendor/github.com/olivere/elastic/indices_exists_template_test.go b/vendor/github.com/olivere/elastic/indices_exists_template_test.go
new file mode 100644
index 000000000..a97442971
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_exists_template_test.go
@@ -0,0 +1,68 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndexExistsTemplate(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tmpl := `{
+ "index_patterns":["elastic-test*"],
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
+ "properties":{
+ "tags":{
+ "type":"keyword"
+ },
+ "location":{
+ "type":"geo_point"
+ },
+ "suggest_field":{
+ "type":"completion"
+ }
+ }
+ }
+ }
+}`
+ putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if putres == nil {
+ t.Fatalf("expected response; got: %v", putres)
+ }
+ if !putres.Acknowledged {
+ t.Fatalf("expected index template to be ack'd; got: %v", putres.Acknowledged)
+ }
+
+ // Always delete template
+ defer client.IndexDeleteTemplate("elastic-template").Do(context.TODO())
+
+ // Check if template exists
+ exists, err := client.IndexTemplateExists("elastic-template").Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if !exists {
+ t.Fatalf("expected index template %q to exist; got: %v", "elastic-template", exists)
+ }
+
+ // Get template
+ getres, err := client.IndexGetTemplate("elastic-template").Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if getres == nil {
+ t.Fatalf("expected to get index template %q; got: %v", "elastic-template", getres)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_exists_test.go b/vendor/github.com/olivere/elastic/indices_exists_test.go
new file mode 100644
index 000000000..07e3eb518
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_exists_test.go
@@ -0,0 +1,23 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndicesExistsWithoutIndex(t *testing.T) {
+ client := setupTestClient(t)
+
+ // No index name -> fail with error
+ res, err := NewIndicesExistsService(client).Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected IndicesExists to fail without index name")
+ }
+ if res != false {
+ t.Fatalf("expected result to be false; got: %v", res)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_exists_type.go b/vendor/github.com/olivere/elastic/indices_exists_type.go
new file mode 100644
index 000000000..a4d1ff610
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_exists_type.go
@@ -0,0 +1,165 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesExistsTypeService checks if one or more types exist in one or more indices.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-types-exists.html
+// for details.
+type IndicesExistsTypeService struct {
+ client *Client
+ pretty bool
+ typ []string
+ index []string
+ expandWildcards string
+ local *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+}
+
+// NewIndicesExistsTypeService creates a new IndicesExistsTypeService.
+func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService {
+ return &IndicesExistsTypeService{
+ client: client,
+ }
+}
+
+// Index is a list of index names; use `_all` to check the types across all indices.
+func (s *IndicesExistsTypeService) Index(indices ...string) *IndicesExistsTypeService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Type is a list of document types to check.
+func (s *IndicesExistsTypeService) Type(types ...string) *IndicesExistsTypeService {
+ s.typ = append(s.typ, types...)
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes the `_all` string or when no indices have been specified.)
+func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed or both.
+func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Local specifies whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "type": strings.Join(s.typ, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesExistsTypeService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(s.typ) == 0 {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesExistsTypeService) Do(ctx context.Context) (bool, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return false, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return false, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "HEAD",
+ Path: path,
+ Params: params,
+ IgnoreErrors: []int{404},
+ })
+ if err != nil {
+ return false, err
+ }
+
+ // Return operation response
+ switch res.StatusCode {
+ case http.StatusOK:
+ return true, nil
+ case http.StatusNotFound:
+ return false, nil
+ default:
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+ }
+}
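
Both Index and Type are required (Validate rejects the call otherwise), and the check is a HEAD on /{index}/_mapping/{type}. A sketch via the TypeExists helper used in the tests below:

    // typeExists reports whether the mapping type exists in the index.
    func typeExists(ctx context.Context, client *elastic.Client, index, typ string) (bool, error) {
        return client.TypeExists().Index(index).Type(typ).Do(ctx)
    }
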
diff --git a/vendor/github.com/olivere/elastic/indices_exists_type_test.go b/vendor/github.com/olivere/elastic/indices_exists_type_test.go
new file mode 100644
index 000000000..3795bd042
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_exists_type_test.go
@@ -0,0 +1,135 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndicesExistsTypeBuildURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Types []string
+ Expected string
+ ExpectValidateFailure bool
+ }{
+ {
+ []string{},
+ []string{},
+ "",
+ true,
+ },
+ {
+ []string{"index1"},
+ []string{},
+ "",
+ true,
+ },
+ {
+ []string{},
+ []string{"type1"},
+ "",
+ true,
+ },
+ {
+ []string{"index1"},
+ []string{"type1"},
+ "/index1/_mapping/type1",
+ false,
+ },
+ {
+ []string{"index1", "index2"},
+ []string{"type1"},
+ "/index1%2Cindex2/_mapping/type1",
+ false,
+ },
+ {
+ []string{"index1", "index2"},
+ []string{"type1", "type2"},
+ "/index1%2Cindex2/_mapping/type1%2Ctype2",
+ false,
+ },
+ }
+
+ for i, test := range tests {
+ err := client.TypeExists().Index(test.Indices...).Type(test.Types...).Validate()
+ if err == nil && test.ExpectValidateFailure {
+ t.Errorf("#%d: expected validate to fail", i+1)
+ continue
+ }
+ if err != nil && !test.ExpectValidateFailure {
+ t.Errorf("#%d: expected validate to succeed", i+1)
+ continue
+ }
+ if !test.ExpectValidateFailure {
+ path, _, err := client.TypeExists().Index(test.Indices...).Type(test.Types...).buildURL()
+ if err != nil {
+ t.Fatalf("#%d: %v", i+1, err)
+ }
+ if path != test.Expected {
+ t.Errorf("#%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+ }
+}
+
+func TestIndicesExistsType(t *testing.T) {
+ client := setupTestClient(t)
+
+ // Create index with a "doc" mapping type
+ createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex)
+ }
+ if !createIndex.Acknowledged {
+ t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+ }
+
+ // Check if type exists
+ exists, err := client.TypeExists().Index(testIndexName).Type("doc").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !exists {
+ t.Fatalf("type %s should exist in index %s, but doesn't\n", "doc", testIndexName)
+ }
+
+ // Delete index
+ deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !deleteIndex.Acknowledged {
+ t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+ }
+
+ // Check if type exists
+ exists, err = client.TypeExists().Index(testIndexName).Type("doc").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if exists {
+ t.Fatalf("type %s should not exist in index %s, but it does\n", "doc", testIndexName)
+ }
+}
+
+func TestIndicesExistsTypeValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ // No index name -> fail with error
+ res, err := NewIndicesExistsTypeService(client).Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected IndicesExistsType to fail without index name")
+ }
+ if res != false {
+ t.Fatalf("expected result to be false; got: %v", res)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_flush.go b/vendor/github.com/olivere/elastic/indices_flush.go
new file mode 100644
index 000000000..113e53803
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_flush.go
@@ -0,0 +1,173 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesFlushService allows flushing one or more indices. The flush
+// process of an index frees memory by flushing data to the index
+// storage and clearing the internal transaction log.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-flush.html
+// for details.
+type IndicesFlushService struct {
+ client *Client
+ pretty bool
+ index []string
+ force *bool
+ waitIfOngoing *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+// NewIndicesFlushService creates a new IndicesFlushService.
+func NewIndicesFlushService(client *Client) *IndicesFlushService {
+ return &IndicesFlushService{
+ client: client,
+ index: make([]string, 0),
+ }
+}
+
+// Index is a list of index names; use `_all` or empty string for all indices.
+func (s *IndicesFlushService) Index(indices ...string) *IndicesFlushService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Force indicates whether a flush should be forced even if it is not
+// necessarily needed, i.e. if no changes will be committed to the index.
+// This is useful if transaction log IDs should be incremented even if
+// no uncommitted changes are present. (This setting can be considered internal.)
+func (s *IndicesFlushService) Force(force bool) *IndicesFlushService {
+ s.force = &force
+ return s
+}
+
+// WaitIfOngoing, if set to true, indicates that the flush operation will
+// block until it can be executed if another flush operation is
+// already executing. The default is false and causes an exception
+// to be thrown on the shard level if another flush operation is already running.
+func (s *IndicesFlushService) WaitIfOngoing(waitIfOngoing bool) *IndicesFlushService {
+ s.waitIfOngoing = &waitIfOngoing
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFlushService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices expression
+// resolves into no concrete indices. (This includes the `_all` string or when
+// no indices have been specified.)
+func (s *IndicesFlushService) AllowNoIndices(allowNoIndices bool) *IndicesFlushService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards specifies whether to expand wildcard expressions to
+// concrete indices that are open, closed or both.
+func (s *IndicesFlushService) ExpandWildcards(expandWildcards string) *IndicesFlushService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesFlushService) Pretty(pretty bool) *IndicesFlushService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesFlushService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+
+ if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_flush", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else {
+ path = "/_flush"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.force != nil {
+ params.Set("force", fmt.Sprintf("%v", *s.force))
+ }
+ if s.waitIfOngoing != nil {
+ params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesFlushService) Validate() error {
+ return nil
+}
+
+// Do executes the service.
+func (s *IndicesFlushService) Do(ctx context.Context) (*IndicesFlushResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesFlushResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a flush request.
+
+type IndicesFlushResponse struct {
+ Shards shardsInfo `json:"_shards"`
+}
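
A sketch of flushing a single index with the Flush helper used in the test below. It assumes the shard summary type exposes a Failed count, as it does elsewhere in this client; the wrapper name is illustrative.

    // flushIndex flushes one index, waiting if a flush is already running.
    func flushIndex(ctx context.Context, client *elastic.Client, index string) error {
        res, err := client.Flush().Index(index).WaitIfOngoing(true).Do(ctx)
        if err != nil {
            return err
        }
        if res.Shards.Failed > 0 {
            return fmt.Errorf("flush of %q failed on %d shard(s)", index, res.Shards.Failed)
        }
        return nil
    }
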
diff --git a/vendor/github.com/olivere/elastic/indices_flush_test.go b/vendor/github.com/olivere/elastic/indices_flush_test.go
new file mode 100644
index 000000000..afefd1251
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_flush_test.go
@@ -0,0 +1,70 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestFlush(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Flush all indices
+ res, err := client.Flush().Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Errorf("expected res to be != nil; got: %v", res)
+ }
+}
+
+func TestFlushBuildURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Expected string
+ ExpectValidateFailure bool
+ }{
+ {
+ []string{},
+ "/_flush",
+ false,
+ },
+ {
+ []string{"index1"},
+ "/index1/_flush",
+ false,
+ },
+ {
+ []string{"index1", "index2"},
+ "/index1%2Cindex2/_flush",
+ false,
+ },
+ }
+
+ for i, test := range tests {
+ err := NewIndicesFlushService(client).Index(test.Indices...).Validate()
+ if err == nil && test.ExpectValidateFailure {
+ t.Errorf("case #%d: expected validate to fail", i+1)
+ continue
+ }
+ if err != nil && !test.ExpectValidateFailure {
+ t.Errorf("case #%d: expected validate to succeed", i+1)
+ continue
+ }
+ if !test.ExpectValidateFailure {
+ path, _, err := NewIndicesFlushService(client).Index(test.Indices...).buildURL()
+ if err != nil {
+ t.Fatalf("case #%d: %v", i+1, err)
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_forcemerge.go b/vendor/github.com/olivere/elastic/indices_forcemerge.go
new file mode 100644
index 000000000..0e999cf19
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_forcemerge.go
@@ -0,0 +1,193 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesForcemergeService allows force-merging one or more indices.
+// The merge relates to the number of segments a Lucene index holds
+// within each shard. The force merge operation reduces the number
+// of segments by merging them.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-forcemerge.html
+// for more information.
+type IndicesForcemergeService struct {
+ client *Client
+ pretty bool
+ index []string
+ allowNoIndices *bool
+ expandWildcards string
+ flush *bool
+ ignoreUnavailable *bool
+ maxNumSegments interface{}
+ onlyExpungeDeletes *bool
+ operationThreading interface{}
+}
+
+// NewIndicesForcemergeService creates a new IndicesForcemergeService.
+func NewIndicesForcemergeService(client *Client) *IndicesForcemergeService {
+ return &IndicesForcemergeService{
+ client: client,
+ index: make([]string, 0),
+ }
+}
+
+// Index is a list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *IndicesForcemergeService) Index(index ...string) *IndicesForcemergeService {
+ if s.index == nil {
+ s.index = make([]string, 0)
+ }
+ s.index = append(s.index, index...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes the `_all` string or when no indices have been specified.)
+func (s *IndicesForcemergeService) AllowNoIndices(allowNoIndices bool) *IndicesForcemergeService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed or both.
+func (s *IndicesForcemergeService) ExpandWildcards(expandWildcards string) *IndicesForcemergeService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Flush specifies whether the index should be flushed after performing
+// the operation (default: true).
+func (s *IndicesForcemergeService) Flush(flush bool) *IndicesForcemergeService {
+ s.flush = &flush
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesForcemergeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesForcemergeService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// MaxNumSegments specifies the number of segments the index should be
+// merged into (default: dynamic).
+func (s *IndicesForcemergeService) MaxNumSegments(maxNumSegments interface{}) *IndicesForcemergeService {
+ s.maxNumSegments = maxNumSegments
+ return s
+}
+
+// OnlyExpungeDeletes specifies whether the operation should only expunge
+// deleted documents.
+func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *IndicesForcemergeService {
+ s.onlyExpungeDeletes = &onlyExpungeDeletes
+ return s
+}
+
+// OperationThreading is passed through verbatim to Elasticsearch as the
+// `operation_threading` query string parameter.
+func (s *IndicesForcemergeService) OperationThreading(operationThreading interface{}) *IndicesForcemergeService {
+ s.operationThreading = operationThreading
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ // Build URL
+ if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_forcemerge", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else {
+ path = "/_forcemerge"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.flush != nil {
+ params.Set("flush", fmt.Sprintf("%v", *s.flush))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.maxNumSegments != nil {
+ params.Set("max_num_segments", fmt.Sprintf("%v", s.maxNumSegments))
+ }
+ if s.onlyExpungeDeletes != nil {
+ params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
+ }
+ if s.operationThreading != nil {
+ params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesForcemergeService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesForcemergeService) Do(ctx context.Context) (*IndicesForcemergeResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesForcemergeResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesForcemergeResponse is the response of IndicesForcemergeService.Do.
+type IndicesForcemergeResponse struct {
+ Shards shardsInfo `json:"_shards"`
+}
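
Merging down to a single segment is the usual way to compact a read-only index, mirroring the test below. Same assumptions as the flush sketch, including the Failed count on the shard summary:

    // compactIndex force-merges the index down to one segment.
    func compactIndex(ctx context.Context, client *elastic.Client, index string) error {
        res, err := client.Forcemerge(index).MaxNumSegments(1).Do(ctx)
        if err != nil {
            return err
        }
        if res.Shards.Failed > 0 {
            return fmt.Errorf("forcemerge of %q failed on %d shard(s)", index, res.Shards.Failed)
        }
        return nil
    }
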
diff --git a/vendor/github.com/olivere/elastic/indices_forcemerge_test.go b/vendor/github.com/olivere/elastic/indices_forcemerge_test.go
new file mode 100644
index 000000000..6615d4dc6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_forcemerge_test.go
@@ -0,0 +1,57 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndicesForcemergeBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Indices []string
+ Expected string
+ }{
+ {
+ []string{},
+ "/_forcemerge",
+ },
+ {
+ []string{"index1"},
+ "/index1/_forcemerge",
+ },
+ {
+ []string{"index1", "index2"},
+ "/index1%2Cindex2/_forcemerge",
+ },
+ }
+
+ for i, test := range tests {
+ path, _, err := client.Forcemerge().Index(test.Indices...).buildURL()
+ if err != nil {
+ t.Errorf("case #%d: %v", i+1, err)
+ continue
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+}
+
+func TestIndicesForcemerge(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ _, err := client.Forcemerge(testIndexName).MaxNumSegments(1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ /*
+ if !ok {
+ t.Fatalf("expected forcemerge to succeed; got: %v", ok)
+ }
+ */
+}
diff --git a/vendor/github.com/olivere/elastic/indices_get.go b/vendor/github.com/olivere/elastic/indices_get.go
new file mode 100644
index 000000000..cb4e449d5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get.go
@@ -0,0 +1,206 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesGetService retrieves information about one or more indices.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-index.html
+// for more details.
+type IndicesGetService struct {
+ client *Client
+ pretty bool
+ index []string
+ feature []string
+ local *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ flatSettings *bool
+ human *bool
+}
+
+// NewIndicesGetService creates a new IndicesGetService.
+func NewIndicesGetService(client *Client) *IndicesGetService {
+ return &IndicesGetService{
+ client: client,
+ index: make([]string, 0),
+ feature: make([]string, 0),
+ }
+}
+
+// Index is a list of index names.
+func (s *IndicesGetService) Index(indices ...string) *IndicesGetService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Feature is a list of index features to return, such as `_settings`,
+// `_mappings` or `_aliases`.
+func (s *IndicesGetService) Feature(features ...string) *IndicesGetService {
+ s.feature = append(s.feature, features...)
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetService) Local(local bool) *IndicesGetService {
+ s.local = &local
+ return s
+}
+
+// IgnoreUnavailable indicates whether to ignore unavailable indices (default: false).
+func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard expression
+// resolves to no concrete indices (default: false).
+func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether wildcard expressions should get
+// expanded to open or closed indices (default: open).
+func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+/* Disabled because serialization would fail in that case. */
+/*
+// FlatSettings make the service return settings in flat format (default: false).
+func (s *IndicesGetService) FlatSettings(flatSettings bool) *IndicesGetService {
+ s.flatSettings = &flatSettings
+ return s
+}
+*/
+
+// Human indicates whether to return version and creation date values
+// in human-readable format (default: false).
+func (s *IndicesGetService) Human(human bool) *IndicesGetService {
+ s.human = &human
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+ var index []string
+
+ if len(s.index) > 0 {
+ index = s.index
+ } else {
+ index = []string{"_all"}
+ }
+
+ if len(s.feature) > 0 {
+ // Build URL
+ path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{
+ "index": strings.Join(index, ","),
+ "feature": strings.Join(s.feature, ","),
+ })
+ } else {
+ // Build URL
+ path, err = uritemplates.Expand("/{index}", map[string]string{
+ "index": strings.Join(index, ","),
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetService) Do(ctx context.Context) (map[string]*IndicesGetResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]*IndicesGetResponse
+ if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesGetResponse is part of the response of IndicesGetService.Do.
+type IndicesGetResponse struct {
+ Aliases map[string]interface{} `json:"aliases"`
+ Mappings map[string]interface{} `json:"mappings"`
+ Settings map[string]interface{} `json:"settings"`
+ Warmers map[string]interface{} `json:"warmers"`
+}
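For reference, a minimal usage sketch of IndicesGetService, assuming an already-connected *elastic.Client (client.IndexGet() is the convenience accessor exercised in indices_get_test.go further below; the index name "twitter" is illustrative):

    // Assumes: import ("context"; "fmt"; "github.com/olivere/elastic")
    func showIndexInfo(ctx context.Context, client *elastic.Client) error {
        // GET /twitter returns aliases, mappings and settings per index.
        res, err := client.IndexGet().Index("twitter").Do(ctx)
        if err != nil {
            return err
        }
        if info, found := res["twitter"]; found {
            fmt.Printf("mappings: %v\n", info.Mappings)
        }
        return nil
    }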
diff --git a/vendor/github.com/olivere/elastic/indices_get_aliases.go b/vendor/github.com/olivere/elastic/indices_get_aliases.go
new file mode 100644
index 000000000..68b186358
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get_aliases.go
@@ -0,0 +1,161 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// AliasesService returns the aliases associated with one or more indices.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-aliases.html.
+type AliasesService struct {
+ client *Client
+ index []string
+ pretty bool
+}
+
+// NewAliasesService instantiates a new AliasesService.
+func NewAliasesService(client *Client) *AliasesService {
+ builder := &AliasesService{
+ client: client,
+ }
+ return builder
+}
+
+// Pretty asks Elasticsearch to indent the returned JSON.
+func (s *AliasesService) Pretty(pretty bool) *AliasesService {
+ s.pretty = pretty
+ return s
+}
+
+// Index adds one or more indices.
+func (s *AliasesService) Index(index ...string) *AliasesService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *AliasesService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_alias", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else {
+ path = "/_alias"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ return path, params, nil
+}
+
+func (s *AliasesService) Do(ctx context.Context) (*AliasesResult, error) {
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // {
+ // "indexName" : {
+ // "aliases" : {
+ // "alias1" : { },
+ // "alias2" : { }
+ // }
+ // },
+ // "indexName2" : {
+ // ...
+ // },
+ // }
+ indexMap := make(map[string]interface{})
+ if err := s.client.decoder.Decode(res.Body, &indexMap); err != nil {
+ return nil, err
+ }
+
+ // Convert the response into an AliasesResult, one entry per index.
+ ret := &AliasesResult{
+ Indices: make(map[string]indexResult),
+ }
+ for indexName, indexData := range indexMap {
+ indexOut, found := ret.Indices[indexName]
+ if !found {
+ indexOut = indexResult{Aliases: make([]aliasResult, 0)}
+ }
+
+ // { "aliases" : { ... } }
+ indexDataMap, ok := indexData.(map[string]interface{})
+ if ok {
+ aliasesData, ok := indexDataMap["aliases"].(map[string]interface{})
+ if ok {
+ for aliasName := range aliasesData {
+ aliasRes := aliasResult{AliasName: aliasName}
+ indexOut.Aliases = append(indexOut.Aliases, aliasRes)
+ }
+ }
+ }
+
+ ret.Indices[indexName] = indexOut
+ }
+
+ return ret, nil
+}
+
+// -- Result of an alias request.
+
+type AliasesResult struct {
+ Indices map[string]indexResult
+}
+
+type indexResult struct {
+ Aliases []aliasResult
+}
+
+type aliasResult struct {
+ AliasName string
+}
+
+func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
+ var indices []string
+ for indexName, indexInfo := range ar.Indices {
+ for _, aliasInfo := range indexInfo.Aliases {
+ if aliasInfo.AliasName == aliasName {
+ indices = append(indices, indexName)
+ }
+ }
+ }
+ return indices
+}
+
+func (ir indexResult) HasAlias(aliasName string) bool {
+ for _, alias := range ir.Aliases {
+ if alias.AliasName == aliasName {
+ return true
+ }
+ }
+ return false
+}
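A similar sketch for AliasesService (client.Aliases() is the accessor used in the test file that follows; alias and index names are illustrative):

    // Assumes: import ("context"; "fmt"; "github.com/olivere/elastic")
    func showAliases(ctx context.Context, client *elastic.Client) error {
        // GET /index1,index2/_alias
        res, err := client.Aliases().Index("index1", "index2").Do(ctx)
        if err != nil {
            return err
        }
        for indexName, index := range res.Indices {
            fmt.Println(indexName, "has", len(index.Aliases), "aliases")
        }
        // Reverse lookup: which indices carry alias "alias1"?
        fmt.Println(res.IndicesByAlias("alias1"))
        return nil
    }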
diff --git a/vendor/github.com/olivere/elastic/indices_get_aliases_test.go b/vendor/github.com/olivere/elastic/indices_get_aliases_test.go
new file mode 100644
index 000000000..2c8da9b7f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get_aliases_test.go
@@ -0,0 +1,181 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestAliasesBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Indices []string
+ Expected string
+ }{
+ {
+ []string{},
+ "/_alias",
+ },
+ {
+ []string{"index1"},
+ "/index1/_alias",
+ },
+ {
+ []string{"index1", "index2"},
+ "/index1%2Cindex2/_alias",
+ },
+ }
+
+ for i, test := range tests {
+ path, _, err := client.Aliases().Index(test.Indices...).buildURL()
+ if err != nil {
+ t.Errorf("case #%d: %v", i+1, err)
+ continue
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+}
+
+func TestAliases(t *testing.T) {
+ var err error
+
+ //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ // Some tweets
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
+ tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+ // Add tweets to first index
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Add tweets to second index
+ _, err = client.Index().Index(testIndexName2).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Flush
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Flush().Index(testIndexName2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Alias should not yet exist
+ aliasesResult1, err := client.Aliases().
+ Index(testIndexName, testIndexName2).
+ Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(aliasesResult1.Indices) != 2 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult1.Indices))
+ }
+ for indexName, indexDetails := range aliasesResult1.Indices {
+ if len(indexDetails.Aliases) != 0 {
+ t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+ }
+ }
+
+ // Add both indices to a new alias
+ aliasCreate, err := client.Alias().
+ Add(testIndexName, testAliasName).
+ Add(testIndexName2, testAliasName).
+ //Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !aliasCreate.Acknowledged {
+ t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
+ }
+
+ // Alias should now exist
+ aliasesResult2, err := client.Aliases().
+ Index(testIndexName, testIndexName2).
+ //Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(aliasesResult2.Indices) != 2 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
+ }
+ for indexName, indexDetails := range aliasesResult2.Indices {
+ if len(indexDetails.Aliases) != 1 {
+ t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+ }
+ }
+
+ // Check the reverse function:
+ indexInfo1, found := aliasesResult2.Indices[testIndexName]
+ if !found {
+ t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
+ }
+ aliasFound := indexInfo1.HasAlias(testAliasName)
+ if !aliasFound {
+ t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName, aliasFound)
+ }
+
+ // Check the reverse function:
+ indexInfo2, found := aliasesResult2.Indices[testIndexName2]
+ if !found {
+ t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
+ }
+ aliasFound = indexInfo2.HasAlias(testAliasName)
+ if !aliasFound {
+ t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName2, aliasFound)
+ }
+
+ // Remove the alias from the first index
+ aliasRemove1, err := client.Alias().
+ Remove(testIndexName, testAliasName).
+ //Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !aliasRemove1.Acknowledged {
+ t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
+ }
+
+ // Alias should now exist only for index 2
+ aliasesResult3, err := client.Aliases().Index(testIndexName, testIndexName2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(aliasesResult3.Indices) != 2 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult3.Indices))
+ }
+ for indexName, indexDetails := range aliasesResult3.Indices {
+ if indexName == testIndexName {
+ if len(indexDetails.Aliases) != 0 {
+ t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+ }
+ } else if indexName == testIndexName2 {
+ if len(indexDetails.Aliases) != 1 {
+ t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+ }
+ } else {
+ t.Errorf("got index %s", indexName)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_get_field_mapping.go b/vendor/github.com/olivere/elastic/indices_get_field_mapping.go
new file mode 100644
index 000000000..e3b7eac07
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get_field_mapping.go
@@ -0,0 +1,187 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesGetFieldMappingService retrieves the mapping definitions for the fields in an index
+// or index/type.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-field-mapping.html
+// for details.
+type IndicesGetFieldMappingService struct {
+ client *Client
+ pretty bool
+ index []string
+ typ []string
+ field []string
+ local *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+// NewGetFieldMappingService is an alias for NewIndicesGetFieldMappingService.
+// Use NewIndicesGetFieldMappingService.
+func NewGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
+ return NewIndicesGetFieldMappingService(client)
+}
+
+// NewIndicesGetFieldMappingService creates a new IndicesGetFieldMappingService.
+func NewIndicesGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
+ return &IndicesGetFieldMappingService{
+ client: client,
+ }
+}
+
+// Index is a list of index names.
+func (s *IndicesGetFieldMappingService) Index(indices ...string) *IndicesGetFieldMappingService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Type is a list of document types.
+func (s *IndicesGetFieldMappingService) Type(types ...string) *IndicesGetFieldMappingService {
+ s.typ = append(s.typ, types...)
+ return s
+}
+
+// Field is a list of fields.
+func (s *IndicesGetFieldMappingService) Field(fields ...string) *IndicesGetFieldMappingService {
+ s.field = append(s.field, fields...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// This includes the `_all` string or when no indices have been specified.
+func (s *IndicesGetFieldMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetFieldMappingService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed, or both.
+func (s *IndicesGetFieldMappingService) ExpandWildcards(expandWildcards string) *IndicesGetFieldMappingService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from the master node (default: false).
+func (s *IndicesGetFieldMappingService) Local(local bool) *IndicesGetFieldMappingService {
+ s.local = &local
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesGetFieldMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetFieldMappingService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesGetFieldMappingService) Pretty(pretty bool) *IndicesGetFieldMappingService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetFieldMappingService) buildURL() (string, url.Values, error) {
+ var index, typ, field []string
+
+ if len(s.index) > 0 {
+ index = s.index
+ } else {
+ index = []string{"_all"}
+ }
+
+ if len(s.typ) > 0 {
+ typ = s.typ
+ } else {
+ typ = []string{"_all"}
+ }
+
+ if len(s.field) > 0 {
+ field = s.field
+ } else {
+ field = []string{"*"}
+ }
+
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_mapping/{type}/field/{field}", map[string]string{
+ "index": strings.Join(index, ","),
+ "type": strings.Join(typ, ","),
+ "field": strings.Join(field, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetFieldMappingService) Validate() error {
+ return nil
+}
+
+// Do executes the operation. It returns mapping definitions for an index
+// or index/type.
+func (s *IndicesGetFieldMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
+ var ret map[string]interface{}
+
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
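A sketch for IndicesGetFieldMappingService (client.GetFieldMapping() is the accessor used in the test below; index, type and field names are illustrative):

    // Assumes: import ("context"; "github.com/olivere/elastic")
    func showFieldMapping(ctx context.Context, client *elastic.Client) (map[string]interface{}, error) {
        // GET /twitter/_mapping/tweet/field/message; the result is the
        // raw JSON response decoded into a map, keyed by index name.
        return client.GetFieldMapping().
            Index("twitter").
            Type("tweet").
            Field("message").
            Do(ctx)
    }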
diff --git a/vendor/github.com/olivere/elastic/indices_get_field_mapping_test.go b/vendor/github.com/olivere/elastic/indices_get_field_mapping_test.go
new file mode 100644
index 000000000..62770e030
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get_field_mapping_test.go
@@ -0,0 +1,55 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestIndicesGetFieldMappingURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Types []string
+ Fields []string
+ Expected string
+ }{
+ {
+ []string{},
+ []string{},
+ []string{},
+ "/_all/_mapping/_all/field/%2A",
+ },
+ {
+ []string{},
+ []string{"tweet"},
+ []string{"message"},
+ "/_all/_mapping/tweet/field/message",
+ },
+ {
+ []string{"twitter"},
+ []string{"tweet"},
+ []string{"*.id"},
+ "/twitter/_mapping/tweet/field/%2A.id",
+ },
+ {
+ []string{"store-1", "store-2"},
+ []string{"tweet", "user"},
+ []string{"message", "*.id"},
+ "/store-1%2Cstore-2/_mapping/tweet%2Cuser/field/message%2C%2A.id",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.GetFieldMapping().Index(test.Indices...).Type(test.Types...).Field(test.Fields...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_get_mapping.go b/vendor/github.com/olivere/elastic/indices_get_mapping.go
new file mode 100644
index 000000000..7f9c9cb22
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get_mapping.go
@@ -0,0 +1,174 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesGetMappingService retrieves the mapping definitions for an index or
+// index/type.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-mapping.html
+// for details.
+type IndicesGetMappingService struct {
+ client *Client
+ pretty bool
+ index []string
+ typ []string
+ local *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+// NewGetMappingService is an alias for NewIndicesGetMappingService.
+// Use NewIndicesGetMappingService.
+func NewGetMappingService(client *Client) *IndicesGetMappingService {
+ return NewIndicesGetMappingService(client)
+}
+
+// NewIndicesGetMappingService creates a new IndicesGetMappingService.
+func NewIndicesGetMappingService(client *Client) *IndicesGetMappingService {
+ return &IndicesGetMappingService{
+ client: client,
+ index: make([]string, 0),
+ typ: make([]string, 0),
+ }
+}
+
+// Index is a list of index names.
+func (s *IndicesGetMappingService) Index(indices ...string) *IndicesGetMappingService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Type is a list of document types.
+func (s *IndicesGetMappingService) Type(types ...string) *IndicesGetMappingService {
+ s.typ = append(s.typ, types...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// This includes the `_all` string or when no indices have been specified.
+func (s *IndicesGetMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetMappingService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed, or both.
+func (s *IndicesGetMappingService) ExpandWildcards(expandWildcards string) *IndicesGetMappingService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from the master node (default: false).
+func (s *IndicesGetMappingService) Local(local bool) *IndicesGetMappingService {
+ s.local = &local
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesGetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetMappingService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesGetMappingService) Pretty(pretty bool) *IndicesGetMappingService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetMappingService) buildURL() (string, url.Values, error) {
+ var index, typ []string
+
+ if len(s.index) > 0 {
+ index = s.index
+ } else {
+ index = []string{"_all"}
+ }
+
+ if len(s.typ) > 0 {
+ typ = s.typ
+ } else {
+ typ = []string{"_all"}
+ }
+
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+ "index": strings.Join(index, ","),
+ "type": strings.Join(typ, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetMappingService) Validate() error {
+ return nil
+}
+
+// Do executes the operation. It returns mapping definitions for an index
+// or index/type.
+func (s *IndicesGetMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]interface{}
+ if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
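A sketch for IndicesGetMappingService (client.GetMapping() is the accessor used in the test below):

    // Assumes: import ("context"; "github.com/olivere/elastic")
    func showMapping(ctx context.Context, client *elastic.Client) (map[string]interface{}, error) {
        // GET /twitter/_mapping/tweet; omitting Index or Type falls back
        // to "_all", as buildURL above shows.
        return client.GetMapping().Index("twitter").Type("tweet").Do(ctx)
    }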
diff --git a/vendor/github.com/olivere/elastic/indices_get_mapping_test.go b/vendor/github.com/olivere/elastic/indices_get_mapping_test.go
new file mode 100644
index 000000000..5ec54e7fb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get_mapping_test.go
@@ -0,0 +1,50 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestIndicesGetMappingURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Types []string
+ Expected string
+ }{
+ {
+ []string{},
+ []string{},
+ "/_all/_mapping/_all",
+ },
+ {
+ []string{},
+ []string{"tweet"},
+ "/_all/_mapping/tweet",
+ },
+ {
+ []string{"twitter"},
+ []string{"tweet"},
+ "/twitter/_mapping/tweet",
+ },
+ {
+ []string{"store-1", "store-2"},
+ []string{"tweet", "user"},
+ "/store-1%2Cstore-2/_mapping/tweet%2Cuser",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.GetMapping().Index(test.Indices...).Type(test.Types...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_get_settings.go b/vendor/github.com/olivere/elastic/indices_get_settings.go
new file mode 100644
index 000000000..06fce0dfa
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get_settings.go
@@ -0,0 +1,187 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesGetSettingsService retrieves the settings of one
+// or more indices.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-settings.html
+// for more details.
+type IndicesGetSettingsService struct {
+ client *Client
+ pretty bool
+ index []string
+ name []string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ flatSettings *bool
+ local *bool
+}
+
+// NewIndicesGetSettingsService creates a new IndicesGetSettingsService.
+func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService {
+ return &IndicesGetSettingsService{
+ client: client,
+ index: make([]string, 0),
+ name: make([]string, 0),
+ }
+}
+
+// Index is a list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *IndicesGetSettingsService) Index(indices ...string) *IndicesGetSettingsService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Name is a list of setting names that should be included.
+func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
+ s.name = append(s.name, name...)
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes the `_all` string or when no indices have been specified).
+func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression
+// to concrete indices that are open, closed or both.
+// Options: open, closed, none, all. Default: open,closed.
+func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from the master node (default: false).
+func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+ var index []string
+
+ if len(s.index) > 0 {
+ index = s.index
+ } else {
+ index = []string{"_all"}
+ }
+
+ if len(s.name) > 0 {
+ // Build URL
+ path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
+ "index": strings.Join(index, ","),
+ "name": strings.Join(s.name, ","),
+ })
+ } else {
+ // Build URL
+ path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
+ "index": strings.Join(index, ","),
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetSettingsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetSettingsService) Do(ctx context.Context) (map[string]*IndicesGetSettingsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]*IndicesGetSettingsResponse
+ if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
+type IndicesGetSettingsResponse struct {
+ Settings map[string]interface{} `json:"settings"`
+}
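A sketch for IndicesGetSettingsService (client.IndexGetSettings() is the accessor used in the test below; the settings filter is illustrative):

    // Assumes: import ("context"; "fmt"; "github.com/olivere/elastic")
    func showMergeSettings(ctx context.Context, client *elastic.Client) error {
        // GET /twitter/_settings/index.merge.* restricts the response
        // to merge-related settings.
        res, err := client.IndexGetSettings().Index("twitter").Name("index.merge.*").Do(ctx)
        if err != nil {
            return err
        }
        if info, found := res["twitter"]; found {
            fmt.Printf("settings: %v\n", info.Settings)
        }
        return nil
    }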
diff --git a/vendor/github.com/olivere/elastic/indices_get_settings_test.go b/vendor/github.com/olivere/elastic/indices_get_settings_test.go
new file mode 100644
index 000000000..7c6995a28
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get_settings_test.go
@@ -0,0 +1,82 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndexGetSettingsURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Names []string
+ Expected string
+ }{
+ {
+ []string{},
+ []string{},
+ "/_all/_settings",
+ },
+ {
+ []string{},
+ []string{"index.merge.*"},
+ "/_all/_settings/index.merge.%2A",
+ },
+ {
+ []string{"twitter-*"},
+ []string{"index.merge.*", "_settings"},
+ "/twitter-%2A/_settings/index.merge.%2A%2C_settings",
+ },
+ {
+ []string{"store-1", "store-2"},
+ []string{"index.merge.*", "_settings"},
+ "/store-1%2Cstore-2/_settings/index.merge.%2A%2C_settings",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.IndexGetSettings().Index(test.Indices...).Name(test.Names...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
+
+func TestIndexGetSettingsService(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if esversion < "1.4.0" {
+ t.Skip("Index Get API is available since 1.4")
+ return
+ }
+
+ res, err := client.IndexGetSettings().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected result; got: %v", res)
+ }
+ info, found := res[testIndexName]
+ if !found {
+ t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
+ }
+ if info == nil {
+ t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
+ }
+ if info.Settings == nil {
+ t.Fatalf("expected index settings of %q to be != nil; got: %v", testIndexName, info.Settings)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_get_template.go b/vendor/github.com/olivere/elastic/indices_get_template.go
new file mode 100644
index 000000000..ad3a091a0
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get_template.go
@@ -0,0 +1,133 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesGetTemplateService returns an index template.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-templates.html.
+type IndicesGetTemplateService struct {
+ client *Client
+ pretty bool
+ name []string
+ flatSettings *bool
+ local *bool
+}
+
+// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
+func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
+ return &IndicesGetTemplateService{
+ client: client,
+ name: make([]string, 0),
+ }
+}
+
+// Name is a list of index template names.
+func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
+ s.name = append(s.name, name...)
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from the master node (default: false).
+func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.name) > 0 {
+ path, err = uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": strings.Join(s.name, ","),
+ })
+ } else {
+ path = "/_template"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetTemplateService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetTemplateService) Do(ctx context.Context) (map[string]*IndicesGetTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]*IndicesGetTemplateResponse
+ if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do.
+type IndicesGetTemplateResponse struct {
+ Order int `json:"order,omitempty"`
+ Version int `json:"version,omitempty"`
+ Template string `json:"template,omitempty"`
+ Settings map[string]interface{} `json:"settings,omitempty"`
+ Mappings map[string]interface{} `json:"mappings,omitempty"`
+ Aliases map[string]interface{} `json:"aliases,omitempty"`
+}
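A sketch for IndicesGetTemplateService (client.IndexGetTemplate() is the accessor used in the test below; the template name is illustrative):

    // Assumes: import ("context"; "fmt"; "github.com/olivere/elastic")
    func showTemplates(ctx context.Context, client *elastic.Client) error {
        // GET /_template/template_1; omitting Name returns all templates.
        templates, err := client.IndexGetTemplate().Name("template_1").Do(ctx)
        if err != nil {
            return err
        }
        for name, tmpl := range templates {
            fmt.Println(name, "order:", tmpl.Order, "template:", tmpl.Template)
        }
        return nil
    }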
diff --git a/vendor/github.com/olivere/elastic/indices_get_template_test.go b/vendor/github.com/olivere/elastic/indices_get_template_test.go
new file mode 100644
index 000000000..c884ec1cb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get_template_test.go
@@ -0,0 +1,41 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestIndexGetTemplateURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Names []string
+ Expected string
+ }{
+ {
+ []string{},
+ "/_template",
+ },
+ {
+ []string{"index1"},
+ "/_template/index1",
+ },
+ {
+ []string{"index1", "index2"},
+ "/_template/index1%2Cindex2",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.IndexGetTemplate().Name(test.Names...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_get_test.go b/vendor/github.com/olivere/elastic/indices_get_test.go
new file mode 100644
index 000000000..6d37fca6e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_get_test.go
@@ -0,0 +1,98 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndicesGetValidate(t *testing.T) {
+ client := setupTestClient(t)
+
+ // No index name -> fail with error
+ res, err := NewIndicesGetService(client).Index("").Do(context.TODO())
+ if err == nil {
+ t.Fatalf("expected IndicesGet to fail without index name")
+ }
+ if res != nil {
+ t.Fatalf("expected result to be == nil; got: %v", res)
+ }
+}
+
+func TestIndicesGetURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Features []string
+ Expected string
+ }{
+ {
+ []string{},
+ []string{},
+ "/_all",
+ },
+ {
+ []string{},
+ []string{"_mappings"},
+ "/_all/_mappings",
+ },
+ {
+ []string{"twitter"},
+ []string{"_mappings", "_settings"},
+ "/twitter/_mappings%2C_settings",
+ },
+ {
+ []string{"store-1", "store-2"},
+ []string{"_mappings", "_settings"},
+ "/store-1%2Cstore-2/_mappings%2C_settings",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := NewIndicesGetService(client).Index(test.Indices...).Feature(test.Features...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
+
+func TestIndicesGetService(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if esversion < "1.4.0" {
+ t.Skip("Index Get API is available since 1.4")
+ return
+ }
+
+ res, err := client.IndexGet().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("expected result; got: %v", res)
+ }
+ info, found := res[testIndexName]
+ if !found {
+ t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
+ }
+ if info == nil {
+ t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
+ }
+ if info.Mappings == nil {
+ t.Errorf("expected mappings to be != nil; got: %v", info.Mappings)
+ }
+ if info.Settings == nil {
+ t.Errorf("expected settings to be != nil; got: %v", info.Settings)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_open.go b/vendor/github.com/olivere/elastic/indices_open.go
new file mode 100644
index 000000000..1b58c5721
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_open.go
@@ -0,0 +1,163 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesOpenService opens an index.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-open-close.html
+// for details.
+type IndicesOpenService struct {
+ client *Client
+ pretty bool
+ index string
+ timeout string
+ masterTimeout string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+// NewIndicesOpenService creates and initializes a new IndicesOpenService.
+func NewIndicesOpenService(client *Client) *IndicesOpenService {
+ return &IndicesOpenService{client: client}
+}
+
+// Index is the name of the index to open.
+func (s *IndicesOpenService) Index(index string) *IndicesOpenService {
+ s.index = index
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesOpenService) Timeout(timeout string) *IndicesOpenService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesOpenService) MasterTimeout(masterTimeout string) *IndicesOpenService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesOpenService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesOpenService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes the `_all` string or when no indices have been specified).
+func (s *IndicesOpenService) AllowNoIndices(allowNoIndices bool) *IndicesOpenService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed, or both.
+func (s *IndicesOpenService) ExpandWildcards(expandWildcards string) *IndicesOpenService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesOpenService) Pretty(pretty bool) *IndicesOpenService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesOpenService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_open", map[string]string{
+ "index": s.index,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesOpenService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesOpenService) Do(ctx context.Context) (*IndicesOpenResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesOpenResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesOpenResponse is the response of IndicesOpenService.Do.
+type IndicesOpenResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
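A sketch for IndicesOpenService, using the constructor directly since this diff does not show a convenience accessor on Client for it (the index name is illustrative):

    // Assumes: import ("context"; "errors"; "github.com/olivere/elastic")
    func openIndex(ctx context.Context, client *elastic.Client) error {
        // POST /twitter/_open
        res, err := elastic.NewIndicesOpenService(client).Index("twitter").Do(ctx)
        if err != nil {
            return err
        }
        if !res.Acknowledged {
            return errors.New("open index not acknowledged")
        }
        return nil
    }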
diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_template_test.go b/vendor/github.com/olivere/elastic/indices_open_test.go
index 9a6324198..aab6c5c19 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/delete_template_test.go
+++ b/vendor/github.com/olivere/elastic/indices_open_test.go
@@ -9,13 +9,13 @@ import (
"testing"
)
-func TestDeleteTemplateValidate(t *testing.T) {
+func TestIndicesOpenValidate(t *testing.T) {
client := setupTestClient(t)
- // No template id -> fail with error
- res, err := NewDeleteTemplateService(client).Do(context.TODO())
+ // No index name -> fail with error
+ res, err := NewIndicesOpenService(client).Do(context.TODO())
if err == nil {
- t.Fatalf("expected Delete to fail without index name")
+ t.Fatalf("expected IndicesOpen to fail without index name")
}
if res != nil {
t.Fatalf("expected result to be == nil; got: %v", res)
diff --git a/vendor/github.com/olivere/elastic/indices_put_alias.go b/vendor/github.com/olivere/elastic/indices_put_alias.go
new file mode 100644
index 000000000..12f8e1bd5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_put_alias.go
@@ -0,0 +1,302 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// -- Actions --
+
+// AliasAction is an action to apply to an alias, e.g. "add" or "remove".
+type AliasAction interface {
+ Source() (interface{}, error)
+}
+
+// AliasAddAction is an action to add an alias.
+type AliasAddAction struct {
+ index []string // index name(s)
+ alias string // alias name
+ filter Query
+ routing string
+ searchRouting string
+ indexRouting string
+}
+
+// NewAliasAddAction returns an action to add an alias.
+func NewAliasAddAction(alias string) *AliasAddAction {
+ return &AliasAddAction{
+ alias: alias,
+ }
+}
+
+// Index associates one or more indices with the alias.
+func (a *AliasAddAction) Index(index ...string) *AliasAddAction {
+ a.index = append(a.index, index...)
+ return a
+}
+
+func (a *AliasAddAction) removeBlankIndexNames() {
+ var indices []string
+ for _, index := range a.index {
+ if len(index) > 0 {
+ indices = append(indices, index)
+ }
+ }
+ a.index = indices
+}
+
+// Filter associates a filter with the alias.
+func (a *AliasAddAction) Filter(filter Query) *AliasAddAction {
+ a.filter = filter
+ return a
+}
+
+// Routing associates a routing value with the alias.
+// This basically sets index and search routing to the same value.
+func (a *AliasAddAction) Routing(routing string) *AliasAddAction {
+ a.routing = routing
+ return a
+}
+
+// IndexRouting associates an index routing value with the alias.
+func (a *AliasAddAction) IndexRouting(routing string) *AliasAddAction {
+ a.indexRouting = routing
+ return a
+}
+
+// SearchRouting associates a search routing value with the alias.
+func (a *AliasAddAction) SearchRouting(routing ...string) *AliasAddAction {
+ a.searchRouting = strings.Join(routing, ",")
+ return a
+}
+
+// Validate checks if the operation is valid.
+func (a *AliasAddAction) Validate() error {
+ var invalid []string
+ if len(a.alias) == 0 {
+ invalid = append(invalid, "Alias")
+ }
+ if len(a.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Source returns the JSON-serializable data.
+func (a *AliasAddAction) Source() (interface{}, error) {
+ a.removeBlankIndexNames()
+ if err := a.Validate(); err != nil {
+ return nil, err
+ }
+ src := make(map[string]interface{})
+ act := make(map[string]interface{})
+ src["add"] = act
+ act["alias"] = a.alias
+ switch len(a.index) {
+ case 1:
+ act["index"] = a.index[0]
+ default:
+ act["indices"] = a.index
+ }
+ if a.filter != nil {
+ f, err := a.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ act["filter"] = f
+ }
+ if len(a.routing) > 0 {
+ act["routing"] = a.routing
+ }
+ if len(a.indexRouting) > 0 {
+ act["index_routing"] = a.indexRouting
+ }
+ if len(a.searchRouting) > 0 {
+ act["search_routing"] = a.searchRouting
+ }
+ return src, nil
+}
+
+// AliasRemoveAction is an action to remove an alias.
+type AliasRemoveAction struct {
+ index []string // index name(s)
+ alias string // alias name
+}
+
+// NewAliasRemoveAction returns an action to remove an alias.
+func NewAliasRemoveAction(alias string) *AliasRemoveAction {
+ return &AliasRemoveAction{
+ alias: alias,
+ }
+}
+
+// Index associates one or more indices with the alias.
+func (a *AliasRemoveAction) Index(index ...string) *AliasRemoveAction {
+ a.index = append(a.index, index...)
+ return a
+}
+
+func (a *AliasRemoveAction) removeBlankIndexNames() {
+ var indices []string
+ for _, index := range a.index {
+ if len(index) > 0 {
+ indices = append(indices, index)
+ }
+ }
+ a.index = indices
+}
+
+// Validate checks if the operation is valid.
+func (a *AliasRemoveAction) Validate() error {
+ var invalid []string
+ if len(a.alias) == 0 {
+ invalid = append(invalid, "Alias")
+ }
+ if len(a.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Source returns the JSON-serializable data.
+func (a *AliasRemoveAction) Source() (interface{}, error) {
+ a.removeBlankIndexNames()
+ if err := a.Validate(); err != nil {
+ return nil, err
+ }
+ src := make(map[string]interface{})
+ act := make(map[string]interface{})
+ src["remove"] = act
+ act["alias"] = a.alias
+ switch len(a.index) {
+ case 1:
+ act["index"] = a.index[0]
+ default:
+ act["indices"] = a.index
+ }
+ return src, nil
+}
+
+// -- Service --
+
+// AliasService enables users to add or remove an alias.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-aliases.html
+// for details.
+type AliasService struct {
+ client *Client
+ actions []AliasAction
+ pretty bool
+}
+
+// NewAliasService creates a new AliasService to manage aliases.
+func NewAliasService(client *Client) *AliasService {
+ builder := &AliasService{
+ client: client,
+ }
+ return builder
+}
+
+// Pretty asks Elasticsearch to indent the HTTP response.
+func (s *AliasService) Pretty(pretty bool) *AliasService {
+ s.pretty = pretty
+ return s
+}
+
+// Add adds an alias to an index.
+func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
+ action := NewAliasAddAction(aliasName).Index(indexName)
+ s.actions = append(s.actions, action)
+ return s
+}
+
+// AddWithFilter adds an alias to an index and associates a filter with the alias.
+func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService {
+ action := NewAliasAddAction(aliasName).Index(indexName).Filter(filter)
+ s.actions = append(s.actions, action)
+ return s
+}
+
+// Remove removes an alias.
+func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
+ action := NewAliasRemoveAction(aliasName).Index(indexName)
+ s.actions = append(s.actions, action)
+ return s
+}
+
+// Action accepts one or more AliasAction instances which can be
+// of type AliasAddAction or AliasRemoveAction.
+func (s *AliasService) Action(action ...AliasAction) *AliasService {
+ s.actions = append(s.actions, action...)
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *AliasService) buildURL() (string, url.Values, error) {
+ path := "/_aliases"
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ return path, params, nil
+}
+
+// Do executes the command.
+func (s *AliasService) Do(ctx context.Context) (*AliasResult, error) {
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Body with actions
+ body := make(map[string]interface{})
+ var actions []interface{}
+ for _, action := range s.actions {
+ src, err := action.Source()
+ if err != nil {
+ return nil, err
+ }
+ actions = append(actions, src)
+ }
+ body["actions"] = actions
+
+ // Get response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return results
+ ret := new(AliasResult)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of an alias request.
+
+// AliasResult is the outcome of calling Do on AliasService.
+type AliasResult struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
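A sketch for AliasService (client.Alias() is the accessor used in the test below; index and alias names are illustrative). Both actions travel in a single POST /_aliases body, so the alias moves from the old index to the new one atomically:

    // Assumes: import ("context"; "errors"; "github.com/olivere/elastic")
    func moveAlias(ctx context.Context, client *elastic.Client) error {
        res, err := client.Alias().
            Remove("twitter-v1", "twitter").
            Action(elastic.NewAliasAddAction("twitter").Index("twitter-v2")).
            Do(ctx)
        if err != nil {
            return err
        }
        if !res.Acknowledged {
            return errors.New("alias update not acknowledged")
        }
        return nil
    }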
diff --git a/vendor/github.com/olivere/elastic/indices_put_alias_test.go b/vendor/github.com/olivere/elastic/indices_put_alias_test.go
new file mode 100644
index 000000000..ada1dfdef
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_put_alias_test.go
@@ -0,0 +1,222 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+const (
+ testAliasName = "elastic-test-alias"
+)
+
+func TestAliasLifecycle(t *testing.T) {
+ var err error
+
+ client := setupTestClientAndCreateIndex(t)
+
+ // Some tweets
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
+ tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+ // Add tweets to first index
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Add tweets to second index
+ _, err = client.Index().Index(testIndexName2).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Flush
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Flush().Index(testIndexName2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Add both indices to a new alias
+ aliasCreate, err := client.Alias().
+ Add(testIndexName, testAliasName).
+ Action(NewAliasAddAction(testAliasName).Index(testIndexName2)).
+ //Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !aliasCreate.Acknowledged {
+ t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
+ }
+
+ // Search should return all 3 tweets
+ matchAll := NewMatchAllQuery()
+ searchResult1, err := client.Search().Index(testAliasName).Query(matchAll).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult1.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult1.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult1.Hits.TotalHits)
+ }
+
+ // Removing the first index removes its two tweets, so the search should only yield 1
+ aliasRemove1, err := client.Alias().
+ Remove(testIndexName, testAliasName).
+ //Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !aliasRemove1.Acknowledged {
+ t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
+ }
+
+ searchResult2, err := client.Search().Index(testAliasName).Query(matchAll).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult2.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult2.Hits.TotalHits != 1 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult2.Hits.TotalHits)
+ }
+}
+
+func TestAliasAddAction(t *testing.T) {
+ var tests = []struct {
+ Action *AliasAddAction
+ Expected string
+ Invalid bool
+ }{
+ {
+ Action: NewAliasAddAction("").Index(""),
+ Invalid: true,
+ },
+ {
+ Action: NewAliasAddAction("alias1").Index(""),
+ Invalid: true,
+ },
+ {
+ Action: NewAliasAddAction("").Index("index1"),
+ Invalid: true,
+ },
+ {
+ Action: NewAliasAddAction("alias1").Index("index1"),
+ Expected: `{"add":{"alias":"alias1","index":"index1"}}`,
+ },
+ {
+ Action: NewAliasAddAction("alias1").Index("index1", "index2"),
+ Expected: `{"add":{"alias":"alias1","indices":["index1","index2"]}}`,
+ },
+ {
+ Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1"),
+ Expected: `{"add":{"alias":"alias1","index":"index1","routing":"routing1"}}`,
+ },
+ {
+ Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1").IndexRouting("indexRouting1"),
+ Expected: `{"add":{"alias":"alias1","index":"index1","index_routing":"indexRouting1","routing":"routing1"}}`,
+ },
+ {
+ Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1").SearchRouting("searchRouting1"),
+ Expected: `{"add":{"alias":"alias1","index":"index1","routing":"routing1","search_routing":"searchRouting1"}}`,
+ },
+ {
+ Action: NewAliasAddAction("alias1").Index("index1").Routing("routing1").SearchRouting("searchRouting1", "searchRouting2"),
+ Expected: `{"add":{"alias":"alias1","index":"index1","routing":"routing1","search_routing":"searchRouting1,searchRouting2"}}`,
+ },
+ {
+ Action: NewAliasAddAction("alias1").Index("index1").Filter(NewTermQuery("user", "olivere")),
+ Expected: `{"add":{"alias":"alias1","filter":{"term":{"user":"olivere"}},"index":"index1"}}`,
+ },
+ }
+
+ for i, tt := range tests {
+ src, err := tt.Action.Source()
+ if err != nil {
+ if !tt.Invalid {
+ t.Errorf("#%d: expected to succeed", i)
+ }
+ } else {
+ if tt.Invalid {
+ t.Errorf("#%d: expected to fail", i)
+ } else {
+ dst, err := json.Marshal(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, have := tt.Expected, string(dst); want != have {
+ t.Errorf("#%d: expected %s, got %s", i, want, have)
+ }
+ }
+ }
+ }
+}
+
+func TestAliasRemoveAction(t *testing.T) {
+ var tests = []struct {
+ Action *AliasRemoveAction
+ Expected string
+ Invalid bool
+ }{
+ {
+ Action: NewAliasRemoveAction(""),
+ Invalid: true,
+ },
+ {
+ Action: NewAliasRemoveAction("alias1"),
+ Invalid: true,
+ },
+ {
+ Action: NewAliasRemoveAction("").Index("index1"),
+ Invalid: true,
+ },
+ {
+ Action: NewAliasRemoveAction("alias1").Index("index1"),
+ Expected: `{"remove":{"alias":"alias1","index":"index1"}}`,
+ },
+ {
+ Action: NewAliasRemoveAction("alias1").Index("index1", "index2"),
+ Expected: `{"remove":{"alias":"alias1","indices":["index1","index2"]}}`,
+ },
+ }
+
+ for i, tt := range tests {
+ src, err := tt.Action.Source()
+ if err != nil {
+ if !tt.Invalid {
+ t.Errorf("#%d: expected to succeed", i)
+ }
+ } else {
+ if tt.Invalid {
+ t.Errorf("#%d: expected to fail", i)
+ } else {
+ dst, err := json.Marshal(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, have := tt.Expected, string(dst); want != have {
+ t.Errorf("#%d: expected %s, got %s", i, want, have)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_put_mapping.go b/vendor/github.com/olivere/elastic/indices_put_mapping.go
new file mode 100644
index 000000000..2f8a35e4c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_put_mapping.go
@@ -0,0 +1,228 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesPutMappingService registers a specific mapping definition
+// for a specific type.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-put-mapping.html
+// for details.
+type IndicesPutMappingService struct {
+ client *Client
+ pretty bool
+ typ string
+ index []string
+ masterTimeout string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ updateAllTypes *bool
+ timeout string
+ bodyJson map[string]interface{}
+ bodyString string
+}
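+
+// Example usage (an illustrative sketch; the index name, type name and
+// mapping body are hypothetical):
+//
+//	resp, err := client.PutMapping().
+//		Index("twitter").
+//		Type("doc").
+//		BodyString(`{"doc":{"properties":{"user":{"type":"keyword"}}}}`).
+//		Do(context.Background())
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = resp.Acknowledged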
+
+// NewPutMappingService is an alias for NewIndicesPutMappingService.
+// Use NewIndicesPutMappingService.
+func NewPutMappingService(client *Client) *IndicesPutMappingService {
+ return NewIndicesPutMappingService(client)
+}
+
+// NewIndicesPutMappingService creates a new IndicesPutMappingService.
+func NewIndicesPutMappingService(client *Client) *IndicesPutMappingService {
+ return &IndicesPutMappingService{
+ client: client,
+ index: make([]string, 0),
+ }
+}
+
+// Index is a list of index names the mapping should be added to
+// (supports wildcards); use `_all` or omit to add the mapping on all indices.
+func (s *IndicesPutMappingService) Index(indices ...string) *IndicesPutMappingService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Type is the name of the document type.
+func (s *IndicesPutMappingService) Type(typ string) *IndicesPutMappingService {
+ s.typ = typ
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesPutMappingService) Timeout(timeout string) *IndicesPutMappingService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesPutMappingService) MasterTimeout(masterTimeout string) *IndicesPutMappingService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesPutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutMappingService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// This includes `_all` string or when no indices have been specified.
+func (s *IndicesPutMappingService) AllowNoIndices(allowNoIndices bool) *IndicesPutMappingService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesPutMappingService) ExpandWildcards(expandWildcards string) *IndicesPutMappingService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// UpdateAllTypes, if true, indicates that all fields with the same name that
+// span multiple types should be updated (default: false).
+func (s *IndicesPutMappingService) UpdateAllTypes(updateAllTypes bool) *IndicesPutMappingService {
+ s.updateAllTypes = &updateAllTypes
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesPutMappingService) Pretty(pretty bool) *IndicesPutMappingService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson contains the mapping definition.
+func (s *IndicesPutMappingService) BodyJson(mapping map[string]interface{}) *IndicesPutMappingService {
+ s.bodyJson = mapping
+ return s
+}
+
+// BodyString is the mapping definition serialized as a string.
+func (s *IndicesPutMappingService) BodyString(mapping string) *IndicesPutMappingService {
+ s.bodyString = mapping
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesPutMappingService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ // Build URL: Typ MUST be specified and is verified in Validate.
+ if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "type": s.typ,
+ })
+ } else {
+ path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{
+ "type": s.typ,
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.updateAllTypes != nil {
+ params.Set("update_all_types", fmt.Sprintf("%v", *s.updateAllTypes))
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesPutMappingService) Validate() error {
+ var invalid []string
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if s.bodyString == "" && s.bodyJson == nil {
+ invalid = append(invalid, "BodyJson")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesPutMappingService) Do(ctx context.Context) (*PutMappingResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(PutMappingResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// PutMappingResponse is the response of IndicesPutMappingService.Do.
+type PutMappingResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
diff --git a/vendor/github.com/olivere/elastic/indices_put_mapping_test.go b/vendor/github.com/olivere/elastic/indices_put_mapping_test.go
new file mode 100644
index 000000000..644e1187a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_put_mapping_test.go
@@ -0,0 +1,95 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestPutMappingURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Type string
+ Expected string
+ }{
+ {
+ []string{},
+ "doc",
+ "/_mapping/doc",
+ },
+ {
+ []string{"*"},
+ "doc",
+ "/%2A/_mapping/doc",
+ },
+ {
+ []string{"store-1", "store-2"},
+ "doc",
+ "/store-1%2Cstore-2/_mapping/doc",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.PutMapping().Index(test.Indices...).Type(test.Type).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
+
+func TestMappingLifecycle(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+ //client := setupTestClientAndCreateIndexAndLog(t)
+
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex)
+ }
+
+ mapping := `{
+ "doc":{
+ "properties":{
+ "field":{
+ "type":"keyword"
+ }
+ }
+ }
+ }`
+
+ putresp, err := client.PutMapping().Index(testIndexName3).Type("doc").BodyString(mapping).Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected put mapping to succeed; got: %v", err)
+ }
+ if putresp == nil {
+ t.Fatalf("expected put mapping response; got: %v", putresp)
+ }
+ if !putresp.Acknowledged {
+ t.Fatalf("expected put mapping ack; got: %v", putresp.Acknowledged)
+ }
+
+ getresp, err := client.GetMapping().Index(testIndexName3).Type("doc").Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected get mapping to succeed; got: %v", err)
+ }
+ if getresp == nil {
+ t.Fatalf("expected get mapping response; got: %v", getresp)
+ }
+ props, ok := getresp[testIndexName3]
+ if !ok {
+ t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props)
+ }
+
+ // NOTE There is no Delete Mapping API in Elasticsearch 2.0
+}
diff --git a/vendor/github.com/olivere/elastic/indices_put_settings.go b/vendor/github.com/olivere/elastic/indices_put_settings.go
new file mode 100644
index 000000000..1283eb669
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_put_settings.go
@@ -0,0 +1,191 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesPutSettingsService changes specific index level settings in
+// real time.
+//
+// See the documentation at
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-update-settings.html.
+type IndicesPutSettingsService struct {
+ client *Client
+ pretty bool
+ index []string
+ allowNoIndices *bool
+ expandWildcards string
+ flatSettings *bool
+ ignoreUnavailable *bool
+ masterTimeout string
+ bodyJson interface{}
+ bodyString string
+}
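+
+// Example usage (an illustrative sketch; the index name and settings body
+// are hypothetical):
+//
+//	resp, err := client.IndexPutSettings().
+//		Index("twitter").
+//		BodyString(`{"index":{"refresh_interval":"30s"}}`).
+//		Do(context.Background())
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = resp.Acknowledged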
+
+// NewIndicesPutSettingsService creates a new IndicesPutSettingsService.
+func NewIndicesPutSettingsService(client *Client) *IndicesPutSettingsService {
+ return &IndicesPutSettingsService{
+ client: client,
+ index: make([]string, 0),
+ }
+}
+
+// Index is a list of index names the settings should be applied to
+// (supports wildcards); use `_all` or omit to update the settings on all indices.
+func (s *IndicesPutSettingsService) Index(indices ...string) *IndicesPutSettingsService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices. (This includes `_all`
+// string or when no indices have been specified).
+func (s *IndicesPutSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesPutSettingsService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards specifies whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesPutSettingsService) ExpandWildcards(expandWildcards string) *IndicesPutSettingsService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesPutSettingsService) FlatSettings(flatSettings bool) *IndicesPutSettingsService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// IgnoreUnavailable specifies whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesPutSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutSettingsService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// MasterTimeout is the timeout for connection to master.
+func (s *IndicesPutSettingsService) MasterTimeout(masterTimeout string) *IndicesPutSettingsService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesPutSettingsService) Pretty(pretty bool) *IndicesPutSettingsService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson contains the index settings to be updated.
+func (s *IndicesPutSettingsService) BodyJson(body interface{}) *IndicesPutSettingsService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString contains the index settings to be updated, serialized as a string.
+func (s *IndicesPutSettingsService) BodyString(body string) *IndicesPutSettingsService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesPutSettingsService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+
+ if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else {
+ path = "/_settings"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesPutSettingsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesPutSettingsService) Do(ctx context.Context) (*IndicesPutSettingsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesPutSettingsResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesPutSettingsResponse is the response of IndicesPutSettingsService.Do.
+type IndicesPutSettingsResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
diff --git a/vendor/github.com/olivere/elastic/indices_put_settings_test.go b/vendor/github.com/olivere/elastic/indices_put_settings_test.go
new file mode 100644
index 000000000..0ceea3ef8
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_put_settings_test.go
@@ -0,0 +1,95 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndicesPutSettingsBuildURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Expected string
+ }{
+ {
+ []string{},
+ "/_settings",
+ },
+ {
+ []string{"*"},
+ "/%2A/_settings",
+ },
+ {
+ []string{"store-1", "store-2"},
+ "/store-1%2Cstore-2/_settings",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.IndexPutSettings().Index(test.Indices...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
+
+func TestIndicesSettingsLifecycle(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ body := `{
+ "index":{
+ "refresh_interval":"-1"
+ }
+ }`
+
+ // Put settings
+ putres, err := client.IndexPutSettings().Index(testIndexName).BodyString(body).Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected put settings to succeed; got: %v", err)
+ }
+ if putres == nil {
+ t.Fatalf("expected put settings response; got: %v", putres)
+ }
+ if !putres.Acknowledged {
+ t.Fatalf("expected put settings ack; got: %v", putres.Acknowledged)
+ }
+
+ // Read settings
+ getres, err := client.IndexGetSettings().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected get mapping to succeed; got: %v", err)
+ }
+ if getres == nil {
+ t.Fatalf("expected get mapping response; got: %v", getres)
+ }
+
+ // Check settings
+ index, found := getres[testIndexName]
+ if !found {
+ t.Fatalf("expected to return settings for index %q; got: %#v", testIndexName, getres)
+ }
+ // Retrieve "index" section of the settings for index testIndexName
+ sectionIntf, ok := index.Settings["index"]
+ if !ok {
+ t.Fatalf("expected settings to have %q field; got: %#v", "index", getres)
+ }
+ section, ok := sectionIntf.(map[string]interface{})
+ if !ok {
+ t.Fatalf("expected settings to be of type map[string]interface{}; got: %#v", getres)
+ }
+ refintv, ok := section["refresh_interval"]
+ if !ok {
+ t.Fatalf(`expected JSON to include "refresh_interval" field; got: %#v`, getres)
+ }
+ if got, want := refintv, "-1"; got != want {
+ t.Fatalf("expected refresh_interval = %v; got: %v", want, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_put_template.go b/vendor/github.com/olivere/elastic/indices_put_template.go
new file mode 100644
index 000000000..c0b959647
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_put_template.go
@@ -0,0 +1,207 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesPutTemplateService creates or updates index templates.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-templates.html.
+type IndicesPutTemplateService struct {
+ client *Client
+ pretty bool
+ name string
+ cause string
+ order interface{}
+ version *int
+ create *bool
+ timeout string
+ masterTimeout string
+ flatSettings *bool
+ bodyJson interface{}
+ bodyString string
+}
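+
+// Example usage (an illustrative sketch; the template name and template body
+// are hypothetical):
+//
+//	resp, err := NewIndicesPutTemplateService(client).
+//		Name("logs-template").
+//		BodyString(`{"index_patterns":["logs-*"],"settings":{"number_of_shards":1}}`).
+//		Do(context.Background())
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = resp.Acknowledged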
+
+// NewIndicesPutTemplateService creates a new IndicesPutTemplateService.
+func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService {
+ return &IndicesPutTemplateService{
+ client: client,
+ }
+}
+
+// Name is the name of the index template.
+func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService {
+ s.name = name
+ return s
+}
+
+// Cause describes the cause for this index template creation. This is currently
+// undocumented, but part of the Java source.
+func (s *IndicesPutTemplateService) Cause(cause string) *IndicesPutTemplateService {
+ s.cause = cause
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Order is the order for this template when merging multiple matching ones
+// (higher numbers are merged later, overriding the lower numbers).
+func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService {
+ s.order = order
+ return s
+}
+
+// Version sets the version number for this template.
+func (s *IndicesPutTemplateService) Version(version int) *IndicesPutTemplateService {
+ s.version = &version
+ return s
+}
+
+// Create indicates whether the index template should only be added if it is
+// new, or whether it can also replace an existing one.
+func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService {
+ s.create = &create
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson contains the template definition.
+func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString contains the template definition, serialized as a string.
+func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": s.name,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.order != nil {
+ params.Set("order", fmt.Sprintf("%v", s.order))
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", *s.version))
+ }
+ if s.create != nil {
+ params.Set("create", fmt.Sprintf("%v", *s.create))
+ }
+ if s.cause != "" {
+ params.Set("cause", s.cause)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesPutTemplateService) Validate() error {
+ var invalid []string
+ if s.name == "" {
+ invalid = append(invalid, "Name")
+ }
+ if s.bodyString == "" && s.bodyJson == nil {
+ invalid = append(invalid, "BodyJson")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesPutTemplateService) Do(ctx context.Context) (*IndicesPutTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesPutTemplateResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do.
+type IndicesPutTemplateResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
diff --git a/vendor/github.com/olivere/elastic/indices_refresh.go b/vendor/github.com/olivere/elastic/indices_refresh.go
new file mode 100644
index 000000000..f6c7f165e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_refresh.go
@@ -0,0 +1,98 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// RefreshService explicitly refreshes one or more indices.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-refresh.html.
+type RefreshService struct {
+ client *Client
+ index []string
+ pretty bool
+}
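+
+// Example usage (an illustrative sketch; the index names are hypothetical):
+//
+//	res, err := client.Refresh("twitter", "comments").Do(context.Background())
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res.Shards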
+
+// NewRefreshService creates a new instance of RefreshService.
+func NewRefreshService(client *Client) *RefreshService {
+ builder := &RefreshService{
+ client: client,
+ }
+ return builder
+}
+
+// Index specifies the indices to refresh.
+func (s *RefreshService) Index(index ...string) *RefreshService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Pretty asks Elasticsearch to return indented JSON.
+func (s *RefreshService) Pretty(pretty bool) *RefreshService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *RefreshService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_refresh", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else {
+ path = "/_refresh"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ return path, params, nil
+}
+
+// Do executes the request.
+func (s *RefreshService) Do(ctx context.Context) (*RefreshResult, error) {
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(RefreshResult)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a refresh request.
+
+// RefreshResult is the outcome of RefreshService.Do.
+type RefreshResult struct {
+ Shards shardsInfo `json:"_shards,omitempty"`
+}
diff --git a/vendor/github.com/olivere/elastic/indices_refresh_test.go b/vendor/github.com/olivere/elastic/indices_refresh_test.go
new file mode 100644
index 000000000..8640fb602
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_refresh_test.go
@@ -0,0 +1,81 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestRefreshBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Indices []string
+ Expected string
+ }{
+ {
+ []string{},
+ "/_refresh",
+ },
+ {
+ []string{"index1"},
+ "/index1/_refresh",
+ },
+ {
+ []string{"index1", "index2"},
+ "/index1%2Cindex2/_refresh",
+ },
+ }
+
+ for i, test := range tests {
+ path, _, err := client.Refresh().Index(test.Indices...).buildURL()
+ if err != nil {
+ t.Errorf("case #%d: %v", i+1, err)
+ continue
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+}
+
+func TestRefresh(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add some documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Refresh indices
+ res, err := client.Refresh(testIndexName, testIndexName2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected result; got nil")
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_rollover.go b/vendor/github.com/olivere/elastic/indices_rollover.go
new file mode 100644
index 000000000..841b3836f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_rollover.go
@@ -0,0 +1,272 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesRolloverService rolls an alias over to a new index when the
+// existing index is considered to be too large or too old.
+//
+// It is documented at
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-rollover-index.html.
+type IndicesRolloverService struct {
+ client *Client
+ pretty bool
+ dryRun bool
+ newIndex string
+ alias string
+ masterTimeout string
+ timeout string
+ waitForActiveShards string
+ conditions map[string]interface{}
+ settings map[string]interface{}
+ mappings map[string]interface{}
+ bodyJson interface{}
+ bodyString string
+}
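+
+// Example usage (an illustrative sketch; "logs_write" is a hypothetical
+// alias name):
+//
+//	res, err := client.RolloverIndex("logs_write").
+//		AddMaxIndexAgeCondition("7d").
+//		AddMaxIndexDocsCondition(1000000).
+//		Do(context.Background())
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res.RolledOver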
+
+// NewIndicesRolloverService creates a new IndicesRolloverService.
+func NewIndicesRolloverService(client *Client) *IndicesRolloverService {
+ return &IndicesRolloverService{
+ client: client,
+ conditions: make(map[string]interface{}),
+ settings: make(map[string]interface{}),
+ mappings: make(map[string]interface{}),
+ }
+}
+
+// Alias is the name of the alias to rollover.
+func (s *IndicesRolloverService) Alias(alias string) *IndicesRolloverService {
+ s.alias = alias
+ return s
+}
+
+// NewIndex is the name of the rollover index.
+func (s *IndicesRolloverService) NewIndex(newIndex string) *IndicesRolloverService {
+ s.newIndex = newIndex
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesRolloverService) MasterTimeout(masterTimeout string) *IndicesRolloverService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Timeout sets an explicit operation timeout.
+func (s *IndicesRolloverService) Timeout(timeout string) *IndicesRolloverService {
+ s.timeout = timeout
+ return s
+}
+
+// WaitForActiveShards sets the number of active shards to wait for on the
+// newly created rollover index before the operation returns.
+func (s *IndicesRolloverService) WaitForActiveShards(waitForActiveShards string) *IndicesRolloverService {
+ s.waitForActiveShards = waitForActiveShards
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesRolloverService) Pretty(pretty bool) *IndicesRolloverService {
+ s.pretty = pretty
+ return s
+}
+
+// DryRun, when set, specifies that only conditions are checked without
+// performing the actual rollover.
+func (s *IndicesRolloverService) DryRun(dryRun bool) *IndicesRolloverService {
+ s.dryRun = dryRun
+ return s
+}
+
+// Conditions sets all conditions as a dictionary.
+func (s *IndicesRolloverService) Conditions(conditions map[string]interface{}) *IndicesRolloverService {
+ s.conditions = conditions
+ return s
+}
+
+// AddCondition adds a condition to the rollover decision.
+func (s *IndicesRolloverService) AddCondition(name string, value interface{}) *IndicesRolloverService {
+ s.conditions[name] = value
+ return s
+}
+
+// AddMaxIndexAgeCondition adds a condition to set the max index age.
+func (s *IndicesRolloverService) AddMaxIndexAgeCondition(time string) *IndicesRolloverService {
+ s.conditions["max_age"] = time
+ return s
+}
+
+// AddMaxIndexDocsCondition adds a condition to set the max documents in the index.
+func (s *IndicesRolloverService) AddMaxIndexDocsCondition(docs int64) *IndicesRolloverService {
+ s.conditions["max_docs"] = docs
+ return s
+}
+
+// Settings adds the index settings.
+func (s *IndicesRolloverService) Settings(settings map[string]interface{}) *IndicesRolloverService {
+ s.settings = settings
+ return s
+}
+
+// AddSetting adds an index setting.
+func (s *IndicesRolloverService) AddSetting(name string, value interface{}) *IndicesRolloverService {
+ s.settings[name] = value
+ return s
+}
+
+// Mappings adds the index mappings.
+func (s *IndicesRolloverService) Mappings(mappings map[string]interface{}) *IndicesRolloverService {
+ s.mappings = mappings
+ return s
+}
+
+// AddMapping adds a mapping for the given type.
+func (s *IndicesRolloverService) AddMapping(typ string, mapping interface{}) *IndicesRolloverService {
+ s.mappings[typ] = mapping
+ return s
+}
+
+// BodyJson sets the conditions that need to be met for executing rollover,
+// specified as a serializable JSON instance which is sent as the body of
+// the request.
+func (s *IndicesRolloverService) BodyJson(body interface{}) *IndicesRolloverService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString sets the conditions that need to be met for executing rollover,
+// specified as a string which is sent as the body of the request.
+func (s *IndicesRolloverService) BodyString(body string) *IndicesRolloverService {
+ s.bodyString = body
+ return s
+}
+
+// getBody returns the body of the request, used when the body was not
+// explicitly set via BodyJson or BodyString.
+func (s *IndicesRolloverService) getBody() interface{} {
+ body := make(map[string]interface{})
+ if len(s.conditions) > 0 {
+ body["conditions"] = s.conditions
+ }
+ if len(s.settings) > 0 {
+ body["settings"] = s.settings
+ }
+ if len(s.mappings) > 0 {
+ body["mappings"] = s.mappings
+ }
+ return body
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesRolloverService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if s.newIndex != "" {
+ path, err = uritemplates.Expand("/{alias}/_rollover/{new_index}", map[string]string{
+ "alias": s.alias,
+ "new_index": s.newIndex,
+ })
+ } else {
+ path, err = uritemplates.Expand("/{alias}/_rollover", map[string]string{
+ "alias": s.alias,
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.dryRun {
+ params.Set("dry_run", "true")
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.waitForActiveShards != "" {
+ params.Set("wait_for_active_shards", s.waitForActiveShards)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesRolloverService) Validate() error {
+ var invalid []string
+ if s.alias == "" {
+ invalid = append(invalid, "Alias")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesRolloverService) Do(ctx context.Context) (*IndicesRolloverResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else if s.bodyString != "" {
+ body = s.bodyString
+ } else {
+ body = s.getBody()
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesRolloverResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesRolloverResponse is the response of IndicesRolloverService.Do.
+type IndicesRolloverResponse struct {
+ OldIndex string `json:"old_index"`
+ NewIndex string `json:"new_index"`
+ RolledOver bool `json:"rolled_over"`
+ DryRun bool `json:"dry_run"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Conditions map[string]bool `json:"conditions"`
+}
diff --git a/vendor/github.com/olivere/elastic/indices_rollover_test.go b/vendor/github.com/olivere/elastic/indices_rollover_test.go
new file mode 100644
index 000000000..81d7099e0
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_rollover_test.go
@@ -0,0 +1,116 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestIndicesRolloverBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Alias string
+ NewIndex string
+ Expected string
+ }{
+ {
+ "logs_write",
+ "",
+ "/logs_write/_rollover",
+ },
+ {
+ "logs_write",
+ "my_new_index_name",
+ "/logs_write/_rollover/my_new_index_name",
+ },
+ }
+
+ for i, test := range tests {
+ path, _, err := client.RolloverIndex(test.Alias).NewIndex(test.NewIndex).buildURL()
+ if err != nil {
+ t.Errorf("case #%d: %v", i+1, err)
+ continue
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+}
+
+func TestIndicesRolloverBodyConditions(t *testing.T) {
+ client := setupTestClient(t)
+ svc := NewIndicesRolloverService(client).
+ Conditions(map[string]interface{}{
+ "max_age": "7d",
+ "max_docs": 1000,
+ })
+ data, err := json.Marshal(svc.getBody())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"conditions":{"max_age":"7d","max_docs":1000}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestIndicesRolloverBodyAddCondition(t *testing.T) {
+ client := setupTestClient(t)
+ svc := NewIndicesRolloverService(client).
+ AddCondition("max_age", "7d").
+ AddCondition("max_docs", 1000)
+ data, err := json.Marshal(svc.getBody())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"conditions":{"max_age":"7d","max_docs":1000}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestIndicesRolloverBodyAddPredefinedConditions(t *testing.T) {
+ client := setupTestClient(t)
+ svc := NewIndicesRolloverService(client).
+ AddMaxIndexAgeCondition("2d").
+ AddMaxIndexDocsCondition(1000000)
+ data, err := json.Marshal(svc.getBody())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"conditions":{"max_age":"2d","max_docs":1000000}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestIndicesRolloverBodyComplex(t *testing.T) {
+ client := setupTestClient(t)
+ svc := NewIndicesRolloverService(client).
+ AddMaxIndexAgeCondition("2d").
+ AddMaxIndexDocsCondition(1000000).
+ AddSetting("index.number_of_shards", 2).
+ AddMapping("doc", map[string]interface{}{
+ "properties": map[string]interface{}{
+ "user": map[string]interface{}{
+ "type": "keyword",
+ },
+ },
+ })
+ data, err := json.Marshal(svc.getBody())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"conditions":{"max_age":"2d","max_docs":1000000},"mappings":{"doc":{"properties":{"user":{"type":"keyword"}}}},"settings":{"index.number_of_shards":2}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_segments.go b/vendor/github.com/olivere/elastic/indices_segments.go
new file mode 100644
index 000000000..133d1101e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_segments.go
@@ -0,0 +1,237 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesSegmentsService provides low-level information about the Lucene
+// segments that a (shard-level) index is built with. It can be used to report
+// more information on the state of a shard and an index, possibly
+// optimization information, data "wasted" on deletes, and so on.
+//
+// Find further documentation at
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.1/indices-segments.html.
+type IndicesSegmentsService struct {
+ client *Client
+ pretty bool
+ index []string
+ allowNoIndices *bool
+ expandWildcards string
+ ignoreUnavailable *bool
+ human *bool
+ operationThreading interface{}
+ verbose *bool
+}
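+
+// Example usage (an illustrative sketch; the index name is hypothetical):
+//
+//	res, err := client.IndexSegments("twitter").Human(true).Do(context.Background())
+//	if err != nil {
+//		// handle error
+//	}
+//	for name, index := range res.Indices {
+//		_, _ = name, index.Shards
+//	}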
+
+// NewIndicesSegmentsService creates a new IndicesSegmentsService.
+func NewIndicesSegmentsService(client *Client) *IndicesSegmentsService {
+ return &IndicesSegmentsService{
+ client: client,
+ }
+}
+
+// Index is a comma-separated list of index names; use `_all` or empty string
+// to perform the operation on all indices.
+func (s *IndicesSegmentsService) Index(indices ...string) *IndicesSegmentsService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices expression
+// resolves into no concrete indices. (This includes `_all` string or when
+// no indices have been specified).
+func (s *IndicesSegmentsService) AllowNoIndices(allowNoIndices bool) *IndicesSegmentsService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to concrete indices
+// that are open, closed or both.
+func (s *IndicesSegmentsService) ExpandWildcards(expandWildcards string) *IndicesSegmentsService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesSegmentsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesSegmentsService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Human, when set to true, returns time and byte-values in human-readable format.
+func (s *IndicesSegmentsService) Human(human bool) *IndicesSegmentsService {
+ s.human = &human
+ return s
+}
+
+// OperationThreading is undocumented in Elasticsearch as of now.
+func (s *IndicesSegmentsService) OperationThreading(operationThreading interface{}) *IndicesSegmentsService {
+ s.operationThreading = operationThreading
+ return s
+}
+
+// Verbose, when set to true, includes detailed memory usage by Lucene.
+func (s *IndicesSegmentsService) Verbose(verbose bool) *IndicesSegmentsService {
+ s.verbose = &verbose
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesSegmentsService) Pretty(pretty bool) *IndicesSegmentsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesSegmentsService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_segments", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else {
+ path = "/_segments"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ if s.operationThreading != nil {
+ params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading))
+ }
+ if s.verbose != nil {
+ params.Set("verbose", fmt.Sprintf("%v", *s.verbose))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesSegmentsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesSegmentsService) Do(ctx context.Context) (*IndicesSegmentsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesSegmentsResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesSegmentsResponse is the response of IndicesSegmentsService.Do.
+type IndicesSegmentsResponse struct {
+ // Shards provides information returned from shards.
+ Shards shardsInfo `json:"_shards"`
+
+ // Indices provides a map into the stats of an index.
+ // The key of the map is the index name.
+ Indices map[string]*IndexSegments `json:"indices,omitempty"`
+}
+
+type IndexSegments struct {
+ // Shards provides a map into the shard related information of an index.
+ // The key of the map is the number of a specific shard.
+ Shards map[string][]*IndexSegmentsShards `json:"shards,omitempty"`
+}
+
+type IndexSegmentsShards struct {
+ Routing *IndexSegmentsRouting `json:"routing,omitempty"`
+ NumCommittedSegments int64 `json:"num_committed_segments,omitempty"`
+ NumSearchSegments int64 `json:"num_search_segments"`
+
+ // Segments provides a map into the segment related information of a shard.
+ // The key of the map is the specific lucene segment id.
+ Segments map[string]*IndexSegmentsDetails `json:"segments,omitempty"`
+}
+
+type IndexSegmentsRouting struct {
+ State string `json:"state,omitempty"`
+ Primary bool `json:"primary,omitempty"`
+ Node string `json:"node,omitempty"`
+ RelocatingNode string `json:"relocating_node,omitempty"`
+}
+
+type IndexSegmentsDetails struct {
+ Generation int64 `json:"generation,omitempty"`
+ NumDocs int64 `json:"num_docs,omitempty"`
+ DeletedDocs int64 `json:"deleted_docs,omitempty"`
+ Size string `json:"size,omitempty"`
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+ Memory string `json:"memory,omitempty"`
+ MemoryInBytes int64 `json:"memory_in_bytes,omitempty"`
+ Committed bool `json:"committed,omitempty"`
+ Search bool `json:"search,omitempty"`
+ Version string `json:"version,omitempty"`
+ Compound bool `json:"compound,omitempty"`
+ MergeId string `json:"merge_id,omitempty"`
+ Sort []*IndexSegmentsSort `json:"sort,omitempty"`
+ RAMTree []*IndexSegmentsRamTree `json:"ram_tree,omitempty"`
+ Attributes map[string]string `json:"attributes,omitempty"`
+}
+
+type IndexSegmentsSort struct {
+ Field string `json:"field,omitempty"`
+ Mode string `json:"mode,omitempty"`
+ Missing interface{} `json:"missing,omitempty"`
+ Reverse bool `json:"reverse,omitempty"`
+}
+
+type IndexSegmentsRamTree struct {
+ Description string `json:"description,omitempty"`
+ Size string `json:"size,omitempty"`
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+ Children []*IndexSegmentsRamTree `json:"children,omitempty"`
+}
diff --git a/vendor/github.com/olivere/elastic/indices_segments_test.go b/vendor/github.com/olivere/elastic/indices_segments_test.go
new file mode 100644
index 000000000..2ec181cc1
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_segments_test.go
@@ -0,0 +1,86 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndicesSegments(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Expected string
+ }{
+ {
+ []string{},
+ "/_segments",
+ },
+ {
+ []string{"index1"},
+ "/index1/_segments",
+ },
+ {
+ []string{"index1", "index2"},
+ "/index1%2Cindex2/_segments",
+ },
+ }
+
+ for i, test := range tests {
+ path, _, err := client.IndexSegments().Index(test.Indices...).buildURL()
+ if err != nil {
+ t.Errorf("case #%d: %v", i+1, err)
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+}
+
+func TestIndexSegments(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ segments, err := client.IndexSegments(testIndexName).Pretty(true).Human(true).Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if segments == nil {
+ t.Fatalf("expected response; got: %v", segments)
+ }
+ indices, found := segments.Indices[testIndexName]
+ if !found {
+ t.Fatalf("expected index information about index %v; got: %v", testIndexName, found)
+ }
+ shards, found := indices.Shards["0"]
+ if !found {
+ t.Fatalf("expected shard information about index %v", testIndexName)
+ }
+ if shards == nil {
+ t.Fatalf("expected shard information to be != nil for index %v", testIndexName)
+ }
+ shard := shards[0]
+ if shard == nil {
+ t.Fatalf("expected shard information to be != nil for shard 0 in index %v", testIndexName)
+ }
+ if shard.Routing == nil {
+ t.Fatalf("expected shard routing information to be != nil for index %v", testIndexName)
+ }
+ segmentDetail, found := shard.Segments["_0"]
+ if !found {
+ t.Fatalf("expected segment detail to be != nil for index %v", testIndexName)
+ }
+ if segmentDetail == nil {
+ t.Fatalf("expected segment detail to be != nil for index %v", testIndexName)
+ }
+ if segmentDetail.NumDocs == 0 {
+ t.Fatal("expected segment to contain >= 1 docs")
+ }
+ if len(segmentDetail.Attributes) == 0 {
+ t.Fatalf("expected segment attributes map to contain at least one key, value pair for index %v", testIndexName)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_shrink.go b/vendor/github.com/olivere/elastic/indices_shrink.go
new file mode 100644
index 000000000..6ea72b281
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_shrink.go
@@ -0,0 +1,179 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesShrinkService allows you to shrink an existing index into a
+// new index with fewer primary shards.
+//
+// For further details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-shrink-index.html.
+type IndicesShrinkService struct {
+ client *Client
+ pretty bool
+ source string
+ target string
+ masterTimeout string
+ timeout string
+ waitForActiveShards string
+ bodyJson interface{}
+ bodyString string
+}
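+
+// Example usage (an illustrative sketch; the index names are hypothetical):
+//
+//	res, err := client.ShrinkIndex("my_source_index", "my_target_index").
+//		Do(context.Background())
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res.Acknowledged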
+
+// NewIndicesShrinkService creates a new IndicesShrinkService.
+func NewIndicesShrinkService(client *Client) *IndicesShrinkService {
+ return &IndicesShrinkService{
+ client: client,
+ }
+}
+
+// Source is the name of the source index to shrink.
+func (s *IndicesShrinkService) Source(source string) *IndicesShrinkService {
+ s.source = source
+ return s
+}
+
+// Target is the name of the target index to shrink into.
+func (s *IndicesShrinkService) Target(target string) *IndicesShrinkService {
+ s.target = target
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesShrinkService) MasterTimeout(masterTimeout string) *IndicesShrinkService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesShrinkService) Timeout(timeout string) *IndicesShrinkService {
+ s.timeout = timeout
+ return s
+}
+
+// WaitForActiveShards sets the number of active shards to wait for on
+// the shrunken index before the operation returns.
+func (s *IndicesShrinkService) WaitForActiveShards(waitForActiveShards string) *IndicesShrinkService {
+ s.waitForActiveShards = waitForActiveShards
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesShrinkService) Pretty(pretty bool) *IndicesShrinkService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson is the configuration for the target index (`settings` and `aliases`)
+// defined as a JSON-serializable instance to be sent as the request body.
+func (s *IndicesShrinkService) BodyJson(body interface{}) *IndicesShrinkService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is the configuration for the target index (`settings` and `aliases`)
+// defined as a string to send as the request body.
+func (s *IndicesShrinkService) BodyString(body string) *IndicesShrinkService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesShrinkService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{source}/_shrink/{target}", map[string]string{
+ "source": s.source,
+ "target": s.target,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.waitForActiveShards != "" {
+ params.Set("wait_for_active_shards", s.waitForActiveShards)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesShrinkService) Validate() error {
+ var invalid []string
+ if s.source == "" {
+ invalid = append(invalid, "Source")
+ }
+ if s.target == "" {
+ invalid = append(invalid, "Target")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesShrinkService) Do(ctx context.Context) (*IndicesShrinkResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else if s.bodyString != "" {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesShrinkResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesShrinkResponse is the response of IndicesShrinkService.Do.
+type IndicesShrinkResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
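
A minimal usage sketch of the service above (imports: context, fmt, github.com/olivere/elastic; the index names and the one-shard setting are illustrative assumptions, while ShrinkIndex, BodyJson, and Do are taken from the code in this file):

func shrinkIndexExample(ctx context.Context, client *elastic.Client) error {
	res, err := client.ShrinkIndex("logs", "logs-shrunk").
		BodyJson(map[string]interface{}{
			"settings": map[string]interface{}{
				"index.number_of_shards": 1, // the target needs fewer primary shards than the source
			},
		}).
		Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("shrink was not acknowledged")
	}
	return nil
}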
diff --git a/vendor/github.com/olivere/elastic/indices_shrink_test.go b/vendor/github.com/olivere/elastic/indices_shrink_test.go
new file mode 100644
index 000000000..06ab7d923
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_shrink_test.go
@@ -0,0 +1,34 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestIndicesShrinkBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Source string
+ Target string
+ Expected string
+ }{
+ {
+ "my_source_index",
+ "my_target_index",
+ "/my_source_index/_shrink/my_target_index",
+ },
+ }
+
+ for i, test := range tests {
+ path, _, err := client.ShrinkIndex(test.Source, test.Target).buildURL()
+ if err != nil {
+ t.Errorf("case #%d: %v", i+1, err)
+ continue
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/indices_stats.go b/vendor/github.com/olivere/elastic/indices_stats.go
new file mode 100644
index 000000000..20d35a6d4
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_stats.go
@@ -0,0 +1,384 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesStatsService provides stats on various metrics of one or more
+// indices. See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-stats.html.
+type IndicesStatsService struct {
+ client *Client
+ pretty bool
+ metric []string
+ index []string
+ level string
+ types []string
+ completionFields []string
+ fielddataFields []string
+ fields []string
+ groups []string
+ human *bool
+}
+
+// NewIndicesStatsService creates a new IndicesStatsService.
+func NewIndicesStatsService(client *Client) *IndicesStatsService {
+ return &IndicesStatsService{
+ client: client,
+ index: make([]string, 0),
+ metric: make([]string, 0),
+ completionFields: make([]string, 0),
+ fielddataFields: make([]string, 0),
+ fields: make([]string, 0),
+ groups: make([]string, 0),
+ types: make([]string, 0),
+ }
+}
+
+// Metric limits the information returned to the specific metrics. Options are:
+// docs, store, indexing, get, search, completion, fielddata, flush, merge,
+// query_cache, refresh, suggest, and warmer.
+func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService {
+ s.metric = append(s.metric, metric...)
+ return s
+}
+
+// Index is the list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *IndicesStatsService) Index(indices ...string) *IndicesStatsService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// Type is a list of document types for the `indexing` index metric.
+func (s *IndicesStatsService) Type(types ...string) *IndicesStatsService {
+ s.types = append(s.types, types...)
+ return s
+}
+
+// Level returns stats aggregated at cluster, index or shard level.
+func (s *IndicesStatsService) Level(level string) *IndicesStatsService {
+ s.level = level
+ return s
+}
+
+// CompletionFields is a list of fields for `fielddata` and `suggest`
+// index metric (supports wildcards).
+func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService {
+ s.completionFields = append(s.completionFields, completionFields...)
+ return s
+}
+
+// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
+func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService {
+ s.fielddataFields = append(s.fielddataFields, fielddataFields...)
+ return s
+}
+
+// Fields is a list of fields for `fielddata` and `completion` index metric
+// (supports wildcards).
+func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService {
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// Groups is a list of search groups for `search` index metric.
+func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService {
+ s.groups = append(s.groups, groups...)
+ return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *IndicesStatsService) Human(human bool) *IndicesStatsService {
+ s.human = &human
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human-readable.
+func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesStatsService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+ if len(s.index) > 0 && len(s.metric) > 0 {
+ path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "metric": strings.Join(s.metric, ","),
+ })
+ } else if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_stats", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else if len(s.metric) > 0 {
+ path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{
+ "metric": strings.Join(s.metric, ","),
+ })
+ } else {
+ path = "/_stats"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if len(s.groups) > 0 {
+ params.Set("groups", strings.Join(s.groups, ","))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ if s.level != "" {
+ params.Set("level", s.level)
+ }
+ if len(s.types) > 0 {
+ params.Set("types", strings.Join(s.types, ","))
+ }
+ if len(s.completionFields) > 0 {
+ params.Set("completion_fields", strings.Join(s.completionFields, ","))
+ }
+ if len(s.fielddataFields) > 0 {
+ params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesStatsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesStatsService) Do(ctx context.Context) (*IndicesStatsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesStatsResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesStatsResponse is the response of IndicesStatsService.Do.
+type IndicesStatsResponse struct {
+ // Shards provides information returned from shards.
+ Shards shardsInfo `json:"_shards"`
+
+ // All provides summary stats about all indices.
+ All *IndexStats `json:"_all,omitempty"`
+
+ // Indices provides a map into the stats of an index. The key of the
+ // map is the index name.
+ Indices map[string]*IndexStats `json:"indices,omitempty"`
+}
+
+// IndexStats is index stats for a specific index.
+type IndexStats struct {
+ Primaries *IndexStatsDetails `json:"primaries,omitempty"`
+ Total *IndexStatsDetails `json:"total,omitempty"`
+}
+
+type IndexStatsDetails struct {
+ Docs *IndexStatsDocs `json:"docs,omitempty"`
+ Store *IndexStatsStore `json:"store,omitempty"`
+ Indexing *IndexStatsIndexing `json:"indexing,omitempty"`
+ Get *IndexStatsGet `json:"get,omitempty"`
+ Search *IndexStatsSearch `json:"search,omitempty"`
+ Merges *IndexStatsMerges `json:"merges,omitempty"`
+ Refresh *IndexStatsRefresh `json:"refresh,omitempty"`
+ Flush *IndexStatsFlush `json:"flush,omitempty"`
+ Warmer *IndexStatsWarmer `json:"warmer,omitempty"`
+ FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"`
+ IdCache *IndexStatsIdCache `json:"id_cache,omitempty"`
+ Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"`
+ Percolate *IndexStatsPercolate `json:"percolate,omitempty"`
+ Completion *IndexStatsCompletion `json:"completion,omitempty"`
+ Segments *IndexStatsSegments `json:"segments,omitempty"`
+ Translog *IndexStatsTranslog `json:"translog,omitempty"`
+ Suggest *IndexStatsSuggest `json:"suggest,omitempty"`
+ QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"`
+}
+
+type IndexStatsDocs struct {
+ Count int64 `json:"count,omitempty"`
+ Deleted int64 `json:"deleted,omitempty"`
+}
+
+type IndexStatsStore struct {
+ Size string `json:"size,omitempty"` // human size, e.g. 119.3mb
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+}
+
+type IndexStatsIndexing struct {
+ IndexTotal int64 `json:"index_total,omitempty"`
+ IndexTime string `json:"index_time,omitempty"`
+ IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"`
+ IndexCurrent int64 `json:"index_current,omitempty"`
+ DeleteTotal int64 `json:"delete_total,omitempty"`
+ DeleteTime string `json:"delete_time,omitempty"`
+ DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"`
+ DeleteCurrent int64 `json:"delete_current,omitempty"`
+ NoopUpdateTotal int64 `json:"noop_update_total,omitempty"`
+}
+
+type IndexStatsGet struct {
+ Total int64 `json:"total,omitempty"`
+ GetTime string `json:"get_time,omitempty"`
+ TimeInMillis int64 `json:"time_in_millis,omitempty"`
+ ExistsTotal int64 `json:"exists_total,omitempty"`
+ ExistsTime string `json:"exists_time,omitempty"`
+ ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"`
+ MissingTotal int64 `json:"missing_total,omitempty"`
+ MissingTime string `json:"missing_time,omitempty"`
+ MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"`
+ Current int64 `json:"current,omitempty"`
+}
+
+type IndexStatsSearch struct {
+ OpenContexts int64 `json:"open_contexts,omitempty"`
+ QueryTotal int64 `json:"query_total,omitempty"`
+ QueryTime string `json:"query_time,omitempty"`
+ QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"`
+ QueryCurrent int64 `json:"query_current,omitempty"`
+ FetchTotal int64 `json:"fetch_total,omitempty"`
+ FetchTime string `json:"fetch_time,omitempty"`
+ FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"`
+ FetchCurrent int64 `json:"fetch_current,omitempty"`
+}
+
+type IndexStatsMerges struct {
+ Current int64 `json:"current,omitempty"`
+ CurrentDocs int64 `json:"current_docs,omitempty"`
+ CurrentSize string `json:"current_size,omitempty"`
+ CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+ TotalDocs int64 `json:"total_docs,omitempty"`
+ TotalSize string `json:"total_size,omitempty"`
+ TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"`
+}
+
+type IndexStatsRefresh struct {
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+}
+
+type IndexStatsFlush struct {
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+}
+
+type IndexStatsWarmer struct {
+ Current int64 `json:"current,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+}
+
+type IndexStatsFilterCache struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Evictions int64 `json:"evictions,omitempty"`
+}
+
+type IndexStatsIdCache struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+}
+
+type IndexStatsFielddata struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Evictions int64 `json:"evictions,omitempty"`
+}
+
+type IndexStatsPercolate struct {
+ Total int64 `json:"total,omitempty"`
+ GetTime string `json:"get_time,omitempty"`
+ TimeInMillis int64 `json:"time_in_millis,omitempty"`
+ Current int64 `json:"current,omitempty"`
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Queries int64 `json:"queries,omitempty"`
+}
+
+type IndexStatsCompletion struct {
+ Size string `json:"size,omitempty"`
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+}
+
+type IndexStatsSegments struct {
+ Count int64 `json:"count,omitempty"`
+ Memory string `json:"memory,omitempty"`
+ MemoryInBytes int64 `json:"memory_in_bytes,omitempty"`
+ IndexWriterMemory string `json:"index_writer_memory,omitempty"`
+ IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes,omitempty"`
+ IndexWriterMaxMemory string `json:"index_writer_max_memory,omitempty"`
+ IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes,omitempty"`
+ VersionMapMemory string `json:"version_map_memory,omitempty"`
+ VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes,omitempty"`
+ FixedBitSetMemory string `json:"fixed_bit_set,omitempty"`
+ FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes,omitempty"`
+}
+
+type IndexStatsTranslog struct {
+ Operations int64 `json:"operations,omitempty"`
+ Size string `json:"size,omitempty"`
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+}
+
+type IndexStatsSuggest struct {
+ Total int64 `json:"total,omitempty"`
+ Time string `json:"time,omitempty"`
+ TimeInMillis int64 `json:"time_in_millis,omitempty"`
+ Current int64 `json:"current,omitempty"`
+}
+
+type IndexStatsQueryCache struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Evictions int64 `json:"evictions,omitempty"`
+ HitCount int64 `json:"hit_count,omitempty"`
+ MissCount int64 `json:"miss_count,omitempty"`
+}
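
A brief usage sketch of the stats service (the index name "logs" is illustrative; IndexStats, Metric, Human, Do, and the response fields are taken from the code above):

func indexStatsExample(ctx context.Context, client *elastic.Client) error {
	stats, err := client.IndexStats("logs").
		Metric("docs", "store"). // limit the response to selected metrics
		Human(true).             // also fill the human-readable string fields
		Do(ctx)
	if err != nil {
		return err
	}
	if stat, found := stats.Indices["logs"]; found && stat.Total != nil && stat.Total.Docs != nil {
		fmt.Printf("logs holds %d documents\n", stat.Total.Docs.Count)
	}
	return nil
}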
diff --git a/vendor/github.com/olivere/elastic/indices_stats_test.go b/vendor/github.com/olivere/elastic/indices_stats_test.go
new file mode 100644
index 000000000..a3392c97a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/indices_stats_test.go
@@ -0,0 +1,86 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndexStatsBuildURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Metrics []string
+ Expected string
+ }{
+ {
+ []string{},
+ []string{},
+ "/_stats",
+ },
+ {
+ []string{"index1"},
+ []string{},
+ "/index1/_stats",
+ },
+ {
+ []string{},
+ []string{"metric1"},
+ "/_stats/metric1",
+ },
+ {
+ []string{"index1"},
+ []string{"metric1"},
+ "/index1/_stats/metric1",
+ },
+ {
+ []string{"index1", "index2"},
+ []string{"metric1"},
+ "/index1%2Cindex2/_stats/metric1",
+ },
+ {
+ []string{"index1", "index2"},
+ []string{"metric1", "metric2"},
+ "/index1%2Cindex2/_stats/metric1%2Cmetric2",
+ },
+ }
+
+ for i, test := range tests {
+ path, _, err := client.IndexStats().Index(test.Indices...).Metric(test.Metrics...).buildURL()
+ if err != nil {
+ t.Fatalf("case #%d: %v", i+1, err)
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+}
+
+func TestIndexStats(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ stats, err := client.IndexStats(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if stats == nil {
+ t.Fatalf("expected response; got: %v", stats)
+ }
+ stat, found := stats.Indices[testIndexName]
+ if !found {
+ t.Fatalf("expected stats about index %q; got: %v", testIndexName, found)
+ }
+ if stat.Total == nil {
+ t.Fatalf("expected total to be != nil; got: %v", stat.Total)
+ }
+ if stat.Total.Docs == nil {
+ t.Fatalf("expected total docs to be != nil; got: %v", stat.Total.Docs)
+ }
+ if stat.Total.Docs.Count == 0 {
+ t.Fatalf("expected total docs count to be > 0; got: %d", stat.Total.Docs.Count)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/ingest_delete_pipeline.go b/vendor/github.com/olivere/elastic/ingest_delete_pipeline.go
new file mode 100644
index 000000000..78b6d04f2
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/ingest_delete_pipeline.go
@@ -0,0 +1,129 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IngestDeletePipelineService deletes pipelines by ID.
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/delete-pipeline-api.html.
+type IngestDeletePipelineService struct {
+ client *Client
+ pretty bool
+ id string
+ masterTimeout string
+ timeout string
+}
+
+// NewIngestDeletePipelineService creates a new IngestDeletePipelineService.
+func NewIngestDeletePipelineService(client *Client) *IngestDeletePipelineService {
+ return &IngestDeletePipelineService{
+ client: client,
+ }
+}
+
+// Id is the ID of the pipeline to delete.
+func (s *IngestDeletePipelineService) Id(id string) *IngestDeletePipelineService {
+ s.id = id
+ return s
+}
+
+// MasterTimeout is an explicit operation timeout for connection to the master node.
+func (s *IngestDeletePipelineService) MasterTimeout(masterTimeout string) *IngestDeletePipelineService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IngestDeletePipelineService) Timeout(timeout string) *IngestDeletePipelineService {
+ s.timeout = timeout
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human-readable.
+func (s *IngestDeletePipelineService) Pretty(pretty bool) *IngestDeletePipelineService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IngestDeletePipelineService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{
+ "id": s.id,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IngestDeletePipelineService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IngestDeletePipelineService) Do(ctx context.Context) (*IngestDeletePipelineResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IngestDeletePipelineResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IngestDeletePipelineResponse is the response of IngestDeletePipelineService.Do.
+type IngestDeletePipelineResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
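
A usage sketch (the pipeline ID is illustrative; the call chain mirrors the test below):

func deletePipelineExample(ctx context.Context, client *elastic.Client) error {
	res, err := client.IngestDeletePipeline("my-pipeline").Do(ctx)
	if err != nil {
		// Elasticsearch answers with a 404 if the pipeline does not exist.
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("delete pipeline was not acknowledged")
	}
	return nil
}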
diff --git a/vendor/github.com/olivere/elastic/ingest_delete_pipeline_test.go b/vendor/github.com/olivere/elastic/ingest_delete_pipeline_test.go
new file mode 100644
index 000000000..1163e0f17
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/ingest_delete_pipeline_test.go
@@ -0,0 +1,31 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestIngestDeletePipelineURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Id string
+ Expected string
+ }{
+ {
+ "my-pipeline-id",
+ "/_ingest/pipeline/my-pipeline-id",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.IngestDeletePipeline(test.Id).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/ingest_get_pipeline.go b/vendor/github.com/olivere/elastic/ingest_get_pipeline.go
new file mode 100644
index 000000000..16a683261
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/ingest_get_pipeline.go
@@ -0,0 +1,121 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IngestGetPipelineService returns pipelines based on ID.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/get-pipeline-api.html
+// for documentation.
+type IngestGetPipelineService struct {
+ client *Client
+ pretty bool
+ id []string
+ masterTimeout string
+}
+
+// NewIngestGetPipelineService creates a new IngestGetPipelineService.
+func NewIngestGetPipelineService(client *Client) *IngestGetPipelineService {
+ return &IngestGetPipelineService{
+ client: client,
+ }
+}
+
+// Id is a list of pipeline ids. Wildcards supported.
+func (s *IngestGetPipelineService) Id(id ...string) *IngestGetPipelineService {
+ s.id = append(s.id, id...)
+ return s
+}
+
+// MasterTimeout is an explicit operation timeout for connection to master node.
+func (s *IngestGetPipelineService) MasterTimeout(masterTimeout string) *IngestGetPipelineService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human-readable.
+func (s *IngestGetPipelineService) Pretty(pretty bool) *IngestGetPipelineService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IngestGetPipelineService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ // Build URL
+ if len(s.id) > 0 {
+ path, err = uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{
+ "id": strings.Join(s.id, ","),
+ })
+ } else {
+ path = "/_ingest/pipeline"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IngestGetPipelineService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IngestGetPipelineService) Do(ctx context.Context) (IngestGetPipelineResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret IngestGetPipelineResponse
+ if err := json.Unmarshal(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IngestGetPipelineResponse is the response of IngestGetPipelineService.Do.
+type IngestGetPipelineResponse map[string]*IngestGetPipeline
+
+type IngestGetPipeline struct {
+ ID string `json:"id"`
+ Config map[string]interface{} `json:"config"`
+}
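
A usage sketch (the wildcard ID is illustrative; per buildURL above, an empty ID list fetches all pipelines):

func getPipelineExample(ctx context.Context, client *elastic.Client) error {
	pipelines, err := client.IngestGetPipeline("my-*").Do(ctx)
	if err != nil {
		return err
	}
	for id, p := range pipelines {
		fmt.Printf("pipeline %s: %v\n", id, p.Config) // Config is the raw pipeline definition
	}
	return nil
}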
diff --git a/vendor/github.com/olivere/elastic/ingest_get_pipeline_test.go b/vendor/github.com/olivere/elastic/ingest_get_pipeline_test.go
new file mode 100644
index 000000000..009b717ca
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/ingest_get_pipeline_test.go
@@ -0,0 +1,121 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIngestGetPipelineURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Id []string
+ Expected string
+ }{
+ {
+ nil,
+ "/_ingest/pipeline",
+ },
+ {
+ []string{"my-pipeline-id"},
+ "/_ingest/pipeline/my-pipeline-id",
+ },
+ {
+ []string{"*"},
+ "/_ingest/pipeline/%2A",
+ },
+ {
+ []string{"pipeline-1", "pipeline-2"},
+ "/_ingest/pipeline/pipeline-1%2Cpipeline-2",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.IngestGetPipeline(test.Id...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
+
+func TestIngestLifecycle(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ // With the new ES Docker images, XPack is already installed and returns a pipeline. So we cannot test for "no pipelines". Skipping for now.
+ /*
+ // Get all pipelines (returns 404 that indicates an error)
+ getres, err := client.IngestGetPipeline().Do(context.TODO())
+ if err == nil {
+ t.Fatal(err)
+ }
+ if getres != nil {
+ t.Fatalf("expected no response, got %v", getres)
+ }
+ //*/
+
+ // Add a pipeline
+ pipelineDef := `{
+ "description" : "reset retweets",
+ "processors" : [
+ {
+ "set" : {
+ "field": "retweets",
+ "value": 0
+ }
+ }
+ ]
+}`
+ putres, err := client.IngestPutPipeline("my-pipeline").BodyString(pipelineDef).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if putres == nil {
+ t.Fatal("expected response, got nil")
+ }
+ if want, have := true, putres.Acknowledged; want != have {
+ t.Fatalf("expected ack = %v, got %v", want, have)
+ }
+
+ // Get all pipelines again
+ getres, err := client.IngestGetPipeline().Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if have := len(getres); have == 0 {
+ t.Fatalf("expected at least 1 pipeline, got %d", have)
+ }
+ if _, found := getres["my-pipeline"]; !found {
+		t.Fatalf("expected to find pipeline with id %q", "my-pipeline")
+ }
+
+ // Get pipeline by ID
+ getres, err = client.IngestGetPipeline("my-pipeline").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, have := 1, len(getres); want != have {
+ t.Fatalf("expected %d pipelines, got %d", want, have)
+ }
+ if _, found := getres["my-pipeline"]; !found {
+		t.Fatalf("expected to find pipeline with id %q", "my-pipeline")
+ }
+
+ // Delete pipeline
+ delres, err := client.IngestDeletePipeline("my-pipeline").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if delres == nil {
+ t.Fatal("expected response, got nil")
+ }
+ if want, have := true, delres.Acknowledged; want != have {
+ t.Fatalf("expected ack = %v, got %v", want, have)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/ingest_put_pipeline.go b/vendor/github.com/olivere/elastic/ingest_put_pipeline.go
new file mode 100644
index 000000000..5781e7072
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/ingest_put_pipeline.go
@@ -0,0 +1,158 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IngestPutPipelineService adds pipelines and updates existing pipelines in
+// the cluster.
+//
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/put-pipeline-api.html.
+type IngestPutPipelineService struct {
+ client *Client
+ pretty bool
+ id string
+ masterTimeout string
+ timeout string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewIngestPutPipelineService creates a new IngestPutPipelineService.
+func NewIngestPutPipelineService(client *Client) *IngestPutPipelineService {
+ return &IngestPutPipelineService{
+ client: client,
+ }
+}
+
+// Id is the pipeline ID.
+func (s *IngestPutPipelineService) Id(id string) *IngestPutPipelineService {
+ s.id = id
+ return s
+}
+
+// MasterTimeout is an explicit operation timeout for connection to master node.
+func (s *IngestPutPipelineService) MasterTimeout(masterTimeout string) *IngestPutPipelineService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Timeout specifies an explicit operation timeout.
+func (s *IngestPutPipelineService) Timeout(timeout string) *IngestPutPipelineService {
+ s.timeout = timeout
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human-readable.
+func (s *IngestPutPipelineService) Pretty(pretty bool) *IngestPutPipelineService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson is the ingest definition, defined as a JSON-serializable document.
+// Use e.g. a map[string]interface{} here.
+func (s *IngestPutPipelineService) BodyJson(body interface{}) *IngestPutPipelineService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is the ingest definition, specified as a string.
+func (s *IngestPutPipelineService) BodyString(body string) *IngestPutPipelineService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IngestPutPipelineService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{
+ "id": s.id,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IngestPutPipelineService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if s.bodyString == "" && s.bodyJson == nil {
+ invalid = append(invalid, "BodyJson")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IngestPutPipelineService) Do(ctx context.Context) (*IngestPutPipelineResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IngestPutPipelineResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IngestPutPipelineResponse is the response of IngestPutPipelineService.Do.
+type IngestPutPipelineResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
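
A usage sketch, using the same kind of pipeline definition as the lifecycle test earlier in this diff (the ID and definition are illustrative):

func putPipelineExample(ctx context.Context, client *elastic.Client) error {
	def := `{
		"description": "reset retweets",
		"processors": [
			{"set": {"field": "retweets", "value": 0}}
		]
	}`
	res, err := client.IngestPutPipeline("my-pipeline").BodyString(def).Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("put pipeline was not acknowledged")
	}
	return nil
}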
diff --git a/vendor/github.com/olivere/elastic/ingest_put_pipeline_test.go b/vendor/github.com/olivere/elastic/ingest_put_pipeline_test.go
new file mode 100644
index 000000000..9609f2f53
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/ingest_put_pipeline_test.go
@@ -0,0 +1,31 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestIngestPutPipelineURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Id string
+ Expected string
+ }{
+ {
+ "my-pipeline-id",
+ "/_ingest/pipeline/my-pipeline-id",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.IngestPutPipeline(test.Id).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/ingest_simulate_pipeline.go b/vendor/github.com/olivere/elastic/ingest_simulate_pipeline.go
new file mode 100644
index 000000000..213f97bbb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/ingest_simulate_pipeline.go
@@ -0,0 +1,161 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IngestSimulatePipelineService executes a specific pipeline against the set of
+// documents provided in the body of the request.
+//
+// The API is documented at
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/simulate-pipeline-api.html.
+type IngestSimulatePipelineService struct {
+ client *Client
+ pretty bool
+ id string
+ verbose *bool
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewIngestSimulatePipelineService creates a new IngestSimulatePipelineService.
+func NewIngestSimulatePipelineService(client *Client) *IngestSimulatePipelineService {
+ return &IngestSimulatePipelineService{
+ client: client,
+ }
+}
+
+// Id specifies the pipeline ID.
+func (s *IngestSimulatePipelineService) Id(id string) *IngestSimulatePipelineService {
+ s.id = id
+ return s
+}
+
+// Verbose mode. Display data output for each processor in the executed pipeline.
+func (s *IngestSimulatePipelineService) Verbose(verbose bool) *IngestSimulatePipelineService {
+ s.verbose = &verbose
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human-readable.
+func (s *IngestSimulatePipelineService) Pretty(pretty bool) *IngestSimulatePipelineService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson is the simulate definition, defined as a JSON-serializable
+// document. Use e.g. a map[string]interface{} here.
+func (s *IngestSimulatePipelineService) BodyJson(body interface{}) *IngestSimulatePipelineService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is the simulate definition, defined as a string.
+func (s *IngestSimulatePipelineService) BodyString(body string) *IngestSimulatePipelineService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IngestSimulatePipelineService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ // Build URL
+ if s.id != "" {
+ path, err = uritemplates.Expand("/_ingest/pipeline/{id}/_simulate", map[string]string{
+ "id": s.id,
+ })
+ } else {
+ path = "/_ingest/pipeline/_simulate"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.verbose != nil {
+ params.Set("verbose", fmt.Sprintf("%v", *s.verbose))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IngestSimulatePipelineService) Validate() error {
+ var invalid []string
+ if s.bodyString == "" && s.bodyJson == nil {
+ invalid = append(invalid, "BodyJson")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IngestSimulatePipelineService) Do(ctx context.Context) (*IngestSimulatePipelineResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IngestSimulatePipelineResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IngestSimulatePipelineResponse is the response of IngestSimulatePipeline.Do.
+type IngestSimulatePipelineResponse struct {
+ Docs []*IngestSimulateDocumentResult `json:"docs"`
+}
+
+type IngestSimulateDocumentResult struct {
+ Doc map[string]interface{} `json:"doc"`
+ ProcessorResults []*IngestSimulateProcessorResult `json:"processor_results"`
+}
+
+type IngestSimulateProcessorResult struct {
+ ProcessorTag string `json:"tag"`
+ Doc map[string]interface{} `json:"doc"`
+}
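
A usage sketch; the "pipeline"/"docs" body shape follows the Elasticsearch simulate API, and the concrete processor and document are illustrative:

func simulatePipelineExample(ctx context.Context, client *elastic.Client) error {
	// No ID: the pipeline under test is embedded in the request body.
	body := `{
		"pipeline": {
			"processors": [{"set": {"field": "retweets", "value": 0}}]
		},
		"docs": [{"_source": {"retweets": 42}}]
	}`
	res, err := client.IngestSimulatePipeline().BodyString(body).Do(ctx)
	if err != nil {
		return err
	}
	for _, doc := range res.Docs {
		fmt.Printf("document after pipeline: %v\n", doc.Doc)
	}
	return nil
}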
diff --git a/vendor/github.com/olivere/elastic/ingest_simulate_pipeline_test.go b/vendor/github.com/olivere/elastic/ingest_simulate_pipeline_test.go
new file mode 100644
index 000000000..a254f85ff
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/ingest_simulate_pipeline_test.go
@@ -0,0 +1,35 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestIngestSimulatePipelineURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Id string
+ Expected string
+ }{
+ {
+ "",
+ "/_ingest/pipeline/_simulate",
+ },
+ {
+ "my-pipeline-id",
+ "/_ingest/pipeline/my-pipeline-id/_simulate",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.IngestSimulatePipeline().Id(test.Id).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/inner_hit.go b/vendor/github.com/olivere/elastic/inner_hit.go
new file mode 100644
index 000000000..c371fbf79
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/inner_hit.go
@@ -0,0 +1,160 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// InnerHit implements a simple join for parent/child, nested, and even
+// top-level documents in Elasticsearch.
+// It is an experimental feature for Elasticsearch version 1.5 or greater.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-inner-hits.html
+// for documentation.
+//
+// See the tests for SearchSource, HasChildFilter, HasChildQuery,
+// HasParentFilter, HasParentQuery, NestedFilter, and NestedQuery
+// for usage examples.
+type InnerHit struct {
+ source *SearchSource
+ path string
+ typ string
+
+ name string
+}
+
+// NewInnerHit creates a new InnerHit.
+func NewInnerHit() *InnerHit {
+ return &InnerHit{source: NewSearchSource()}
+}
+
+func (hit *InnerHit) Path(path string) *InnerHit {
+ hit.path = path
+ return hit
+}
+
+func (hit *InnerHit) Type(typ string) *InnerHit {
+ hit.typ = typ
+ return hit
+}
+
+func (hit *InnerHit) Query(query Query) *InnerHit {
+ hit.source.Query(query)
+ return hit
+}
+
+func (hit *InnerHit) From(from int) *InnerHit {
+ hit.source.From(from)
+ return hit
+}
+
+func (hit *InnerHit) Size(size int) *InnerHit {
+ hit.source.Size(size)
+ return hit
+}
+
+func (hit *InnerHit) TrackScores(trackScores bool) *InnerHit {
+ hit.source.TrackScores(trackScores)
+ return hit
+}
+
+func (hit *InnerHit) Explain(explain bool) *InnerHit {
+ hit.source.Explain(explain)
+ return hit
+}
+
+func (hit *InnerHit) Version(version bool) *InnerHit {
+ hit.source.Version(version)
+ return hit
+}
+
+func (hit *InnerHit) StoredField(storedFieldName string) *InnerHit {
+ hit.source.StoredField(storedFieldName)
+ return hit
+}
+
+func (hit *InnerHit) StoredFields(storedFieldNames ...string) *InnerHit {
+ hit.source.StoredFields(storedFieldNames...)
+ return hit
+}
+
+func (hit *InnerHit) NoStoredFields() *InnerHit {
+ hit.source.NoStoredFields()
+ return hit
+}
+
+func (hit *InnerHit) FetchSource(fetchSource bool) *InnerHit {
+ hit.source.FetchSource(fetchSource)
+ return hit
+}
+
+func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) *InnerHit {
+ hit.source.FetchSourceContext(fetchSourceContext)
+ return hit
+}
+
+func (hit *InnerHit) DocvalueFields(docvalueFields ...string) *InnerHit {
+ hit.source.DocvalueFields(docvalueFields...)
+ return hit
+}
+
+func (hit *InnerHit) DocvalueField(docvalueField string) *InnerHit {
+ hit.source.DocvalueField(docvalueField)
+ return hit
+}
+
+func (hit *InnerHit) ScriptFields(scriptFields ...*ScriptField) *InnerHit {
+ hit.source.ScriptFields(scriptFields...)
+ return hit
+}
+
+func (hit *InnerHit) ScriptField(scriptField *ScriptField) *InnerHit {
+ hit.source.ScriptField(scriptField)
+ return hit
+}
+
+func (hit *InnerHit) Sort(field string, ascending bool) *InnerHit {
+ hit.source.Sort(field, ascending)
+ return hit
+}
+
+func (hit *InnerHit) SortWithInfo(info SortInfo) *InnerHit {
+ hit.source.SortWithInfo(info)
+ return hit
+}
+
+func (hit *InnerHit) SortBy(sorter ...Sorter) *InnerHit {
+ hit.source.SortBy(sorter...)
+ return hit
+}
+
+func (hit *InnerHit) Highlight(highlight *Highlight) *InnerHit {
+ hit.source.Highlight(highlight)
+ return hit
+}
+
+func (hit *InnerHit) Highlighter() *Highlight {
+ return hit.source.Highlighter()
+}
+
+func (hit *InnerHit) Name(name string) *InnerHit {
+ hit.name = name
+ return hit
+}
+
+func (hit *InnerHit) Source() (interface{}, error) {
+ src, err := hit.source.Source()
+ if err != nil {
+ return nil, err
+ }
+ source, ok := src.(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+
+ // Notice that hit.typ and hit.path are not exported here.
+ // They are only used with SearchSource and serialized there.
+
+ if hit.name != "" {
+ source["name"] = hit.name
+ }
+ return source, nil
+}
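
A sketch of attaching an InnerHit to a nested query. NestedQuery, TermQuery, and the search service live in the same package but are not part of this diff, so treat the exact chaining (in particular NestedQuery.InnerHit) as an assumption; Name and Size come from the code above:

func innerHitExample(ctx context.Context, client *elastic.Client) error {
	q := elastic.NewNestedQuery(
		"comments", // path of the nested field; illustrative
		elastic.NewTermQuery("comments.author", "olivere"),
	).InnerHit(elastic.NewInnerHit().Name("comments").Size(3))
	_, err := client.Search("tweets").Query(q).Do(ctx)
	return err
}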
diff --git a/vendor/github.com/olivere/elastic/inner_hit_test.go b/vendor/github.com/olivere/elastic/inner_hit_test.go
new file mode 100644
index 000000000..fd9bd2e8a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/inner_hit_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestInnerHitEmpty(t *testing.T) {
+ hit := NewInnerHit()
+ src, err := hit.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{}`
+ if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestInnerHitWithName(t *testing.T) {
+ hit := NewInnerHit().Name("comments")
+ src, err := hit.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"name":"comments"}`
+ if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/logger.go b/vendor/github.com/olivere/elastic/logger.go
new file mode 100644
index 000000000..095eb4cd4
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/logger.go
@@ -0,0 +1,10 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Logger specifies the interface for all log operations.
+type Logger interface {
+ Printf(format string, v ...interface{})
+}
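
The standard library's *log.Logger already has a Printf method and therefore satisfies this interface. A sketch (SetTraceLog is the client option referenced in the commented-out line of ingest_get_pipeline_test.go above):

var _ elastic.Logger = log.New(os.Stdout, "", 0) // compile-time check: *log.Logger is a Logger

func newTracingClient() (*elastic.Client, error) {
	return elastic.NewClient(
		elastic.SetTraceLog(log.New(os.Stdout, "ELASTIC ", log.LstdFlags)),
	)
}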
diff --git a/vendor/github.com/olivere/elastic/mget.go b/vendor/github.com/olivere/elastic/mget.go
new file mode 100644
index 000000000..5202a9603
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/mget.go
@@ -0,0 +1,257 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// MgetService allows getting multiple documents based on an index,
+// an optional type, and an id (and possibly a routing value). The
+// response includes a docs array with all the fetched documents, each
+// element similar in structure to a document provided by the Get API.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-multi-get.html
+// for details.
+type MgetService struct {
+ client *Client
+ pretty bool
+ preference string
+ realtime *bool
+ refresh string
+ routing string
+ storedFields []string
+ items []*MultiGetItem
+}
+
+// NewMgetService initializes a new Multi GET API request call.
+func NewMgetService(client *Client) *MgetService {
+ builder := &MgetService{
+ client: client,
+ }
+ return builder
+}
+
+// Preference specifies the node or shard the operation should be performed
+// on (default: random).
+func (s *MgetService) Preference(preference string) *MgetService {
+ s.preference = preference
+ return s
+}
+
+// Refresh the shard containing the document before performing the operation.
+func (s *MgetService) Refresh(refresh string) *MgetService {
+ s.refresh = refresh
+ return s
+}
+
+// Realtime specifies whether to perform the operation in realtime or search mode.
+func (s *MgetService) Realtime(realtime bool) *MgetService {
+ s.realtime = &realtime
+ return s
+}
+
+// Routing is the specific routing value.
+func (s *MgetService) Routing(routing string) *MgetService {
+ s.routing = routing
+ return s
+}
+
+// StoredFields is a list of fields to return in the response.
+func (s *MgetService) StoredFields(storedFields ...string) *MgetService {
+ s.storedFields = append(s.storedFields, storedFields...)
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human-readable.
+func (s *MgetService) Pretty(pretty bool) *MgetService {
+ s.pretty = pretty
+ return s
+}
+
+// Add an item to the request.
+func (s *MgetService) Add(items ...*MultiGetItem) *MgetService {
+ s.items = append(s.items, items...)
+ return s
+}
+
+// Source returns the request body, which will be serialized into JSON.
+func (s *MgetService) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ items := make([]interface{}, len(s.items))
+ for i, item := range s.items {
+ src, err := item.Source()
+ if err != nil {
+ return nil, err
+ }
+ items[i] = src
+ }
+ source["docs"] = items
+ return source, nil
+}
+
+// Do executes the request.
+func (s *MgetService) Do(ctx context.Context) (*MgetResponse, error) {
+ // Build url
+ path := "/_mget"
+
+ params := make(url.Values)
+ if s.realtime != nil {
+ params.Add("realtime", fmt.Sprintf("%v", *s.realtime))
+ }
+ if s.preference != "" {
+ params.Add("preference", s.preference)
+ }
+ if s.refresh != "" {
+ params.Add("refresh", s.refresh)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if len(s.storedFields) > 0 {
+ params.Set("stored_fields", strings.Join(s.storedFields, ","))
+ }
+
+ // Set body
+ body, err := s.Source()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(MgetResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Multi Get Item --
+
+// MultiGetItem is a single document to retrieve via the MgetService.
+type MultiGetItem struct {
+ index string
+ typ string
+ id string
+ routing string
+ storedFields []string
+ version *int64 // see org.elasticsearch.common.lucene.uid.Versions
+ versionType string // see org.elasticsearch.index.VersionType
+ fsc *FetchSourceContext
+}
+
+// NewMultiGetItem initializes a new, single item for a Multi GET request.
+func NewMultiGetItem() *MultiGetItem {
+ return &MultiGetItem{}
+}
+
+// Index specifies the index name.
+func (item *MultiGetItem) Index(index string) *MultiGetItem {
+ item.index = index
+ return item
+}
+
+// Type specifies the type name.
+func (item *MultiGetItem) Type(typ string) *MultiGetItem {
+ item.typ = typ
+ return item
+}
+
+// Id specifies the identifier of the document.
+func (item *MultiGetItem) Id(id string) *MultiGetItem {
+ item.id = id
+ return item
+}
+
+// Routing is the specific routing value.
+func (item *MultiGetItem) Routing(routing string) *MultiGetItem {
+ item.routing = routing
+ return item
+}
+
+// StoredFields is a list of fields to return in the response.
+func (item *MultiGetItem) StoredFields(storedFields ...string) *MultiGetItem {
+ item.storedFields = append(item.storedFields, storedFields...)
+ return item
+}
+
+// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1),
+// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions.
+// The default in Elasticsearch is MatchAny (-3).
+func (item *MultiGetItem) Version(version int64) *MultiGetItem {
+ item.version = &version
+ return item
+}
+
+// VersionType can be "internal", "external", "external_gt", or "external_gte".
+// See org.elasticsearch.index.VersionType in Elasticsearch source.
+// It is "internal" by default.
+func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem {
+ item.versionType = versionType
+ return item
+}
+
+// FetchSource allows specifying source filtering.
+func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem {
+ item.fsc = fetchSourceContext
+ return item
+}
+
+// Source returns the serialized JSON to be sent to Elasticsearch as
+// part of a MultiGet search.
+func (item *MultiGetItem) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ source["_id"] = item.id
+
+ if item.index != "" {
+ source["_index"] = item.index
+ }
+ if item.typ != "" {
+ source["_type"] = item.typ
+ }
+ if item.fsc != nil {
+ src, err := item.fsc.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["_source"] = src
+ }
+ if item.routing != "" {
+ source["_routing"] = item.routing
+ }
+ if len(item.storedFields) > 0 {
+ source["stored_fields"] = strings.Join(item.storedFields, ",")
+ }
+ if item.version != nil {
+ source["version"] = fmt.Sprintf("%d", *item.version)
+ }
+ if item.versionType != "" {
+ source["version_type"] = item.versionType
+ }
+
+ return source, nil
+}
+
+// -- Result of a Multi Get request.
+
+// MgetResponse is the outcome of a Multi GET API request.
+type MgetResponse struct {
+ Docs []*GetResult `json:"docs,omitempty"`
+}
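
A usage sketch mirroring the test below (index name, type, and IDs are illustrative):

func multiGetExample(ctx context.Context, client *elastic.Client) error {
	res, err := client.MultiGet().
		Add(elastic.NewMultiGetItem().Index("tweets").Type("doc").Id("1")).
		Add(elastic.NewMultiGetItem().Index("tweets").Type("doc").Id("3")).
		Do(ctx)
	if err != nil {
		return err
	}
	for _, doc := range res.Docs {
		if doc.Error == nil && doc.Source != nil {
			fmt.Printf("fetched %s/%s\n", doc.Index, doc.Id)
		}
	}
	return nil
}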
diff --git a/vendor/github.com/olivere/elastic/mget_test.go b/vendor/github.com/olivere/elastic/mget_test.go
new file mode 100644
index 000000000..6b3ecd9f6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/mget_test.go
@@ -0,0 +1,96 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+func TestMultiGet(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add some documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Count documents
+ count, err := client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 3 {
+ t.Errorf("expected Count = %d; got %d", 3, count)
+ }
+
+ // Get documents 1 and 3
+ res, err := client.MultiGet().
+ Add(NewMultiGetItem().Index(testIndexName).Type("doc").Id("1")).
+ Add(NewMultiGetItem().Index(testIndexName).Type("doc").Id("3")).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected result to be != nil; got nil")
+ }
+ if res.Docs == nil {
+ t.Fatal("expected result docs to be != nil; got nil")
+ }
+ if len(res.Docs) != 2 {
+ t.Fatalf("expected to have 2 docs; got %d", len(res.Docs))
+ }
+
+ item := res.Docs[0]
+ if item.Error != nil {
+ t.Errorf("expected no error on item 0; got %v", item.Error)
+ }
+ if item.Source == nil {
+ t.Errorf("expected Source != nil; got %v", item.Source)
+ }
+ var doc tweet
+ if err := json.Unmarshal(*item.Source, &doc); err != nil {
+ t.Fatalf("expected to unmarshal item Source; got %v", err)
+ }
+ if doc.Message != tweet1.Message {
+ t.Errorf("expected Message of first tweet to be %q; got %q", tweet1.Message, doc.Message)
+ }
+
+ item = res.Docs[1]
+ if item.Error != nil {
+ t.Errorf("expected no error on item 1; got %v", item.Error)
+ }
+ if item.Source == nil {
+ t.Errorf("expected Source != nil; got %v", item.Source)
+ }
+ if err := json.Unmarshal(*item.Source, &doc); err != nil {
+ t.Fatalf("expected to unmarshal item Source; got %v", err)
+ }
+ if doc.Message != tweet3.Message {
+ t.Errorf("expected Message of second tweet to be %q; got %q", tweet3.Message, doc.Message)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/msearch.go b/vendor/github.com/olivere/elastic/msearch.go
new file mode 100644
index 000000000..ed54d3c2f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/msearch.go
@@ -0,0 +1,101 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// MultiSearchService executes one or more searches in one roundtrip.
+type MultiSearchService struct {
+ client *Client
+ requests []*SearchRequest
+ indices []string
+ pretty bool
+ routing string
+ preference string
+}
+
+func NewMultiSearchService(client *Client) *MultiSearchService {
+ builder := &MultiSearchService{
+ client: client,
+ requests: make([]*SearchRequest, 0),
+ indices: make([]string, 0),
+ }
+ return builder
+}
+
+func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService {
+ s.requests = append(s.requests, requests...)
+ return s
+}
+
+func (s *MultiSearchService) Index(indices ...string) *MultiSearchService {
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) {
+ // Build url
+ path := "/_msearch"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Set body
+ var lines []string
+ for _, sr := range s.requests {
+ // Set default indices if not specified in the request
+ if !sr.HasIndices() && len(s.indices) > 0 {
+ sr = sr.Index(s.indices...)
+ }
+
+ header, err := json.Marshal(sr.header())
+ if err != nil {
+ return nil, err
+ }
+ body, err := json.Marshal(sr.Body())
+ if err != nil {
+ return nil, err
+ }
+ lines = append(lines, string(header))
+ lines = append(lines, string(body))
+ }
+ body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n
+
+ // Get response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(MultiSearchResult)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+type MultiSearchResult struct {
+ Responses []*SearchResult `json:"responses,omitempty"`
+}
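
The Do method above frames the request as newline-delimited JSON: one header line plus one body line per SearchRequest, joined by '\n' with a mandatory trailing newline. Below is a hand-rolled sketch of that framing using only the standard library; the header and body values are illustrative, not the library's own types:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"
)

func main() {
	// One search = one header line plus one body line.
	header := map[string]interface{}{"index": "twitter"}
	body := map[string]interface{}{
		"query": map[string]interface{}{"match_all": map[string]interface{}{}},
	}

	var lines []string
	for _, part := range []interface{}{header, body} {
		b, err := json.Marshal(part)
		if err != nil {
			log.Fatal(err)
		}
		lines = append(lines, string(b))
	}

	// Join with newlines and keep the trailing one: _msearch requires it.
	payload := strings.Join(lines, "\n") + "\n"
	fmt.Print(payload)
	// {"index":"twitter"}
	// {"query":{"match_all":{}}}
}
```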
diff --git a/vendor/github.com/olivere/elastic/msearch_test.go b/vendor/github.com/olivere/elastic/msearch_test.go
new file mode 100644
index 000000000..79f2047e6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/msearch_test.go
@@ -0,0 +1,198 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ _ "net/http"
+ "testing"
+)
+
+func TestMultiSearch(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere",
+ Message: "Welcome to Golang and Elasticsearch.",
+ Tags: []string{"golang", "elasticsearch"},
+ }
+ tweet2 := tweet{
+ User: "olivere",
+ Message: "Another unrelated topic.",
+ Tags: []string{"golang"},
+ }
+ tweet3 := tweet{
+ User: "sandrae",
+ Message: "Cycling is fun.",
+ Tags: []string{"sports", "cycling"},
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Spawn two search queries with one roundtrip
+ q1 := NewMatchAllQuery()
+ q2 := NewTermQuery("tags", "golang")
+
+ sreq1 := NewSearchRequest().Index(testIndexName, testIndexName2).
+ Source(NewSearchSource().Query(q1).Size(10))
+ sreq2 := NewSearchRequest().Index(testIndexName).Type("doc").
+ Source(NewSearchSource().Query(q2))
+
+ searchResult, err := client.MultiSearch().
+ Add(sreq1, sreq2).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Responses == nil {
+ t.Fatal("expected responses != nil; got nil")
+ }
+ if len(searchResult.Responses) != 2 {
+ t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses))
+ }
+
+ sres := searchResult.Responses[0]
+ if sres.Hits == nil {
+ t.Errorf("expected Hits != nil; got nil")
+ }
+ if sres.Hits.TotalHits != 3 {
+ t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
+ }
+ if len(sres.Hits.Hits) != 3 {
+ t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
+ }
+ for _, hit := range sres.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ sres = searchResult.Responses[1]
+ if sres.Hits == nil {
+ t.Errorf("expected Hits != nil; got nil")
+ }
+ if sres.Hits.TotalHits != 2 {
+ t.Errorf("expected Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits)
+ }
+ if len(sres.Hits.Hits) != 2 {
+ t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits))
+ }
+ for _, hit := range sres.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestMultiSearchWithOneRequest(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere",
+ Message: "Welcome to Golang and Elasticsearch.",
+ Tags: []string{"golang", "elasticsearch"},
+ }
+ tweet2 := tweet{
+ User: "olivere",
+ Message: "Another unrelated topic.",
+ Tags: []string{"golang"},
+ }
+ tweet3 := tweet{
+ User: "sandrae",
+ Message: "Cycling is fun.",
+ Tags: []string{"sports", "cycling"},
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Spawn two search queries with one roundtrip
+ query := NewMatchAllQuery()
+ source := NewSearchSource().Query(query).Size(10)
+ sreq := NewSearchRequest().Source(source)
+
+ searchResult, err := client.MultiSearch().
+ Index(testIndexName).
+ Add(sreq).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Responses == nil {
+ t.Fatal("expected responses != nil; got nil")
+ }
+ if len(searchResult.Responses) != 1 {
+ t.Fatalf("expected 1 responses; got %d", len(searchResult.Responses))
+ }
+
+ sres := searchResult.Responses[0]
+ if sres.Hits == nil {
+ t.Errorf("expected Hits != nil; got nil")
+ }
+ if sres.Hits.TotalHits != 3 {
+ t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
+ }
+ if len(sres.Hits.Hits) != 3 {
+ t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
+ }
+ for _, hit := range sres.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/mtermvectors.go b/vendor/github.com/olivere/elastic/mtermvectors.go
new file mode 100644
index 000000000..755718e67
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/mtermvectors.go
@@ -0,0 +1,475 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// MultiTermvectorService returns information and statistics on terms in the
+// fields of a particular document. The document could be stored in the
+// index or artificially provided by the user.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-multi-termvectors.html
+// for documentation.
+type MultiTermvectorService struct {
+ client *Client
+ pretty bool
+ index string
+ typ string
+ fieldStatistics *bool
+ fields []string
+ ids []string
+ offsets *bool
+ parent string
+ payloads *bool
+ positions *bool
+ preference string
+ realtime *bool
+ routing string
+ termStatistics *bool
+ version interface{}
+ versionType string
+ bodyJson interface{}
+ bodyString string
+ docs []*MultiTermvectorItem
+}
+
+// NewMultiTermvectorService creates a new MultiTermvectorService.
+func NewMultiTermvectorService(client *Client) *MultiTermvectorService {
+ return &MultiTermvectorService{
+ client: client,
+ }
+}
+
+// Pretty indicates that the JSON response should be indented and human-readable.
+func (s *MultiTermvectorService) Pretty(pretty bool) *MultiTermvectorService {
+ s.pretty = pretty
+ return s
+}
+
+// Add adds documents to the MultiTermvectorService.
+func (s *MultiTermvectorService) Add(docs ...*MultiTermvectorItem) *MultiTermvectorService {
+ s.docs = append(s.docs, docs...)
+ return s
+}
+
+// Index in which the document resides.
+func (s *MultiTermvectorService) Index(index string) *MultiTermvectorService {
+ s.index = index
+ return s
+}
+
+// Type of the document.
+func (s *MultiTermvectorService) Type(typ string) *MultiTermvectorService {
+ s.typ = typ
+ return s
+}
+
+// FieldStatistics specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
+func (s *MultiTermvectorService) FieldStatistics(fieldStatistics bool) *MultiTermvectorService {
+ s.fieldStatistics = &fieldStatistics
+ return s
+}
+
+// Fields is a comma-separated list of fields to return. Applies to all returned documents unless otherwise specified in body "params" or "docs".
+func (s *MultiTermvectorService) Fields(fields []string) *MultiTermvectorService {
+ s.fields = fields
+ return s
+}
+
+// Ids is a comma-separated list of document IDs. You must define ids as a parameter or set "ids" or "docs" in the request body.
+func (s *MultiTermvectorService) Ids(ids []string) *MultiTermvectorService {
+ s.ids = ids
+ return s
+}
+
+// Offsets specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
+func (s *MultiTermvectorService) Offsets(offsets bool) *MultiTermvectorService {
+ s.offsets = &offsets
+ return s
+}
+
+// Parent id of documents. Applies to all returned documents unless otherwise specified in body "params" or "docs".
+func (s *MultiTermvectorService) Parent(parent string) *MultiTermvectorService {
+ s.parent = parent
+ return s
+}
+
+// Payloads specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
+func (s *MultiTermvectorService) Payloads(payloads bool) *MultiTermvectorService {
+ s.payloads = &payloads
+ return s
+}
+
+// Positions specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
+func (s *MultiTermvectorService) Positions(positions bool) *MultiTermvectorService {
+ s.positions = &positions
+ return s
+}
+
+// Preference specifies the node or shard the operation should be performed on (default: random). Applies to all returned documents unless otherwise specified in body "params" or "docs".
+func (s *MultiTermvectorService) Preference(preference string) *MultiTermvectorService {
+ s.preference = preference
+ return s
+}
+
+// Realtime specifies if requests are real-time as opposed to near-real-time (default: true).
+func (s *MultiTermvectorService) Realtime(realtime bool) *MultiTermvectorService {
+ s.realtime = &realtime
+ return s
+}
+
+// Routing is a specific routing value. Applies to all returned documents unless otherwise specified in body "params" or "docs".
+func (s *MultiTermvectorService) Routing(routing string) *MultiTermvectorService {
+ s.routing = routing
+ return s
+}
+
+// TermStatistics specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs".
+func (s *MultiTermvectorService) TermStatistics(termStatistics bool) *MultiTermvectorService {
+ s.termStatistics = &termStatistics
+ return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *MultiTermvectorService) Version(version interface{}) *MultiTermvectorService {
+ s.version = version
+ return s
+}
+
+// VersionType is a specific version type.
+func (s *MultiTermvectorService) VersionType(versionType string) *MultiTermvectorService {
+ s.versionType = versionType
+ return s
+}
+
+// BodyJson is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See the documentation.
+func (s *MultiTermvectorService) BodyJson(body interface{}) *MultiTermvectorService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See the documentation.
+func (s *MultiTermvectorService) BodyString(body string) *MultiTermvectorService {
+ s.bodyString = body
+ return s
+}
+
+func (s *MultiTermvectorService) Source() interface{} {
+ source := make(map[string]interface{})
+ docs := make([]interface{}, len(s.docs))
+ for i, doc := range s.docs {
+ docs[i] = doc.Source()
+ }
+ source["docs"] = docs
+ return source
+}
+
+// buildURL builds the URL for the operation.
+func (s *MultiTermvectorService) buildURL() (string, url.Values, error) {
+ var path string
+ var err error
+
+ if s.index != "" && s.typ != "" {
+ path, err = uritemplates.Expand("/{index}/{type}/_mtermvectors", map[string]string{
+ "index": s.index,
+ "type": s.typ,
+ })
+ } else if s.index != "" && s.typ == "" {
+ path, err = uritemplates.Expand("/{index}/_mtermvectors", map[string]string{
+ "index": s.index,
+ })
+ } else {
+ path = "/_mtermvectors"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.fieldStatistics != nil {
+ params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics))
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ if len(s.ids) > 0 {
+ params.Set("ids", strings.Join(s.ids, ","))
+ }
+ if s.offsets != nil {
+ params.Set("offsets", fmt.Sprintf("%v", *s.offsets))
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.payloads != nil {
+ params.Set("payloads", fmt.Sprintf("%v", *s.payloads))
+ }
+ if s.positions != nil {
+ params.Set("positions", fmt.Sprintf("%v", *s.positions))
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.realtime != nil {
+ params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.termStatistics != nil {
+ params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics))
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *MultiTermvectorService) Validate() error {
+ var invalid []string
+ if s.index == "" && s.typ != "" {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *MultiTermvectorService) Do(ctx context.Context) (*MultiTermvectorResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else if len(s.bodyString) > 0 {
+ body = s.bodyString
+ } else {
+ body = s.Source()
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(MultiTermvectorResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// MultiTermvectorResponse is the response of MultiTermvectorService.Do.
+type MultiTermvectorResponse struct {
+ Docs []*TermvectorsResponse `json:"docs"`
+}
+
+// -- MultiTermvectorItem --
+
+// MultiTermvectorItem is a single document to retrieve via MultiTermvectorService.
+type MultiTermvectorItem struct {
+ index string
+ typ string
+ id string
+ doc interface{}
+ fieldStatistics *bool
+ fields []string
+ perFieldAnalyzer map[string]string
+ offsets *bool
+ parent string
+ payloads *bool
+ positions *bool
+ preference string
+ realtime *bool
+ routing string
+ termStatistics *bool
+}
+
+func NewMultiTermvectorItem() *MultiTermvectorItem {
+ return &MultiTermvectorItem{}
+}
+
+func (s *MultiTermvectorItem) Index(index string) *MultiTermvectorItem {
+ s.index = index
+ return s
+}
+
+func (s *MultiTermvectorItem) Type(typ string) *MultiTermvectorItem {
+ s.typ = typ
+ return s
+}
+
+func (s *MultiTermvectorItem) Id(id string) *MultiTermvectorItem {
+ s.id = id
+ return s
+}
+
+// Doc is the document to analyze.
+func (s *MultiTermvectorItem) Doc(doc interface{}) *MultiTermvectorItem {
+ s.doc = doc
+ return s
+}
+
+// FieldStatistics specifies if document count, sum of document frequencies
+// and sum of total term frequencies should be returned.
+func (s *MultiTermvectorItem) FieldStatistics(fieldStatistics bool) *MultiTermvectorItem {
+ s.fieldStatistics = &fieldStatistics
+ return s
+}
+
+// Fields is a list of fields to return.
+func (s *MultiTermvectorItem) Fields(fields ...string) *MultiTermvectorItem {
+ if s.fields == nil {
+ s.fields = make([]string, 0)
+ }
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// PerFieldAnalyzer allows specifying an analyzer other than the one
+// defined for the field.
+func (s *MultiTermvectorItem) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *MultiTermvectorItem {
+ s.perFieldAnalyzer = perFieldAnalyzer
+ return s
+}
+
+// Offsets specifies if term offsets should be returned.
+func (s *MultiTermvectorItem) Offsets(offsets bool) *MultiTermvectorItem {
+ s.offsets = &offsets
+ return s
+}
+
+// Parent id of documents.
+func (s *MultiTermvectorItem) Parent(parent string) *MultiTermvectorItem {
+ s.parent = parent
+ return s
+}
+
+// Payloads specifies if term payloads should be returned.
+func (s *MultiTermvectorItem) Payloads(payloads bool) *MultiTermvectorItem {
+ s.payloads = &payloads
+ return s
+}
+
+// Positions specifies if term positions should be returned.
+func (s *MultiTermvectorItem) Positions(positions bool) *MultiTermvectorItem {
+ s.positions = &positions
+ return s
+}
+
+// Preference specifies the node or shard the operation
+// should be performed on (default: random).
+func (s *MultiTermvectorItem) Preference(preference string) *MultiTermvectorItem {
+ s.preference = preference
+ return s
+}
+
+// Realtime specifies if request is real-time as opposed to
+// near-real-time (default: true).
+func (s *MultiTermvectorItem) Realtime(realtime bool) *MultiTermvectorItem {
+ s.realtime = &realtime
+ return s
+}
+
+// Routing is a specific routing value.
+func (s *MultiTermvectorItem) Routing(routing string) *MultiTermvectorItem {
+ s.routing = routing
+ return s
+}
+
+// TermStatistics specifies if total term frequency and document frequency
+// should be returned.
+func (s *MultiTermvectorItem) TermStatistics(termStatistics bool) *MultiTermvectorItem {
+ s.termStatistics = &termStatistics
+ return s
+}
+
+// Source returns the serialized JSON to be sent to Elasticsearch as
+// part of a MultiTermvector.
+func (s *MultiTermvectorItem) Source() interface{} {
+ source := make(map[string]interface{})
+
+ source["_id"] = s.id
+
+ if s.index != "" {
+ source["_index"] = s.index
+ }
+ if s.typ != "" {
+ source["_type"] = s.typ
+ }
+ if s.fields != nil {
+ source["fields"] = s.fields
+ }
+ if s.fieldStatistics != nil {
+ source["field_statistics"] = fmt.Sprintf("%v", *s.fieldStatistics)
+ }
+ if s.offsets != nil {
+ source["offsets"] = s.offsets
+ }
+ if s.parent != "" {
+ source["parent"] = s.parent
+ }
+ if s.payloads != nil {
+ source["payloads"] = fmt.Sprintf("%v", *s.payloads)
+ }
+ if s.positions != nil {
+ source["positions"] = fmt.Sprintf("%v", *s.positions)
+ }
+ if s.preference != "" {
+ source["preference"] = s.preference
+ }
+ if s.realtime != nil {
+ source["realtime"] = fmt.Sprintf("%v", *s.realtime)
+ }
+ if s.routing != "" {
+ source["routing"] = s.routing
+ }
+ if s.termStatistics != nil {
+ source["term_statistics"] = fmt.Sprintf("%v", *s.termStatistics)
+ }
+ if s.doc != nil {
+ source["doc"] = s.doc
+ }
+ if len(s.perFieldAnalyzer) > 0 {
+ source["per_field_analyzer"] = s.perFieldAnalyzer
+ }
+
+ return source
+}
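
As with MultiGetItem, each MultiTermvectorItem serializes to one entry of the request's `docs` array. A minimal sketch of that serialization, assuming the `github.com/olivere/elastic` import path; note that the boolean flags are rendered via fmt.Sprintf, so `term_statistics` comes out as a quoted string:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Build one _mtermvectors doc entry using the builders defined above.
	item := elastic.NewMultiTermvectorItem().
		Index("twitter").
		Type("doc").
		Id("1").
		Fields("Message").
		TermStatistics(true)

	out, err := json.Marshal(item.Source())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
	// {"_id":"1","_index":"twitter","_type":"doc","fields":["Message"],"term_statistics":"true"}
}
```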
diff --git a/vendor/github.com/olivere/elastic/mtermvectors_test.go b/vendor/github.com/olivere/elastic/mtermvectors_test.go
new file mode 100644
index 000000000..5f90cd5e2
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/mtermvectors_test.go
@@ -0,0 +1,134 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestMultiTermVectorsValidateAndBuildURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Index string
+ Type string
+ Expected string
+ ExpectValidateFailure bool
+ }{
+ // #0: No index, no type
+ {
+ "",
+ "",
+ "/_mtermvectors",
+ false,
+ },
+ // #1: Index only
+ {
+ "twitter",
+ "",
+ "/twitter/_mtermvectors",
+ false,
+ },
+ // #2: Type without index
+ {
+ "",
+ "doc",
+ "",
+ true,
+ },
+ // #3: Both index and type
+ {
+ "twitter",
+ "doc",
+ "/twitter/doc/_mtermvectors",
+ false,
+ },
+ }
+
+ for i, test := range tests {
+ builder := client.MultiTermVectors().Index(test.Index).Type(test.Type)
+ // Validate
+ err := builder.Validate()
+ if err != nil {
+ if !test.ExpectValidateFailure {
+ t.Errorf("#%d: expected no error, got: %v", i, err)
+ continue
+ }
+ } else {
+ if test.ExpectValidateFailure {
+ t.Errorf("#%d: expected error, got: nil", i)
+ continue
+ }
+ // Build
+ path, _, err := builder.buildURL()
+ if err != nil {
+ t.Errorf("#%d: expected no error, got: %v", i, err)
+ continue
+ }
+ if path != test.Expected {
+ t.Errorf("#%d: expected %q; got: %q", i, test.Expected, path)
+ }
+ }
+ }
+}
+
+func TestMultiTermVectorsWithIds(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Count documents
+ count, err := client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 3 {
+ t.Errorf("expected Count = %d; got %d", 3, count)
+ }
+
+ // MultiTermVectors by specifying ID by 1 and 3
+ field := "Message"
+ res, err := client.MultiTermVectors().
+ Index(testIndexName).
+ Type("doc").
+ Add(NewMultiTermvectorItem().Index(testIndexName).Type("doc").Id("1").Fields(field)).
+ Add(NewMultiTermvectorItem().Index(testIndexName).Type("doc").Id("3").Fields(field)).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected to return information and statistics")
+ }
+ if res.Docs == nil {
+ t.Fatal("expected result docs to be != nil; got nil")
+ }
+ if len(res.Docs) != 2 {
+ t.Fatalf("expected to have 2 docs; got %d", len(res.Docs))
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/nodes_info.go b/vendor/github.com/olivere/elastic/nodes_info.go
new file mode 100644
index 000000000..9f1422a69
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/nodes_info.go
@@ -0,0 +1,313 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// NodesInfoService retrieves information about one, several, or all of
+// the cluster's nodes.
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-nodes-info.html.
+type NodesInfoService struct {
+ client *Client
+ pretty bool
+ nodeId []string
+ metric []string
+ flatSettings *bool
+ human *bool
+}
+
+// NewNodesInfoService creates a new NodesInfoService.
+func NewNodesInfoService(client *Client) *NodesInfoService {
+ return &NodesInfoService{
+ client: client,
+ nodeId: []string{"_all"},
+ metric: []string{"_all"},
+ }
+}
+
+// NodeId is a list of node IDs or names to limit the returned information.
+// Use "_local" to return information from the node you're connecting to,
+// leave empty to get information from all nodes.
+func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService {
+ s.nodeId = append(s.nodeId, nodeId...)
+ return s
+}
+
+// Metric is a list of metrics you wish returned. Leave empty to return all.
+// Valid metrics are: settings, os, process, jvm, thread_pool, network,
+// transport, http, and plugins.
+func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService {
+ s.metric = append(s.metric, metric...)
+ return s
+}
+
+// FlatSettings returns settings in flat format (default: false).
+func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *NodesInfoService) Human(human bool) *NodesInfoService {
+ s.human = &human
+ return s
+}
+
+// Pretty indicates whether to indent the returned JSON.
+func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *NodesInfoService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{
+ "node_id": strings.Join(s.nodeId, ","),
+ "metric": strings.Join(s.metric, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *NodesInfoService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *NodesInfoService) Do(ctx context.Context) (*NodesInfoResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(NodesInfoResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// NodesInfoResponse is the response of NodesInfoService.Do.
+type NodesInfoResponse struct {
+ ClusterName string `json:"cluster_name"`
+ Nodes map[string]*NodesInfoNode `json:"nodes"`
+}
+
+type NodesInfoNode struct {
+ // Name of the node, e.g. "Mister Fear"
+ Name string `json:"name"`
+ // TransportAddress, e.g. "127.0.0.1:9300"
+ TransportAddress string `json:"transport_address"`
+ // Host is the host name, e.g. "macbookair"
+ Host string `json:"host"`
+ // IP is the IP address, e.g. "192.168.1.2"
+ IP string `json:"ip"`
+ // Version is the Elasticsearch version running on the node, e.g. "1.4.3"
+ Version string `json:"version"`
+ // Build is the Elasticsearch build, e.g. "36a29a7"
+ Build string `json:"build"`
+ // HTTPAddress, e.g. "127.0.0.1:9200"
+ HTTPAddress string `json:"http_address"`
+ // HTTPSAddress, e.g. "127.0.0.1:9200"
+ HTTPSAddress string `json:"https_address"`
+
+ // Attributes of the node.
+ Attributes map[string]interface{} `json:"attributes"`
+
+ // Settings of the node, e.g. paths and pidfile.
+ Settings map[string]interface{} `json:"settings"`
+
+ // OS information, e.g. CPU and memory.
+ OS *NodesInfoNodeOS `json:"os"`
+
+ // Process information, e.g. max file descriptors.
+ Process *NodesInfoNodeProcess `json:"process"`
+
+ // JVM information, e.g. VM version.
+ JVM *NodesInfoNodeJVM `json:"jvm"`
+
+ // ThreadPool information.
+ ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"`
+
+ // Network information.
+ Network *NodesInfoNodeNetwork `json:"network"`
+
+ // Transport information.
+ Transport *NodesInfoNodeTransport `json:"transport"`
+
+ // HTTP information.
+ HTTP *NodesInfoNodeHTTP `json:"http"`
+
+ // Plugins information.
+ Plugins []*NodesInfoNodePlugin `json:"plugins"`
+}
+
+type NodesInfoNodeOS struct {
+ RefreshInterval string `json:"refresh_interval"` // e.g. 1s
+ RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
+ AvailableProcessors int `json:"available_processors"` // e.g. 4
+
+ // CPU information
+ CPU struct {
+ Vendor string `json:"vendor"` // e.g. Intel
+ Model string `json:"model"` // e.g. iMac15,1
+ MHz int `json:"mhz"` // e.g. 3500
+ TotalCores int `json:"total_cores"` // e.g. 4
+ TotalSockets int `json:"total_sockets"` // e.g. 4
+ CoresPerSocket int `json:"cores_per_socket"` // e.g. 16
+ CacheSizeInBytes int `json:"cache_size_in_bytes"` // e.g. 256
+ } `json:"cpu"`
+
+ // Mem information
+ Mem struct {
+ Total string `json:"total"` // e.g. 16gb
+ TotalInBytes int `json:"total_in_bytes"` // e.g. 17179869184
+ } `json:"mem"`
+
+ // Swap information
+ Swap struct {
+ Total string `json:"total"` // e.g. 1gb
+ TotalInBytes int `json:"total_in_bytes"` // e.g. 1073741824
+ } `json:"swap"`
+}
+
+type NodesInfoNodeProcess struct {
+ RefreshInterval string `json:"refresh_interval"` // e.g. 1s
+ RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
+ ID int `json:"id"` // process id, e.g. 87079
+ MaxFileDescriptors int `json:"max_file_descriptors"` // e.g. 32768
+ Mlockall bool `json:"mlockall"` // e.g. false
+}
+
+type NodesInfoNodeJVM struct {
+ PID int `json:"pid"` // process id, e.g. 87079
+ Version string `json:"version"` // e.g. "1.8.0_25"
+ VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM"
+ VMVersion string `json:"vm_version"` // e.g. "25.25-b02"
+ VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation"
+ StartTime time.Time `json:"start_time"` // e.g. "2015-01-03T15:18:30.982Z"
+ StartTimeInMillis int64 `json:"start_time_in_millis"`
+
+ // Mem information
+ Mem struct {
+ HeapInit string `json:"heap_init"` // e.g. 1gb
+ HeapInitInBytes int `json:"heap_init_in_bytes"`
+ HeapMax string `json:"heap_max"` // e.g. 4gb
+ HeapMaxInBytes int `json:"heap_max_in_bytes"`
+ NonHeapInit string `json:"non_heap_init"` // e.g. 2.4mb
+ NonHeapInitInBytes int `json:"non_heap_init_in_bytes"`
+ NonHeapMax string `json:"non_heap_max"` // e.g. 0b
+ NonHeapMaxInBytes int `json:"non_heap_max_in_bytes"`
+ DirectMax string `json:"direct_max"` // e.g. 4gb
+ DirectMaxInBytes int `json:"direct_max_in_bytes"`
+ } `json:"mem"`
+
+ GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"]
+ MemoryPools []string `json:"memory_pools"` // e.g. ["Code Cache", "Metaspace"]
+}
+
+type NodesInfoNodeThreadPool struct {
+ Percolate *NodesInfoNodeThreadPoolSection `json:"percolate"`
+ Bench *NodesInfoNodeThreadPoolSection `json:"bench"`
+ Listener *NodesInfoNodeThreadPoolSection `json:"listener"`
+ Index *NodesInfoNodeThreadPoolSection `json:"index"`
+ Refresh *NodesInfoNodeThreadPoolSection `json:"refresh"`
+ Suggest *NodesInfoNodeThreadPoolSection `json:"suggest"`
+ Generic *NodesInfoNodeThreadPoolSection `json:"generic"`
+ Warmer *NodesInfoNodeThreadPoolSection `json:"warmer"`
+ Search *NodesInfoNodeThreadPoolSection `json:"search"`
+ Flush *NodesInfoNodeThreadPoolSection `json:"flush"`
+ Optimize *NodesInfoNodeThreadPoolSection `json:"optimize"`
+ Management *NodesInfoNodeThreadPoolSection `json:"management"`
+ Get *NodesInfoNodeThreadPoolSection `json:"get"`
+ Merge *NodesInfoNodeThreadPoolSection `json:"merge"`
+ Bulk *NodesInfoNodeThreadPoolSection `json:"bulk"`
+ Snapshot *NodesInfoNodeThreadPoolSection `json:"snapshot"`
+}
+
+type NodesInfoNodeThreadPoolSection struct {
+ Type string `json:"type"` // e.g. fixed
+ Min int `json:"min"` // e.g. 4
+ Max int `json:"max"` // e.g. 4
+ KeepAlive string `json:"keep_alive"` // e.g. "5m"
+ QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1
+}
+
+type NodesInfoNodeNetwork struct {
+ RefreshInterval string `json:"refresh_interval"` // e.g. 1s
+ RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
+ PrimaryInterface struct {
+ Address string `json:"address"` // e.g. 192.168.1.2
+ Name string `json:"name"` // e.g. en0
+ MACAddress string `json:"mac_address"` // e.g. 11:22:33:44:55:66
+ } `json:"primary_interface"`
+}
+
+type NodesInfoNodeTransport struct {
+ BoundAddress []string `json:"bound_address"`
+ PublishAddress string `json:"publish_address"`
+ Profiles map[string]*NodesInfoNodeTransportProfile `json:"profiles"`
+}
+
+type NodesInfoNodeTransportProfile struct {
+ BoundAddress []string `json:"bound_address"`
+ PublishAddress string `json:"publish_address"`
+}
+
+type NodesInfoNodeHTTP struct {
+ BoundAddress []string `json:"bound_address"` // e.g. ["127.0.0.1:9200", "[fe80::1]:9200", "[::1]:9200"]
+ PublishAddress string `json:"publish_address"` // e.g. "127.0.0.1:9200"
+ MaxContentLength string `json:"max_content_length"` // e.g. "100mb"
+ MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"`
+}
+
+type NodesInfoNodePlugin struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Site bool `json:"site"`
+ JVM bool `json:"jvm"`
+ URL string `json:"url"` // e.g. /_plugin/dummy/
+}
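
A hedged usage sketch for the service above: it assumes a reachable cluster on the default http://127.0.0.1:9200 (the elastic.NewClient default) and prints each node's Elasticsearch and JVM details using the response structs defined here:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient() // assumes a cluster on http://127.0.0.1:9200
	if err != nil {
		log.Fatal(err)
	}

	// Fetch info for all nodes and all metrics (the service defaults).
	info, err := client.NodesInfo().Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("cluster:", info.ClusterName)
	for id, node := range info.Nodes {
		if node.JVM != nil {
			fmt.Printf("node %s: ES %s, JVM %s, heap max %s\n",
				id, node.Version, node.JVM.Version, node.JVM.Mem.HeapMax)
		}
	}
}
```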
diff --git a/vendor/github.com/olivere/elastic/nodes_info_test.go b/vendor/github.com/olivere/elastic/nodes_info_test.go
new file mode 100644
index 000000000..41d997584
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/nodes_info_test.go
@@ -0,0 +1,43 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestNodesInfo(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ info, err := client.NodesInfo().Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if info == nil {
+ t.Fatal("expected nodes info")
+ }
+
+ if info.ClusterName == "" {
+ t.Errorf("expected cluster name; got: %q", info.ClusterName)
+ }
+ if len(info.Nodes) == 0 {
+ t.Errorf("expected some nodes; got: %d", len(info.Nodes))
+ }
+ for id, node := range info.Nodes {
+ if id == "" {
+ t.Errorf("expected node id; got: %q", id)
+ }
+ if node == nil {
+ t.Fatalf("expected node info; got: %v", node)
+ }
+ if node.IP == "" {
+ t.Errorf("expected node IP; got: %q", node.IP)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/nodes_stats.go b/vendor/github.com/olivere/elastic/nodes_stats.go
new file mode 100644
index 000000000..7c5f0c9d6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/nodes_stats.go
@@ -0,0 +1,703 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// NodesStatsService returns node statistics.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-nodes-stats.html
+// for details.
+type NodesStatsService struct {
+ client *Client
+ pretty bool
+ metric []string
+ indexMetric []string
+ nodeId []string
+ completionFields []string
+ fielddataFields []string
+ fields []string
+ groups *bool
+ human *bool
+ level string
+ timeout string
+ types []string
+}
+
+// NewNodesStatsService creates a new NodesStatsService.
+func NewNodesStatsService(client *Client) *NodesStatsService {
+ return &NodesStatsService{
+ client: client,
+ }
+}
+
+// Metric limits the information returned to the specified metrics.
+func (s *NodesStatsService) Metric(metric ...string) *NodesStatsService {
+ s.metric = append(s.metric, metric...)
+ return s
+}
+
+// IndexMetric limits the information returned for the `indices` metric
+// to the specified index metrics. It is ignored unless the `indices`
+// (or `all`) metric is specified.
+func (s *NodesStatsService) IndexMetric(indexMetric ...string) *NodesStatsService {
+ s.indexMetric = append(s.indexMetric, indexMetric...)
+ return s
+}
+
+// NodeId is a list of node IDs or names to limit the returned information;
+// use `_local` to return information from the node you're connecting to;
+// leave empty to get information from all nodes.
+func (s *NodesStatsService) NodeId(nodeId ...string) *NodesStatsService {
+ s.nodeId = append(s.nodeId, nodeId...)
+ return s
+}
+
+// CompletionFields is a list of fields for `fielddata` and `suggest`
+// index metric (supports wildcards).
+func (s *NodesStatsService) CompletionFields(completionFields ...string) *NodesStatsService {
+ s.completionFields = append(s.completionFields, completionFields...)
+ return s
+}
+
+// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
+func (s *NodesStatsService) FielddataFields(fielddataFields ...string) *NodesStatsService {
+ s.fielddataFields = append(s.fielddataFields, fielddataFields...)
+ return s
+}
+
+// Fields is a list of fields for `fielddata` and `completion` index metric (supports wildcards).
+func (s *NodesStatsService) Fields(fields ...string) *NodesStatsService {
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// Groups is a list of search groups for `search` index metric.
+func (s *NodesStatsService) Groups(groups bool) *NodesStatsService {
+ s.groups = &groups
+ return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *NodesStatsService) Human(human bool) *NodesStatsService {
+ s.human = &human
+ return s
+}
+
+// Level specifies whether to return indices stats aggregated at node, index or shard level.
+func (s *NodesStatsService) Level(level string) *NodesStatsService {
+ s.level = level
+ return s
+}
+
+// Timeout specifies an explicit operation timeout.
+func (s *NodesStatsService) Timeout(timeout string) *NodesStatsService {
+ s.timeout = timeout
+ return s
+}
+
+// Types is a list of document types for the `indexing` index metric.
+func (s *NodesStatsService) Types(types ...string) *NodesStatsService {
+ s.types = append(s.types, types...)
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human-readable.
+func (s *NodesStatsService) Pretty(pretty bool) *NodesStatsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *NodesStatsService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 {
+ path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}/{index_metric}", map[string]string{
+ "index_metric": strings.Join(s.indexMetric, ","),
+ "node_id": strings.Join(s.nodeId, ","),
+ "metric": strings.Join(s.metric, ","),
+ })
+ } else if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 {
+ path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}", map[string]string{
+ "node_id": strings.Join(s.nodeId, ","),
+ "metric": strings.Join(s.metric, ","),
+ })
+ } else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 {
+ path, err = uritemplates.Expand("/_nodes/{node_id}/stats/_all/{index_metric}", map[string]string{
+ "index_metric": strings.Join(s.indexMetric, ","),
+ "node_id": strings.Join(s.nodeId, ","),
+ })
+ } else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) == 0 {
+ path, err = uritemplates.Expand("/_nodes/{node_id}/stats", map[string]string{
+ "node_id": strings.Join(s.nodeId, ","),
+ })
+ } else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 {
+ path, err = uritemplates.Expand("/_nodes/stats/{metric}/{index_metric}", map[string]string{
+ "index_metric": strings.Join(s.indexMetric, ","),
+ "metric": strings.Join(s.metric, ","),
+ })
+ } else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 {
+ path, err = uritemplates.Expand("/_nodes/stats/{metric}", map[string]string{
+ "metric": strings.Join(s.metric, ","),
+ })
+ } else if len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 {
+ path, err = uritemplates.Expand("/_nodes/stats/_all/{index_metric}", map[string]string{
+ "index_metric": strings.Join(s.indexMetric, ","),
+ })
+ } else { // len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) == 0
+ path = "/_nodes/stats"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if len(s.completionFields) > 0 {
+ params.Set("completion_fields", strings.Join(s.completionFields, ","))
+ }
+ if len(s.fielddataFields) > 0 {
+ params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ if s.groups != nil {
+ params.Set("groups", fmt.Sprintf("%v", *s.groups))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ if s.level != "" {
+ params.Set("level", s.level)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if len(s.types) > 0 {
+ params.Set("types", strings.Join(s.types, ","))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *NodesStatsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *NodesStatsService) Do(ctx context.Context) (*NodesStatsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(NodesStatsResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// NodesStatsResponse is the response of NodesStatsService.Do.
+type NodesStatsResponse struct {
+ ClusterName string `json:"cluster_name"`
+ Nodes map[string]*NodesStatsNode `json:"nodes"`
+}
+
+type NodesStatsNode struct {
+ // Timestamp when these stats were gathered.
+ Timestamp int64 `json:"timestamp"`
+ // Name of the node, e.g. "Mister Fear"
+ Name string `json:"name"`
+ // TransportAddress, e.g. "127.0.0.1:9300"
+ TransportAddress string `json:"transport_address"`
+ // Host is the host name, e.g. "macbookair"
+ Host string `json:"host"`
+ // IP is an IP address, e.g. "192.168.1.2"
+ IP string `json:"ip"`
+ // Roles is a list of the roles of the node, e.g. master, data, ingest.
+ Roles []string `json:"roles"`
+
+ // Attributes of the node.
+ Attributes map[string]interface{} `json:"attributes"`
+
+ // Indices returns index information.
+ Indices *NodesStatsIndex `json:"indices"`
+
+ // OS information, e.g. CPU and memory.
+ OS *NodesStatsNodeOS `json:"os"`
+
+ // Process information, e.g. max file descriptors.
+ Process *NodesStatsNodeProcess `json:"process"`
+
+ // JVM information, e.g. VM version.
+ JVM *NodesStatsNodeJVM `json:"jvm"`
+
+ // ThreadPool information.
+ ThreadPool map[string]*NodesStatsNodeThreadPool `json:"thread_pool"`
+
+ // FS returns information about the filesystem.
+ FS *NodesStatsNodeFS `json:"fs"`
+
+ // Transport information.
+ Transport *NodesStatsNodeTransport `json:"transport"`
+
+ // HTTP information.
+ HTTP *NodesStatsNodeHTTP `json:"http"`
+
+ // Breaker contains information about circuit breakers.
+ Breaker map[string]*NodesStatsBreaker `json:"breakers"`
+
+ // ScriptStats information.
+ ScriptStats *NodesStatsScriptStats `json:"script"`
+
+ // Discovery information.
+ Discovery *NodesStatsDiscovery `json:"discovery"`
+
+ // Ingest information
+ Ingest *NodesStatsIngest `json:"ingest"`
+}
+
+type NodesStatsIndex struct {
+ Docs *NodesStatsDocsStats `json:"docs"`
+ Store *NodesStatsStoreStats `json:"store"`
+ Indexing *NodesStatsIndexingStats `json:"indexing"`
+ Get *NodesStatsGetStats `json:"get"`
+ Search *NodesStatsSearchStats `json:"search"`
+ Merges *NodesStatsMergeStats `json:"merges"`
+ Refresh *NodesStatsRefreshStats `json:"refresh"`
+ Flush *NodesStatsFlushStats `json:"flush"`
+ Warmer *NodesStatsWarmerStats `json:"warmer"`
+ QueryCache *NodesStatsQueryCacheStats `json:"query_cache"`
+ Fielddata *NodesStatsFielddataStats `json:"fielddata"`
+ Percolate *NodesStatsPercolateStats `json:"percolate"`
+ Completion *NodesStatsCompletionStats `json:"completion"`
+ Segments *NodesStatsSegmentsStats `json:"segments"`
+ Translog *NodesStatsTranslogStats `json:"translog"`
+ Suggest *NodesStatsSuggestStats `json:"suggest"`
+ RequestCache *NodesStatsRequestCacheStats `json:"request_cache"`
+ Recovery NodesStatsRecoveryStats `json:"recovery"`
+
+ Indices map[string]*NodesStatsIndex `json:"indices"` // for level=indices
+ Shards map[string]*NodesStatsIndex `json:"shards"` // for level=shards
+}
+
+type NodesStatsDocsStats struct {
+ Count int64 `json:"count"`
+ Deleted int64 `json:"deleted"`
+}
+
+type NodesStatsStoreStats struct {
+ Size string `json:"size"`
+ SizeInBytes int64 `json:"size_in_bytes"`
+}
+
+type NodesStatsIndexingStats struct {
+ IndexTotal int64 `json:"index_total"`
+ IndexTime string `json:"index_time"`
+ IndexTimeInMillis int64 `json:"index_time_in_millis"`
+ IndexCurrent int64 `json:"index_current"`
+ IndexFailed int64 `json:"index_failed"`
+ DeleteTotal int64 `json:"delete_total"`
+ DeleteTime string `json:"delete_time"`
+ DeleteTimeInMillis int64 `json:"delete_time_in_millis"`
+ DeleteCurrent int64 `json:"delete_current"`
+ NoopUpdateTotal int64 `json:"noop_update_total"`
+
+ Types map[string]*NodesStatsIndexingStats `json:"types"` // stats for individual types
+}
+
+type NodesStatsGetStats struct {
+ Total int64 `json:"total"`
+ Time string `json:"get_time"`
+ TimeInMillis int64 `json:"time_in_millis"`
+ Exists int64 `json:"exists"`
+ ExistsTime string `json:"exists_time"`
+ ExistsTimeInMillis int64 `json:"exists_in_millis"`
+ Missing int64 `json:"missing"`
+ MissingTime string `json:"missing_time"`
+ MissingTimeInMillis int64 `json:"missing_in_millis"`
+ Current int64 `json:"current"`
+}
+
+type NodesStatsSearchStats struct {
+ OpenContexts int64 `json:"open_contexts"`
+ QueryTotal int64 `json:"query_total"`
+ QueryTime string `json:"query_time"`
+ QueryTimeInMillis int64 `json:"query_time_in_millis"`
+ QueryCurrent int64 `json:"query_current"`
+ FetchTotal int64 `json:"fetch_total"`
+ FetchTime string `json:"fetch_time"`
+ FetchTimeInMillis int64 `json:"fetch_time_in_millis"`
+ FetchCurrent int64 `json:"fetch_current"`
+ ScrollTotal int64 `json:"scroll_total"`
+ ScrollTime string `json:"scroll_time"`
+ ScrollTimeInMillis int64 `json:"scroll_time_in_millis"`
+ ScrollCurrent int64 `json:"scroll_current"`
+
+ Groups map[string]*NodesStatsSearchStats `json:"groups"` // stats for individual groups
+}
+
+type NodesStatsMergeStats struct {
+ Current int64 `json:"current"`
+ CurrentDocs int64 `json:"current_docs"`
+ CurrentSize string `json:"current_size"`
+ CurrentSizeInBytes int64 `json:"current_size_in_bytes"`
+ Total int64 `json:"total"`
+ TotalTime string `json:"total_time"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis"`
+ TotalDocs int64 `json:"total_docs"`
+ TotalSize string `json:"total_size"`
+ TotalSizeInBytes int64 `json:"total_size_in_bytes"`
+ TotalStoppedTime string `json:"total_stopped_time"`
+ TotalStoppedTimeInMillis int64 `json:"total_stopped_time_in_millis"`
+ TotalThrottledTime string `json:"total_throttled_time"`
+ TotalThrottledTimeInMillis int64 `json:"total_throttled_time_in_millis"`
+ TotalThrottleBytes string `json:"total_auto_throttle"`
+ TotalThrottleBytesInBytes int64 `json:"total_auto_throttle_in_bytes"`
+}
+
+type NodesStatsRefreshStats struct {
+ Total int64 `json:"total"`
+ TotalTime string `json:"total_time"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis"`
+}
+
+type NodesStatsFlushStats struct {
+ Total int64 `json:"total"`
+ TotalTime string `json:"total_time"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis"`
+}
+
+type NodesStatsWarmerStats struct {
+ Current int64 `json:"current"`
+ Total int64 `json:"total"`
+ TotalTime string `json:"total_time"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis"`
+}
+
+type NodesStatsQueryCacheStats struct {
+ MemorySize string `json:"memory_size"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ TotalCount int64 `json:"total_count"`
+ HitCount int64 `json:"hit_count"`
+ MissCount int64 `json:"miss_count"`
+ CacheSize int64 `json:"cache_size"`
+ CacheCount int64 `json:"cache_count"`
+ Evictions int64 `json:"evictions"`
+}
+
+type NodesStatsFielddataStats struct {
+ MemorySize string `json:"memory_size"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ Evictions int64 `json:"evictions"`
+ Fields map[string]struct {
+ MemorySize string `json:"memory_size"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ } `json:"fields"`
+}
+
+type NodesStatsPercolateStats struct {
+ Total int64 `json:"total"`
+ Time string `json:"time"`
+ TimeInMillis int64 `json:"time_in_millis"`
+ Current int64 `json:"current"`
+ MemorySize string `json:"memory_size"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ Queries int64 `json:"queries"`
+}
+
+type NodesStatsCompletionStats struct {
+ Size string `json:"size"`
+ SizeInBytes int64 `json:"size_in_bytes"`
+ Fields map[string]struct {
+ Size string `json:"size"`
+ SizeInBytes int64 `json:"size_in_bytes"`
+ } `json:"fields"`
+}
+
+type NodesStatsSegmentsStats struct {
+ Count int64 `json:"count"`
+ Memory string `json:"memory"`
+ MemoryInBytes int64 `json:"memory_in_bytes"`
+ TermsMemory string `json:"terms_memory"`
+ TermsMemoryInBytes int64 `json:"terms_memory_in_bytes"`
+ StoredFieldsMemory string `json:"stored_fields_memory"`
+ StoredFieldsMemoryInBytes int64 `json:"stored_fields_memory_in_bytes"`
+ TermVectorsMemory string `json:"term_vectors_memory"`
+ TermVectorsMemoryInBytes int64 `json:"term_vectors_memory_in_bytes"`
+ NormsMemory string `json:"norms_memory"`
+ NormsMemoryInBytes int64 `json:"norms_memory_in_bytes"`
+ DocValuesMemory string `json:"doc_values_memory"`
+ DocValuesMemoryInBytes int64 `json:"doc_values_memory_in_bytes"`
+ IndexWriterMemory string `json:"index_writer_memory"`
+ IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"`
+ IndexWriterMaxMemory string `json:"index_writer_max_memory"`
+ IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"`
+ VersionMapMemory string `json:"version_map_memory"`
+ VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"`
+ FixedBitSetMemory string `json:"fixed_bit_set"` // not a typo
+ FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes"`
+}
+
+type NodesStatsTranslogStats struct {
+ Operations int64 `json:"operations"`
+ Size string `json:"size"`
+ SizeInBytes int64 `json:"size_in_bytes"`
+}
+
+type NodesStatsSuggestStats struct {
+ Total int64 `json:"total"`
+ TotalTime string `json:"total_time"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis"`
+ Current int64 `json:"current"`
+}
+
+type NodesStatsRequestCacheStats struct {
+ MemorySize string `json:"memory_size"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ Evictions int64 `json:"evictions"`
+ HitCount int64 `json:"hit_count"`
+ MissCount int64 `json:"miss_count"`
+}
+
+type NodesStatsRecoveryStats struct {
+ CurrentAsSource int `json:"current_as_source"`
+ CurrentAsTarget int `json:"current_as_target"`
+}
+
+type NodesStatsNodeOS struct {
+ Timestamp int64 `json:"timestamp"`
+ CPU *NodesStatsNodeOSCPU `json:"cpu"`
+ Mem *NodesStatsNodeOSMem `json:"mem"`
+ Swap *NodesStatsNodeOSSwap `json:"swap"`
+}
+
+type NodesStatsNodeOSCPU struct {
+ Percent int `json:"percent"`
+ LoadAverage map[string]float64 `json:"load_average"` // keys are: 1m, 5m, and 15m
+}
+
+type NodesStatsNodeOSMem struct {
+ Total string `json:"total"`
+ TotalInBytes int64 `json:"total_in_bytes"`
+ Free string `json:"free"`
+ FreeInBytes int64 `json:"free_in_bytes"`
+ Used string `json:"used"`
+ UsedInBytes int64 `json:"used_in_bytes"`
+ FreePercent int `json:"free_percent"`
+ UsedPercent int `json:"used_percent"`
+}
+
+type NodesStatsNodeOSSwap struct {
+ Total string `json:"total"`
+ TotalInBytes int64 `json:"total_in_bytes"`
+ Free string `json:"free"`
+ FreeInBytes int64 `json:"free_in_bytes"`
+ Used string `json:"used"`
+ UsedInBytes int64 `json:"used_in_bytes"`
+}
+
+type NodesStatsNodeProcess struct {
+ Timestamp int64 `json:"timestamp"`
+ OpenFileDescriptors int64 `json:"open_file_descriptors"`
+ MaxFileDescriptors int64 `json:"max_file_descriptors"`
+ CPU struct {
+ Percent int `json:"percent"`
+ Total string `json:"total"`
+ TotalInMillis int64 `json:"total_in_millis"`
+ } `json:"cpu"`
+ Mem struct {
+ TotalVirtual string `json:"total_virtual"`
+ TotalVirtualInBytes int64 `json:"total_virtual_in_bytes"`
+ } `json:"mem"`
+}
+
+type NodesStatsNodeJVM struct {
+ Timestamp int64 `json:"timestamp"`
+ Uptime string `json:"uptime"`
+ UptimeInMillis int64 `json:"uptime_in_millis"`
+ Mem *NodesStatsNodeJVMMem `json:"mem"`
+ Threads *NodesStatsNodeJVMThreads `json:"threads"`
+ GC *NodesStatsNodeJVMGC `json:"gc"`
+ BufferPools map[string]*NodesStatsNodeJVMBufferPool `json:"buffer_pools"`
+ Classes *NodesStatsNodeJVMClasses `json:"classes"`
+}
+
+type NodesStatsNodeJVMMem struct {
+ HeapUsed string `json:"heap_used"`
+ HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
+ HeapUsedPercent int `json:"heap_used_percent"`
+ HeapCommitted string `json:"heap_committed"`
+ HeapCommittedInBytes int64 `json:"heap_committed_in_bytes"`
+ HeapMax string `json:"heap_max"`
+ HeapMaxInBytes int64 `json:"heap_max_in_bytes"`
+ NonHeapUsed string `json:"non_heap_used"`
+ NonHeapUsedInBytes int64 `json:"non_heap_used_in_bytes"`
+ NonHeapCommitted string `json:"non_heap_committed"`
+ NonHeapCommittedInBytes int64 `json:"non_heap_committed_in_bytes"`
+ Pools map[string]struct {
+ Used string `json:"used"`
+ UsedInBytes int64 `json:"used_in_bytes"`
+ Max string `json:"max"`
+ MaxInBytes int64 `json:"max_in_bytes"`
+ PeakUsed string `json:"peak_used"`
+ PeakUsedInBytes int64 `json:"peak_used_in_bytes"`
+ PeakMax string `json:"peak_max"`
+ PeakMaxInBytes int64 `json:"peak_max_in_bytes"`
+ } `json:"pools"`
+}
+
+type NodesStatsNodeJVMThreads struct {
+ Count int64 `json:"count"`
+ PeakCount int64 `json:"peak_count"`
+}
+
+type NodesStatsNodeJVMGC struct {
+ Collectors map[string]*NodesStatsNodeJVMGCCollector `json:"collectors"`
+}
+
+type NodesStatsNodeJVMGCCollector struct {
+ CollectionCount int64 `json:"collection_count"`
+ CollectionTime string `json:"collection_time"`
+ CollectionTimeInMillis int64 `json:"collection_time_in_millis"`
+}
+
+type NodesStatsNodeJVMBufferPool struct {
+ Count int64 `json:"count"`
+ TotalCapacity string `json:"total_capacity"`
+ TotalCapacityInBytes int64 `json:"total_capacity_in_bytes"`
+}
+
+type NodesStatsNodeJVMClasses struct {
+ CurrentLoadedCount int64 `json:"current_loaded_count"`
+ TotalLoadedCount int64 `json:"total_loaded_count"`
+ TotalUnloadedCount int64 `json:"total_unloaded_count"`
+}
+
+type NodesStatsNodeThreadPool struct {
+ Threads int `json:"threads"`
+ Queue int `json:"queue"`
+ Active int `json:"active"`
+ Rejected int64 `json:"rejected"`
+ Largest int `json:"largest"`
+ Completed int64 `json:"completed"`
+}
+
+type NodesStatsNodeFS struct {
+ Timestamp int64 `json:"timestamp"`
+ Total *NodesStatsNodeFSEntry `json:"total"`
+ Data []*NodesStatsNodeFSEntry `json:"data"`
+ IOStats *NodesStatsNodeFSIOStats `json:"io_stats"`
+}
+
+type NodesStatsNodeFSEntry struct {
+ Path string `json:"path"`
+ Mount string `json:"mount"`
+ Type string `json:"type"`
+ Total string `json:"total"`
+ TotalInBytes int64 `json:"total_in_bytes"`
+ Free string `json:"free"`
+ FreeInBytes int64 `json:"free_in_bytes"`
+ Available string `json:"available"`
+ AvailableInBytes int64 `json:"available_in_bytes"`
+ Spins string `json:"spins"`
+}
+
+type NodesStatsNodeFSIOStats struct {
+ Devices []*NodesStatsNodeFSIOStatsEntry `json:"devices"`
+ Total *NodesStatsNodeFSIOStatsEntry `json:"total"`
+}
+
+type NodesStatsNodeFSIOStatsEntry struct {
+ DeviceName string `json:"device_name"`
+ Operations int64 `json:"operations"`
+ ReadOperations int64 `json:"read_operations"`
+ WriteOperations int64 `json:"write_operations"`
+ ReadKilobytes int64 `json:"read_kilobytes"`
+ WriteKilobytes int64 `json:"write_kilobytes"`
+}
+
+type NodesStatsNodeTransport struct {
+ ServerOpen int `json:"server_open"`
+ RxCount int64 `json:"rx_count"`
+ RxSize string `json:"rx_size"`
+ RxSizeInBytes int64 `json:"rx_size_in_bytes"`
+ TxCount int64 `json:"tx_count"`
+ TxSize string `json:"tx_size"`
+ TxSizeInBytes int64 `json:"tx_size_in_bytes"`
+}
+
+type NodesStatsNodeHTTP struct {
+ CurrentOpen int `json:"current_open"`
+ TotalOpened int `json:"total_opened"`
+}
+
+type NodesStatsBreaker struct {
+ LimitSize string `json:"limit_size"`
+ LimitSizeInBytes int64 `json:"limit_size_in_bytes"`
+ EstimatedSize string `json:"estimated_size"`
+ EstimatedSizeInBytes int64 `json:"estimated_size_in_bytes"`
+ Overhead float64 `json:"overhead"`
+ Tripped int64 `json:"tripped"`
+}
+
+type NodesStatsScriptStats struct {
+ Compilations int64 `json:"compilations"`
+ CacheEvictions int64 `json:"cache_evictions"`
+}
+
+type NodesStatsDiscovery struct {
+ ClusterStateQueue *NodesStatsDiscoveryStats `json:"cluster_state_queue"`
+}
+
+type NodesStatsDiscoveryStats struct {
+ Total int64 `json:"total"`
+ Pending int64 `json:"pending"`
+ Committed int64 `json:"committed"`
+}
+
+type NodesStatsIngest struct {
+ Total *NodesStatsIngestStats `json:"total"`
+ Pipelines interface{} `json:"pipelines"`
+}
+
+type NodesStatsIngestStats struct {
+ Count int64 `json:"count"`
+ Time string `json:"time"`
+ TimeInMillis int64 `json:"time_in_millis"`
+ Current int64 `json:"current"`
+ Failed int64 `json:"failed"`
+}
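
These stats structs mirror the Nodes Stats JSON one field at a time: each human-readable value (e.g. "used") travels next to its machine-readable twin ("used_in_bytes", "time_in_millis"). A minimal decoding sketch, assuming the package is imported from github.com/olivere/elastic and using a hand-written JSON fragment shaped like an OS "mem" section:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	// Hand-written fragment of a Nodes Stats "os.mem" section, as it
	// would appear with human-readable output enabled.
	data := []byte(`{
		"total": "1gb", "total_in_bytes": 1073741824,
		"free": "512mb", "free_in_bytes": 536870912,
		"used": "512mb", "used_in_bytes": 536870912,
		"free_percent": 50, "used_percent": 50
	}`)

	var mem elastic.NodesStatsNodeOSMem
	if err := json.Unmarshal(data, &mem); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("used %s of %s (%d%%)\n", mem.Used, mem.Total, mem.UsedPercent)
}
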
diff --git a/vendor/github.com/olivere/elastic/nodes_stats_test.go b/vendor/github.com/olivere/elastic/nodes_stats_test.go
new file mode 100644
index 000000000..4b249a2f4
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/nodes_stats_test.go
@@ -0,0 +1,138 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestNodesStats(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ info, err := client.NodesStats().Human(true).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if info == nil {
+ t.Fatal("expected nodes stats")
+ }
+
+ if info.ClusterName == "" {
+ t.Errorf("expected cluster name; got: %q", info.ClusterName)
+ }
+ if len(info.Nodes) == 0 {
+ t.Errorf("expected some nodes; got: %d", len(info.Nodes))
+ }
+ for id, node := range info.Nodes {
+ if id == "" {
+ t.Errorf("expected node id; got: %q", id)
+ }
+ if node == nil {
+ t.Fatalf("expected node info; got: %v", node)
+ }
+ if len(node.Name) == 0 {
+ t.Errorf("expected node name; got: %q", node.Name)
+ }
+ if node.Timestamp == 0 {
+ t.Errorf("expected timestamp; got: %q", node.Timestamp)
+ }
+ }
+}
+
+func TestNodesStatsBuildURL(t *testing.T) {
+ tests := []struct {
+ NodeIds []string
+ Metrics []string
+ IndexMetrics []string
+ Expected string
+ }{
+ {
+ NodeIds: nil,
+ Metrics: nil,
+ IndexMetrics: nil,
+ Expected: "/_nodes/stats",
+ },
+ {
+ NodeIds: []string{"node1"},
+ Metrics: nil,
+ IndexMetrics: nil,
+ Expected: "/_nodes/node1/stats",
+ },
+ {
+ NodeIds: []string{"node1", "node2"},
+ Metrics: nil,
+ IndexMetrics: nil,
+ Expected: "/_nodes/node1%2Cnode2/stats",
+ },
+ {
+ NodeIds: nil,
+ Metrics: []string{"indices"},
+ IndexMetrics: nil,
+ Expected: "/_nodes/stats/indices",
+ },
+ {
+ NodeIds: nil,
+ Metrics: []string{"indices", "jvm"},
+ IndexMetrics: nil,
+ Expected: "/_nodes/stats/indices%2Cjvm",
+ },
+ {
+ NodeIds: []string{"node1"},
+ Metrics: []string{"indices", "jvm"},
+ IndexMetrics: nil,
+ Expected: "/_nodes/node1/stats/indices%2Cjvm",
+ },
+ {
+ NodeIds: nil,
+ Metrics: nil,
+ IndexMetrics: []string{"fielddata"},
+ Expected: "/_nodes/stats/_all/fielddata",
+ },
+ {
+ NodeIds: []string{"node1"},
+ Metrics: nil,
+ IndexMetrics: []string{"fielddata"},
+ Expected: "/_nodes/node1/stats/_all/fielddata",
+ },
+ {
+ NodeIds: nil,
+ Metrics: []string{"indices"},
+ IndexMetrics: []string{"fielddata"},
+ Expected: "/_nodes/stats/indices/fielddata",
+ },
+ {
+ NodeIds: []string{"node1"},
+ Metrics: []string{"indices"},
+ IndexMetrics: []string{"fielddata"},
+ Expected: "/_nodes/node1/stats/indices/fielddata",
+ },
+ {
+ NodeIds: []string{"node1", "node2"},
+ Metrics: []string{"indices", "jvm"},
+ IndexMetrics: []string{"fielddata", "docs"},
+ Expected: "/_nodes/node1%2Cnode2/stats/indices%2Cjvm/fielddata%2Cdocs",
+ },
+ }
+
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, tt := range tests {
+ svc := client.NodesStats().NodeId(tt.NodeIds...).Metric(tt.Metrics...).IndexMetric(tt.IndexMetrics...)
+ path, _, err := svc.buildURL()
+ if err != nil {
+ t.Errorf("#%d: expected no error, got %v", i, err)
+ } else {
+ if want, have := tt.Expected, path; want != have {
+ t.Errorf("#%d: expected %q, got %q", i, want, have)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/percolate_test.go b/vendor/github.com/olivere/elastic/percolate_test.go
new file mode 100644
index 000000000..3b3b2efb7
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/percolate_test.go
@@ -0,0 +1,68 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestPercolate(t *testing.T) {
+ //client := setupTestClientAndCreateIndex(t, SetErrorLog(log.New(os.Stdout, "", 0)))
+ //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ // Create query index
+ createQueryIndex, err := client.CreateIndex(testQueryIndex).Body(testQueryMapping).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createQueryIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createQueryIndex)
+ }
+
+ // Add a document
+ _, err = client.Index().
+ Index(testQueryIndex).
+ Type("doc").
+ Id("1").
+ BodyJson(`{"query":{"match":{"message":"bonsai tree"}}}`).
+ Refresh("wait_for").
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Percolate should return our registered query
+ pq := NewPercolatorQuery().
+ Field("query").
+ DocumentType("doc").
+ Document(doctype{Message: "A new bonsai tree in the office"})
+ res, err := client.Search(testQueryIndex).Type("doc").Query(pq).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected results != nil; got nil")
+ }
+ if res.Hits == nil {
+ t.Fatal("expected SearchResult.Hits != nil; got nil")
+ }
+ if got, want := res.Hits.TotalHits, int64(1); got != want {
+ t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
+ }
+ if got, want := len(res.Hits.Hits), 1; got != want {
+ t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
+ }
+ hit := res.Hits.Hits[0]
+ if hit.Index != testQueryIndex {
+ t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testQueryIndex, hit.Index)
+ }
+ got := string(*hit.Source)
+ expected := `{"query":{"match":{"message":"bonsai tree"}}}`
+ if got != expected {
+ t.Fatalf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/ping.go b/vendor/github.com/olivere/elastic/ping.go
new file mode 100644
index 000000000..5c2d34f00
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/ping.go
@@ -0,0 +1,127 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/url"
+)
+
+// PingService checks if an Elasticsearch server on a given URL is alive.
+// When asked for, it can also return various information about the
+// Elasticsearch server, e.g. the Elasticsearch version number.
+//
+// Ping simply issues an HTTP GET request to the URL of the server.
+// If the server responds with HTTP Status code 200 OK, the server is alive.
+type PingService struct {
+ client *Client
+ url string
+ timeout string
+ httpHeadOnly bool
+ pretty bool
+}
+
+// PingResult is the result returned from querying the Elasticsearch server.
+type PingResult struct {
+ Name string `json:"name"`
+ ClusterName string `json:"cluster_name"`
+ Version struct {
+ Number string `json:"number"`
+ BuildHash string `json:"build_hash"`
+ BuildTimestamp string `json:"build_timestamp"`
+ BuildSnapshot bool `json:"build_snapshot"`
+ LuceneVersion string `json:"lucene_version"`
+ } `json:"version"`
+ TagLine string `json:"tagline"`
+}
+
+// NewPingService creates a new PingService.
+func NewPingService(client *Client) *PingService {
+ return &PingService{
+ client: client,
+ url: DefaultURL,
+ httpHeadOnly: false,
+ pretty: false,
+ }
+}
+
+// URL sets the URL of the Elasticsearch server to ping.
+func (s *PingService) URL(url string) *PingService {
+ s.url = url
+ return s
+}
+
+// Timeout sets the timeout to wait for a response, e.g. "1s".
+func (s *PingService) Timeout(timeout string) *PingService {
+ s.timeout = timeout
+ return s
+}
+
+// HttpHeadOnly makes the service only return the status code in Do;
+// the PingResult will be nil.
+func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService {
+ s.httpHeadOnly = httpHeadOnly
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *PingService) Pretty(pretty bool) *PingService {
+ s.pretty = pretty
+ return s
+}
+
+// Do returns the PingResult, the HTTP status code of the Elasticsearch
+// server, and an error.
+func (s *PingService) Do(ctx context.Context) (*PingResult, int, error) {
+ s.client.mu.RLock()
+ basicAuth := s.client.basicAuth
+ basicAuthUsername := s.client.basicAuthUsername
+ basicAuthPassword := s.client.basicAuthPassword
+ s.client.mu.RUnlock()
+
+ url_ := s.url + "/"
+
+ params := make(url.Values)
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if len(params) > 0 {
+ url_ += "?" + params.Encode()
+ }
+
+ var method string
+ if s.httpHeadOnly {
+ method = "HEAD"
+ } else {
+ method = "GET"
+ }
+
+ // Notice: This service must NOT use PerformRequest!
+ req, err := NewRequest(method, url_)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if basicAuth {
+ req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
+ }
+
+ res, err := s.client.c.Do((*http.Request)(req).WithContext(ctx))
+ if err != nil {
+ return nil, 0, err
+ }
+ defer res.Body.Close()
+
+ var ret *PingResult
+ if !s.httpHeadOnly {
+ ret = new(PingResult)
+ if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+ return nil, res.StatusCode, err
+ }
+ }
+
+ return ret, res.StatusCode, nil
+}
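
Because PingService deliberately bypasses PerformRequest, it can report the raw HTTP status code even when the node is unhealthy. A usage sketch, assuming a single reachable node at the default URL with sniffing disabled:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient(elastic.SetSniff(false))
	if err != nil {
		log.Fatal(err)
	}
	// HEAD-only ping: only the status code matters, the result is nil.
	_, code, err := client.Ping(elastic.DefaultURL).HttpHeadOnly(true).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("status:", code)
}
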
diff --git a/vendor/github.com/olivere/elastic/ping_test.go b/vendor/github.com/olivere/elastic/ping_test.go
new file mode 100644
index 000000000..273913803
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/ping_test.go
@@ -0,0 +1,65 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "net/http"
+ "testing"
+)
+
+func TestPingGet(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ res, code, err := client.Ping(DefaultURL).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if code != http.StatusOK {
+ t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
+ }
+ if res == nil {
+ t.Fatalf("expected to return result, got: %v", res)
+ }
+ if res.Name == "" {
+ t.Errorf("expected Name != \"\"; got %q", res.Name)
+ }
+ if res.Version.Number == "" {
+ t.Errorf("expected Version.Number != \"\"; got %q", res.Version.Number)
+ }
+}
+
+func TestPingHead(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ res, code, err := client.Ping(DefaultURL).HttpHeadOnly(true).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if code != http.StatusOK {
+ t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
+ }
+ if res != nil {
+ t.Errorf("expected not to return result, got: %v", res)
+ }
+}
+
+func TestPingHeadFailure(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ res, code, err := client.
+ Ping("http://127.0.0.1:9299").
+ HttpHeadOnly(true).
+ Do(context.TODO())
+ if err == nil {
+ t.Error("expected error, got nil")
+ }
+ if code == http.StatusOK {
+ t.Errorf("expected status code != %d; got %d", http.StatusOK, code)
+ }
+ if res != nil {
+ t.Errorf("expected not to return result, got: %v", res)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/plugins.go b/vendor/github.com/olivere/elastic/plugins.go
new file mode 100644
index 000000000..60bda7552
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/plugins.go
@@ -0,0 +1,40 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "context"
+
+// HasPlugin indicates whether the cluster has the named plugin.
+func (c *Client) HasPlugin(name string) (bool, error) {
+ plugins, err := c.Plugins()
+ if err != nil {
+		return false, err
+ }
+ for _, plugin := range plugins {
+ if plugin == name {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Plugins returns the list of all registered plugins.
+func (c *Client) Plugins() ([]string, error) {
+ stats, err := c.ClusterStats().Do(context.Background())
+ if err != nil {
+ return nil, err
+ }
+	if stats == nil {
+		return nil, nil
+	}
+	if stats.Nodes == nil {
+		return nil, nil
+	}
+ var plugins []string
+ for _, plugin := range stats.Nodes.Plugins {
+ plugins = append(plugins, plugin.Name)
+ }
+ return plugins, nil
+}
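
HasPlugin performs a full ClusterStats round-trip on every call, so when several plugins are of interest it is cheaper to fetch Plugins once and scan the slice. A hedged sketch; the plugin name is only an example:

package main

import (
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient(elastic.SetSniff(false))
	if err != nil {
		log.Fatal(err)
	}
	// One stats round-trip, then check as many names as needed.
	plugins, err := client.Plugins()
	if err != nil {
		log.Fatal(err)
	}
	installed := make(map[string]bool, len(plugins))
	for _, name := range plugins {
		installed[name] = true
	}
	fmt.Println("ingest-geoip installed:", installed["ingest-geoip"])
}
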
diff --git a/vendor/github.com/olivere/elastic/plugins_test.go b/vendor/github.com/olivere/elastic/plugins_test.go
new file mode 100644
index 000000000..969f0b0e5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/plugins_test.go
@@ -0,0 +1,32 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestClientPlugins(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Plugins()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestClientHasPlugin(t *testing.T) {
+ client, err := NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+ found, err := client.HasPlugin("no-such-plugin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if found {
+ t.Fatalf("expected to not find plugin %q", "no-such-plugin")
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/query.go b/vendor/github.com/olivere/elastic/query.go
new file mode 100644
index 000000000..ad01354a0
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/query.go
@@ -0,0 +1,13 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Query represents the generic query interface. A query's sole purpose
+// is to return the source of the query as a JSON-serializable object.
+// Returning map[string]interface{} is the norm for queries.
+type Query interface {
+ // Source returns the JSON-serializable query request.
+ Source() (interface{}, error)
+}
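
Because Query is a single-method interface, callers can supply hand-rolled queries alongside the built-in ones. A minimal sketch of a custom implementation; the rawQuery type is hypothetical, not part of the library:

package main

import (
	"encoding/json"
	"fmt"
)

// rawQuery is a hypothetical Query implementation that wraps a
// pre-built map, e.g. one loaded from configuration.
type rawQuery struct {
	body map[string]interface{}
}

// Source returns the JSON-serializable query request.
func (q rawQuery) Source() (interface{}, error) {
	return q.body, nil
}

func main() {
	q := rawQuery{body: map[string]interface{}{
		"term": map[string]interface{}{"user": "olivere"},
	}}
	src, _ := q.Source()
	b, _ := json.Marshal(src)
	fmt.Println(string(b)) // {"term":{"user":"olivere"}}
}
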
diff --git a/vendor/github.com/olivere/elastic/recipes/bulk_insert/bulk_insert.go b/vendor/github.com/olivere/elastic/recipes/bulk_insert/bulk_insert.go
new file mode 100644
index 000000000..5a8ab39d0
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/recipes/bulk_insert/bulk_insert.go
@@ -0,0 +1,173 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// BulkInsert illustrates how to bulk insert documents into Elasticsearch.
+//
+// It uses two goroutines to do so. The first creates simple documents
+// and sends them to the second via a channel. The second goroutine adds
+// each document to a Bulk service and commits the batch to Elasticsearch
+// once a given number of documents has been collected. That batch size
+// can be specified via the "bulk-size" flag.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
+// for details on the Bulk API in Elasticsearch.
+//
+// Example
+//
+// Bulk index 100,000 documents into the index "warehouse", type "product",
+// committing in batches of 1,000 documents.
+//
+// bulk_insert -index=warehouse -type=product -n=100000 -bulk-size=1000
+//
+package main
+
+import (
+ "context"
+ "encoding/base64"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "math/rand"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+ "github.com/olivere/elastic"
+)
+
+func main() {
+ var (
+ url = flag.String("url", "http://localhost:9200", "Elasticsearch URL")
+ index = flag.String("index", "", "Elasticsearch index name")
+ typ = flag.String("type", "", "Elasticsearch type name")
+ sniff = flag.Bool("sniff", true, "Enable or disable sniffing")
+ n = flag.Int("n", 0, "Number of documents to bulk insert")
+ bulkSize = flag.Int("bulk-size", 0, "Number of documents to collect before committing")
+ )
+ flag.Parse()
+ log.SetFlags(0)
+ rand.Seed(time.Now().UnixNano())
+
+ if *url == "" {
+ log.Fatal("missing url parameter")
+ }
+ if *index == "" {
+ log.Fatal("missing index parameter")
+ }
+ if *typ == "" {
+ log.Fatal("missing type parameter")
+ }
+ if *n <= 0 {
+ log.Fatal("n must be a positive number")
+ }
+ if *bulkSize <= 0 {
+ log.Fatal("bulk-size must be a positive number")
+ }
+
+ // Create an Elasticsearch client
+ client, err := elastic.NewClient(elastic.SetURL(*url), elastic.SetSniff(*sniff))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Setup a group of goroutines from the excellent errgroup package
+ g, ctx := errgroup.WithContext(context.TODO())
+
+	// The first goroutine will emit documents and send them to the second
+	// goroutine via the docsc channel.
+	// The second goroutine will simply bulk insert the documents.
+ type doc struct {
+ ID string `json:"id"`
+ Timestamp time.Time `json:"@timestamp"`
+ }
+ docsc := make(chan doc)
+
+ begin := time.Now()
+
+ // Goroutine to create documents
+ g.Go(func() error {
+ defer close(docsc)
+
+ buf := make([]byte, 32)
+ for i := 0; i < *n; i++ {
+ // Generate a random ID
+ _, err := rand.Read(buf)
+ if err != nil {
+ return err
+ }
+ id := base64.URLEncoding.EncodeToString(buf)
+
+ // Construct the document
+ d := doc{
+ ID: id,
+ Timestamp: time.Now(),
+ }
+
+ // Send over to 2nd goroutine, or cancel
+ select {
+ case docsc <- d:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ return nil
+ })
+
+	// The second goroutine consumes the documents sent from the first and bulk inserts them into Elasticsearch
+ var total uint64
+ g.Go(func() error {
+ bulk := client.Bulk().Index(*index).Type(*typ)
+ for d := range docsc {
+ // Simple progress
+ current := atomic.AddUint64(&total, 1)
+ dur := time.Since(begin).Seconds()
+ sec := int(dur)
+ pps := int64(float64(current) / dur)
+ fmt.Printf("%10d | %6d req/s | %02d:%02d\r", current, pps, sec/60, sec%60)
+
+ // Enqueue the document
+ bulk.Add(elastic.NewBulkIndexRequest().Id(d.ID).Doc(d))
+ if bulk.NumberOfActions() >= *bulkSize {
+ // Commit
+ res, err := bulk.Do(ctx)
+ if err != nil {
+ return err
+ }
+ if res.Errors {
+ // Look up the failed documents with res.Failed(), and e.g. recommit
+ return errors.New("bulk commit failed")
+ }
+ // "bulk" is reset after Do, so you can reuse it
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ // Commit the final batch before exiting
+ if bulk.NumberOfActions() > 0 {
+ _, err = bulk.Do(ctx)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+
+ // Wait until all goroutines are finished
+ if err := g.Wait(); err != nil {
+ log.Fatal(err)
+ }
+
+ // Final results
+ dur := time.Since(begin).Seconds()
+ sec := int(dur)
+ pps := int64(float64(total) / dur)
+ fmt.Printf("%10d | %6d req/s | %02d:%02d\n", total, pps, sec/60, sec%60)
+}
diff --git a/vendor/github.com/olivere/elastic/recipes/connect/connect.go b/vendor/github.com/olivere/elastic/recipes/connect/connect.go
new file mode 100644
index 000000000..baff6c114
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/recipes/connect/connect.go
@@ -0,0 +1,43 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// Connect simply connects to Elasticsearch.
+//
+// Example
+//
+// connect -url=http://127.0.0.1:9200 -sniff=false
+//
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+
+ "github.com/olivere/elastic"
+)
+
+func main() {
+ var (
+ url = flag.String("url", "http://localhost:9200", "Elasticsearch URL")
+ sniff = flag.Bool("sniff", true, "Enable or disable sniffing")
+ )
+ flag.Parse()
+ log.SetFlags(0)
+
+ if *url == "" {
+ *url = "http://127.0.0.1:9200"
+ }
+
+ // Create an Elasticsearch client
+ client, err := elastic.NewClient(elastic.SetURL(*url), elastic.SetSniff(*sniff))
+ if err != nil {
+ log.Fatal(err)
+ }
+ _ = client
+
+ // Just a status message
+ fmt.Println("Connection succeeded")
+}
diff --git a/vendor/github.com/olivere/elastic/recipes/sliced_scroll/sliced_scroll.go b/vendor/github.com/olivere/elastic/recipes/sliced_scroll/sliced_scroll.go
new file mode 100644
index 000000000..d753a61cb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/recipes/sliced_scroll/sliced_scroll.go
@@ -0,0 +1,161 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// SlicedScroll illustrates scrolling through a set of documents
+// in parallel. It uses the sliced scrolling feature introduced
+// in Elasticsearch 5.0 to create a number of goroutines, each
+// scrolling through a slice of the total results. A second goroutine
+// receives the hits from the set of goroutines scrolling through
+// the slices and simply counts the total number and the number of
+// documents received per slice.
+//
+// The speedup of sliced scrolling can be significant but is very
+// dependent on the specific use case.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
+// for details on sliced scrolling in Elasticsearch.
+//
+// Example
+//
+// Scroll with 4 parallel slices through an index called "products".
+// Use "_uid" as the default field:
+//
+// sliced_scroll -index=products -n=4
+//
+package main
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+ "github.com/olivere/elastic"
+)
+
+func main() {
+ var (
+ url = flag.String("url", "http://localhost:9200", "Elasticsearch URL")
+ index = flag.String("index", "", "Elasticsearch index name")
+ typ = flag.String("type", "", "Elasticsearch type name")
+ field = flag.String("field", "", "Slice field (must be numeric)")
+ numSlices = flag.Int("n", 2, "Number of slices to use in parallel")
+ sniff = flag.Bool("sniff", true, "Enable or disable sniffing")
+ )
+ flag.Parse()
+ log.SetFlags(0)
+
+ if *url == "" {
+ log.Fatal("missing url parameter")
+ }
+ if *index == "" {
+ log.Fatal("missing index parameter")
+ }
+ if *numSlices <= 0 {
+ log.Fatal("n must be greater than zero")
+ }
+
+ // Create an Elasticsearch client
+ client, err := elastic.NewClient(elastic.SetURL(*url), elastic.SetSniff(*sniff))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Setup a group of goroutines from the excellent errgroup package
+ g, ctx := errgroup.WithContext(context.TODO())
+
+ // Hits channel will be sent to from the first set of goroutines and consumed by the second
+ type hit struct {
+ Slice int
+ Hit elastic.SearchHit
+ }
+ hitsc := make(chan hit)
+
+ begin := time.Now()
+
+ // Start a number of goroutines to parallelize scrolling
+ var wg sync.WaitGroup
+ for i := 0; i < *numSlices; i++ {
+ wg.Add(1)
+
+ slice := i
+
+ // Prepare the query
+ var query elastic.Query
+ if *typ == "" {
+ query = elastic.NewMatchAllQuery()
+ } else {
+ query = elastic.NewTypeQuery(*typ)
+ }
+
+ // Prepare the slice
+ sliceQuery := elastic.NewSliceQuery().Id(i).Max(*numSlices)
+ if *field != "" {
+ sliceQuery = sliceQuery.Field(*field)
+ }
+
+ // Start goroutine for this sliced scroll
+ g.Go(func() error {
+ defer wg.Done()
+ svc := client.Scroll(*index).Query(query).Slice(sliceQuery)
+ for {
+ res, err := svc.Do(ctx)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ for _, searchHit := range res.Hits.Hits {
+ // Pass the hit to the hits channel, which will be consumed below
+ select {
+ case hitsc <- hit{Slice: slice, Hit: *searchHit}:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ }
+ return nil
+ })
+ }
+ go func() {
+ // Wait until all scrolling is done
+ wg.Wait()
+ close(hitsc)
+ }()
+
+	// The second goroutine consumes the hits sent from the workers in the first set of goroutines
+ var total uint64
+ totals := make([]uint64, *numSlices)
+ g.Go(func() error {
+ for hit := range hitsc {
+ // We simply count the hits here.
+ atomic.AddUint64(&totals[hit.Slice], 1)
+ current := atomic.AddUint64(&total, 1)
+ sec := int(time.Since(begin).Seconds())
+ fmt.Printf("%8d | %02d:%02d\r", current, sec/60, sec%60)
+ select {
+ default:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ return nil
+ })
+
+ // Wait until all goroutines are finished
+ if err := g.Wait(); err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Printf("Scrolled through a total of %d documents in %v\n", total, time.Since(begin))
+ for i := 0; i < *numSlices; i++ {
+ fmt.Printf("Slice %2d received %d documents\n", i, totals[i])
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/reindex.go b/vendor/github.com/olivere/elastic/reindex.go
new file mode 100644
index 000000000..35440fa80
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/reindex.go
@@ -0,0 +1,685 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+)
+
+// ReindexService copies documents from one index to another.
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-reindex.html.
+type ReindexService struct {
+ client *Client
+ pretty bool
+ refresh string
+ timeout string
+ waitForActiveShards string
+ waitForCompletion *bool
+ requestsPerSecond *int
+ body interface{}
+ source *ReindexSource
+ destination *ReindexDestination
+ conflicts string
+ size *int
+ script *Script
+}
+
+// NewReindexService creates a new ReindexService.
+func NewReindexService(client *Client) *ReindexService {
+ return &ReindexService{
+ client: client,
+ }
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active before
+// proceeding with the reindex operation. Defaults to 1, meaning the primary shard only.
+// Set to `all` for all shard copies, otherwise set to any non-negative value less than or
+// equal to the total number of copies for the shard (number of replicas + 1).
+func (s *ReindexService) WaitForActiveShards(waitForActiveShards string) *ReindexService {
+ s.waitForActiveShards = waitForActiveShards
+ return s
+}
+
+// RequestsPerSecond specifies the throttle to set on this request, in sub-requests per second.
+// -1 means no throttle, as does "unlimited", which is the only non-float value this accepts.
+func (s *ReindexService) RequestsPerSecond(requestsPerSecond int) *ReindexService {
+ s.requestsPerSecond = &requestsPerSecond
+ return s
+}
+
+// Refresh indicates whether Elasticsearch should refresh the affected indices
+// immediately.
+func (s *ReindexService) Refresh(refresh string) *ReindexService {
+ s.refresh = refresh
+ return s
+}
+
+// Timeout is the time each individual bulk request should wait for shards
+// that are unavailable.
+func (s *ReindexService) Timeout(timeout string) *ReindexService {
+ s.timeout = timeout
+ return s
+}
+
+// WaitForCompletion indicates whether Elasticsearch should block until the
+// reindex is complete.
+func (s *ReindexService) WaitForCompletion(waitForCompletion bool) *ReindexService {
+ s.waitForCompletion = &waitForCompletion
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ReindexService) Pretty(pretty bool) *ReindexService {
+ s.pretty = pretty
+ return s
+}
+
+// Source specifies the source of the reindexing process.
+func (s *ReindexService) Source(source *ReindexSource) *ReindexService {
+ s.source = source
+ return s
+}
+
+// SourceIndex specifies the source index of the reindexing process.
+func (s *ReindexService) SourceIndex(index string) *ReindexService {
+ if s.source == nil {
+ s.source = NewReindexSource()
+ }
+ s.source = s.source.Index(index)
+ return s
+}
+
+// Destination specifies the destination of the reindexing process.
+func (s *ReindexService) Destination(destination *ReindexDestination) *ReindexService {
+ s.destination = destination
+ return s
+}
+
+// DestinationIndex specifies the destination index of the reindexing process.
+func (s *ReindexService) DestinationIndex(index string) *ReindexService {
+ if s.destination == nil {
+ s.destination = NewReindexDestination()
+ }
+ s.destination = s.destination.Index(index)
+ return s
+}
+
+// DestinationIndexAndType specifies both the destination index and type
+// of the reindexing process.
+func (s *ReindexService) DestinationIndexAndType(index, typ string) *ReindexService {
+ if s.destination == nil {
+ s.destination = NewReindexDestination()
+ }
+ s.destination = s.destination.Index(index)
+ s.destination = s.destination.Type(typ)
+ return s
+}
+
+// Conflicts indicates what to do when the process detects version conflicts.
+// Possible values are "proceed" and "abort".
+func (s *ReindexService) Conflicts(conflicts string) *ReindexService {
+ s.conflicts = conflicts
+ return s
+}
+
+// AbortOnVersionConflict aborts the request on version conflicts.
+// It is an alias to setting Conflicts("abort").
+func (s *ReindexService) AbortOnVersionConflict() *ReindexService {
+ s.conflicts = "abort"
+ return s
+}
+
+// ProceedOnVersionConflict proceeds with the request despite version conflicts.
+// It is an alias to setting Conflicts("proceed").
+func (s *ReindexService) ProceedOnVersionConflict() *ReindexService {
+ s.conflicts = "proceed"
+ return s
+}
+
+// Size sets an upper limit for the number of processed documents.
+func (s *ReindexService) Size(size int) *ReindexService {
+ s.size = &size
+ return s
+}
+
+// Script allows for modification of the documents as they are reindexed
+// from source to destination.
+func (s *ReindexService) Script(script *Script) *ReindexService {
+ s.script = script
+ return s
+}
+
+// Body specifies the body of the request to send to Elasticsearch.
+// It overrides settings specified with other setters, e.g. Source and Conflicts.
+func (s *ReindexService) Body(body interface{}) *ReindexService {
+ s.body = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ReindexService) buildURL() (string, url.Values, error) {
+ // Build URL path
+ path := "/_reindex"
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.refresh != "" {
+ params.Set("refresh", s.refresh)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.requestsPerSecond != nil {
+ params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond))
+ }
+ if s.waitForActiveShards != "" {
+ params.Set("wait_for_active_shards", s.waitForActiveShards)
+ }
+ if s.waitForCompletion != nil {
+ params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ReindexService) Validate() error {
+ var invalid []string
+ if s.body != nil {
+ return nil
+ }
+ if s.source == nil {
+ invalid = append(invalid, "Source")
+ } else {
+ if len(s.source.indices) == 0 {
+ invalid = append(invalid, "Source.Index")
+ }
+ }
+ if s.destination == nil {
+ invalid = append(invalid, "Destination")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// getBody returns the body part of the document request.
+func (s *ReindexService) getBody() (interface{}, error) {
+ if s.body != nil {
+ return s.body, nil
+ }
+
+ body := make(map[string]interface{})
+
+ if s.conflicts != "" {
+ body["conflicts"] = s.conflicts
+ }
+ if s.size != nil {
+ body["size"] = *s.size
+ }
+ if s.script != nil {
+ out, err := s.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ body["script"] = out
+ }
+
+ src, err := s.source.Source()
+ if err != nil {
+ return nil, err
+ }
+ body["source"] = src
+
+ dst, err := s.destination.Source()
+ if err != nil {
+ return nil, err
+ }
+ body["dest"] = dst
+
+ return body, nil
+}
+
+// Do executes the operation.
+func (s *ReindexService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ body, err := s.getBody()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(BulkIndexByScrollResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// DoAsync executes the reindexing operation asynchronously by starting a new task.
+// Callers need to use the Task Management API to watch the outcome of the reindexing
+// operation.
+func (s *ReindexService) DoAsync(ctx context.Context) (*StartTaskResult, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+	// DoAsync only makes sense with WaitForCompletion set to false
+ if s.waitForCompletion != nil && *s.waitForCompletion {
+ return nil, fmt.Errorf("cannot start a task with WaitForCompletion set to true")
+ }
+ f := false
+ s.waitForCompletion = &f
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ body, err := s.getBody()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(StartTaskResult)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Source of Reindex --
+
+// ReindexSource specifies the source of a Reindex process.
+type ReindexSource struct {
+ searchType string // default in ES is "query_then_fetch"
+ indices []string
+ types []string
+ routing *string
+ preference *string
+ requestCache *bool
+ scroll string
+ query Query
+ sorts []SortInfo
+ sorters []Sorter
+ searchSource *SearchSource
+ remoteInfo *ReindexRemoteInfo
+}
+
+// NewReindexSource creates a new ReindexSource.
+func NewReindexSource() *ReindexSource {
+ return &ReindexSource{}
+}
+
+// SearchType is the search operation type. Possible values are
+// "query_then_fetch" and "dfs_query_then_fetch".
+func (r *ReindexSource) SearchType(searchType string) *ReindexSource {
+ r.searchType = searchType
+ return r
+}
+
+// SearchTypeDfsQueryThenFetch is an alias for SearchType("dfs_query_then_fetch").
+func (r *ReindexSource) SearchTypeDfsQueryThenFetch() *ReindexSource {
+	return r.SearchType("dfs_query_then_fetch")
+}
+
+// SearchTypeQueryThenFetch is an alias for SearchType("query_then_fetch").
+func (r *ReindexSource) SearchTypeQueryThenFetch() *ReindexSource {
+	return r.SearchType("query_then_fetch")
+}
+
+// Index adds one or more indices to read from.
+func (r *ReindexSource) Index(indices ...string) *ReindexSource {
+	r.indices = append(r.indices, indices...)
+	return r
+}
+
+// Type adds one or more types to read from.
+func (r *ReindexSource) Type(types ...string) *ReindexSource {
+	r.types = append(r.types, types...)
+	return r
+}
+
+// Preference specifies the node or shard the source search should be
+// performed on.
+func (r *ReindexSource) Preference(preference string) *ReindexSource {
+	r.preference = &preference
+	return r
+}
+
+// RequestCache specifies whether the request cache should be used
+// for the source search.
+func (r *ReindexSource) RequestCache(requestCache bool) *ReindexSource {
+	r.requestCache = &requestCache
+	return r
+}
+
+// Scroll specifies how long a consistent view of the source index
+// should be maintained, e.g. "5m".
+func (r *ReindexSource) Scroll(scroll string) *ReindexSource {
+	r.scroll = scroll
+	return r
+}
+
+// Query specifies the query to select documents from the source.
+func (r *ReindexSource) Query(query Query) *ReindexSource {
+	r.query = query
+	return r
+}
+
+// Sort adds a sort order.
+func (r *ReindexSource) Sort(field string, ascending bool) *ReindexSource {
+	r.sorts = append(r.sorts, SortInfo{Field: field, Ascending: ascending})
+	return r
+}
+
+// SortWithInfo adds a sort order.
+func (r *ReindexSource) SortWithInfo(info SortInfo) *ReindexSource {
+	r.sorts = append(r.sorts, info)
+	return r
+}
+
+// SortBy adds a sort order.
+func (r *ReindexSource) SortBy(sorter ...Sorter) *ReindexSource {
+	r.sorters = append(r.sorters, sorter...)
+	return r
+}
+
+// RemoteInfo sets up reindexing from a remote cluster.
+func (r *ReindexSource) RemoteInfo(ri *ReindexRemoteInfo) *ReindexSource {
+	r.remoteInfo = ri
+	return r
+}
+
+// Source returns a serializable JSON representation of the reindex source.
+func (r *ReindexSource) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ if r.query != nil {
+ src, err := r.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["query"] = src
+ } else if r.searchSource != nil {
+ src, err := r.searchSource.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["source"] = src
+ }
+
+ if r.searchType != "" {
+ source["search_type"] = r.searchType
+ }
+
+ switch len(r.indices) {
+ case 0:
+ case 1:
+ source["index"] = r.indices[0]
+ default:
+ source["index"] = r.indices
+ }
+
+ switch len(r.types) {
+ case 0:
+ case 1:
+ source["type"] = r.types[0]
+ default:
+ source["type"] = r.types
+ }
+
+ if r.preference != nil && *r.preference != "" {
+ source["preference"] = *r.preference
+ }
+
+ if r.requestCache != nil {
+ source["request_cache"] = fmt.Sprintf("%v", *r.requestCache)
+ }
+
+ if r.scroll != "" {
+ source["scroll"] = r.scroll
+ }
+
+ if r.remoteInfo != nil {
+ src, err := r.remoteInfo.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["remote"] = src
+ }
+
+ if len(r.sorters) > 0 {
+ var sortarr []interface{}
+ for _, sorter := range r.sorters {
+ src, err := sorter.Source()
+ if err != nil {
+ return nil, err
+ }
+ sortarr = append(sortarr, src)
+ }
+ source["sort"] = sortarr
+ } else if len(r.sorts) > 0 {
+ var sortarr []interface{}
+ for _, sort := range r.sorts {
+ src, err := sort.Source()
+ if err != nil {
+ return nil, err
+ }
+ sortarr = append(sortarr, src)
+ }
+ source["sort"] = sortarr
+ }
+
+ return source, nil
+}
+
+// ReindexRemoteInfo contains information for reindexing from a remote cluster.
+type ReindexRemoteInfo struct {
+ host string
+ username string
+ password string
+ socketTimeout string // e.g. "1m" or "30s"
+ connectTimeout string // e.g. "1m" or "30s"
+}
+
+// NewReindexRemoteInfo creates a new ReindexRemoteInfo.
+func NewReindexRemoteInfo() *ReindexRemoteInfo {
+ return &ReindexRemoteInfo{}
+}
+
+// Host sets the host information of the remote cluster.
+// It must be of the form "http(s)://<hostname>:<port>"
+func (ri *ReindexRemoteInfo) Host(host string) *ReindexRemoteInfo {
+ ri.host = host
+ return ri
+}
+
+// Username sets the username to authenticate with the remote cluster.
+func (ri *ReindexRemoteInfo) Username(username string) *ReindexRemoteInfo {
+ ri.username = username
+ return ri
+}
+
+// Password sets the password to authenticate with the remote cluster.
+func (ri *ReindexRemoteInfo) Password(password string) *ReindexRemoteInfo {
+ ri.password = password
+ return ri
+}
+
+// SocketTimeout sets the socket timeout to connect with the remote cluster.
+// Use ES-compatible values, e.g. "30s" or "1m".
+func (ri *ReindexRemoteInfo) SocketTimeout(timeout string) *ReindexRemoteInfo {
+ ri.socketTimeout = timeout
+ return ri
+}
+
+// ConnectTimeout sets the connection timeout to connect with the remote cluster.
+// Use ES-compatible values, e.g. "30s" or "1m".
+func (ri *ReindexRemoteInfo) ConnectTimeout(timeout string) *ReindexRemoteInfo {
+ ri.connectTimeout = timeout
+ return ri
+}
+
+// Source returns the serializable JSON data for the request.
+func (ri *ReindexRemoteInfo) Source() (interface{}, error) {
+ res := make(map[string]interface{})
+ res["host"] = ri.host
+ if len(ri.username) > 0 {
+ res["username"] = ri.username
+ }
+ if len(ri.password) > 0 {
+ res["password"] = ri.password
+ }
+ if len(ri.socketTimeout) > 0 {
+ res["socket_timeout"] = ri.socketTimeout
+ }
+ if len(ri.connectTimeout) > 0 {
+ res["connect_timeout"] = ri.connectTimeout
+ }
+ return res, nil
+}
+
+// -- Destination of Reindex --
+
+// ReindexDestination is the destination of a Reindex API call.
+// It is basically the metadata of a BulkIndexRequest.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-reindex.html
+// for details.
+type ReindexDestination struct {
+ index string
+ typ string
+ routing string
+ parent string
+ opType string
+ version int64 // default is MATCH_ANY
+ versionType string // default is "internal"
+}
+
+// NewReindexDestination returns a new ReindexDestination.
+func NewReindexDestination() *ReindexDestination {
+ return &ReindexDestination{}
+}
+
+// Index specifies name of the Elasticsearch index to use as the destination
+// of a reindexing process.
+func (r *ReindexDestination) Index(index string) *ReindexDestination {
+ r.index = index
+ return r
+}
+
+// Type specifies the Elasticsearch type to use for reindexing.
+func (r *ReindexDestination) Type(typ string) *ReindexDestination {
+ r.typ = typ
+ return r
+}
+
+// Routing specifies a routing value for the reindexing request.
+// It can be "keep", "discard", or start with "=". The latter specifies
+// the routing on the bulk request.
+func (r *ReindexDestination) Routing(routing string) *ReindexDestination {
+ r.routing = routing
+ return r
+}
+
+// Keep sets the routing on the bulk request sent for each match to the routing
+// of the match (the default).
+func (r *ReindexDestination) Keep() *ReindexDestination {
+ r.routing = "keep"
+ return r
+}
+
+// Discard sets the routing on the bulk request sent for each match to null.
+func (r *ReindexDestination) Discard() *ReindexDestination {
+ r.routing = "discard"
+ return r
+}
+
+// Parent specifies the identifier of the parent document (if available).
+func (r *ReindexDestination) Parent(parent string) *ReindexDestination {
+ r.parent = parent
+ return r
+}
+
+// OpType specifies if this request should follow create-only or upsert
+// behavior. This follows the OpType of the standard document index API.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#operation-type
+// for details.
+func (r *ReindexDestination) OpType(opType string) *ReindexDestination {
+ r.opType = opType
+ return r
+}
+
+// Version indicates the version of the document as part of an optimistic
+// concurrency model.
+func (r *ReindexDestination) Version(version int64) *ReindexDestination {
+ r.version = version
+ return r
+}
+
+// VersionType specifies how versions are created.
+func (r *ReindexDestination) VersionType(versionType string) *ReindexDestination {
+ r.versionType = versionType
+ return r
+}
+
+// Source returns a serializable JSON representation of the reindex destination.
+func (r *ReindexDestination) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if r.index != "" {
+ source["index"] = r.index
+ }
+ if r.typ != "" {
+ source["type"] = r.typ
+ }
+ if r.routing != "" {
+ source["routing"] = r.routing
+ }
+ if r.opType != "" {
+ source["op_type"] = r.opType
+ }
+ if r.parent != "" {
+ source["parent"] = r.parent
+ }
+ if r.version > 0 {
+ source["version"] = r.version
+ }
+ if r.versionType != "" {
+ source["version_type"] = r.versionType
+ }
+ return source, nil
+}
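
Tying the service together: a hedged sketch of a conflict-tolerant copy from one index to another, run synchronously. The index names are placeholders, and the client setup assumes a local single-node cluster:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient(elastic.SetSniff(false))
	if err != nil {
		log.Fatal(err)
	}
	src := elastic.NewReindexSource().Index("twitter")
	dst := elastic.NewReindexDestination().Index("new_twitter").OpType("create")
	res, err := client.Reindex().
		Source(src).
		Destination(dst).
		ProceedOnVersionConflict(). // keep going past documents that already exist
		WaitForCompletion(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created %d, updated %d of %d docs\n", res.Created, res.Updated, res.Total)
}
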
diff --git a/vendor/github.com/olivere/elastic/reindex_test.go b/vendor/github.com/olivere/elastic/reindex_test.go
new file mode 100644
index 000000000..fadf4bfc7
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/reindex_test.go
@@ -0,0 +1,401 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+func TestReindexSourceWithBodyMap(t *testing.T) {
+ client := setupTestClient(t)
+ out, err := client.Reindex().Body(map[string]interface{}{
+ "source": map[string]interface{}{
+ "index": "twitter",
+ },
+ "dest": map[string]interface{}{
+ "index": "new_twitter",
+ },
+ }).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter"}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithBodyString(t *testing.T) {
+ client := setupTestClient(t)
+ got, err := client.Reindex().Body(`{"source":{"index":"twitter"},"dest":{"index":"new_twitter"}}`).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := `{"source":{"index":"twitter"},"dest":{"index":"new_twitter"}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithSourceIndexAndDestinationIndex(t *testing.T) {
+ client := setupTestClient(t)
+ out, err := client.Reindex().SourceIndex("twitter").DestinationIndex("new_twitter").getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter"}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithSourceAndDestinationAndVersionType(t *testing.T) {
+ client := setupTestClient(t)
+ src := NewReindexSource().Index("twitter")
+ dst := NewReindexDestination().Index("new_twitter").VersionType("external")
+ out, err := client.Reindex().Source(src).Destination(dst).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"dest":{"index":"new_twitter","version_type":"external"},"source":{"index":"twitter"}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithSourceAndRemoteAndDestination(t *testing.T) {
+ client := setupTestClient(t)
+ src := NewReindexSource().Index("twitter").RemoteInfo(
+ NewReindexRemoteInfo().Host("http://otherhost:9200").
+ Username("alice").
+ Password("secret").
+ ConnectTimeout("10s").
+ SocketTimeout("1m"),
+ )
+ dst := NewReindexDestination().Index("new_twitter")
+ out, err := client.Reindex().Source(src).Destination(dst).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter","remote":{"connect_timeout":"10s","host":"http://otherhost:9200","password":"secret","socket_timeout":"1m","username":"alice"}}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithSourceAndDestinationAndOpType(t *testing.T) {
+ client := setupTestClient(t)
+ src := NewReindexSource().Index("twitter")
+ dst := NewReindexDestination().Index("new_twitter").OpType("create")
+ out, err := client.Reindex().Source(src).Destination(dst).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"dest":{"index":"new_twitter","op_type":"create"},"source":{"index":"twitter"}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithConflictsProceed(t *testing.T) {
+ client := setupTestClient(t)
+ src := NewReindexSource().Index("twitter")
+ dst := NewReindexDestination().Index("new_twitter").OpType("create")
+ out, err := client.Reindex().Conflicts("proceed").Source(src).Destination(dst).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"conflicts":"proceed","dest":{"index":"new_twitter","op_type":"create"},"source":{"index":"twitter"}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithProceedOnVersionConflict(t *testing.T) {
+ client := setupTestClient(t)
+ src := NewReindexSource().Index("twitter")
+ dst := NewReindexDestination().Index("new_twitter").OpType("create")
+ out, err := client.Reindex().ProceedOnVersionConflict().Source(src).Destination(dst).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"conflicts":"proceed","dest":{"index":"new_twitter","op_type":"create"},"source":{"index":"twitter"}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithQuery(t *testing.T) {
+ client := setupTestClient(t)
+ src := NewReindexSource().Index("twitter").Type("doc").Query(NewTermQuery("user", "olivere"))
+ dst := NewReindexDestination().Index("new_twitter")
+ out, err := client.Reindex().Source(src).Destination(dst).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter","query":{"term":{"user":"olivere"}},"type":"doc"}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithMultipleSourceIndicesAndTypes(t *testing.T) {
+ client := setupTestClient(t)
+ src := NewReindexSource().Index("twitter", "blog").Type("doc", "post")
+ dst := NewReindexDestination().Index("all_together")
+ out, err := client.Reindex().Source(src).Destination(dst).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"dest":{"index":"all_together"},"source":{"index":["twitter","blog"],"type":["doc","post"]}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithSourceAndSize(t *testing.T) {
+ client := setupTestClient(t)
+ src := NewReindexSource().Index("twitter").Sort("date", false)
+ dst := NewReindexDestination().Index("new_twitter")
+ out, err := client.Reindex().Size(10000).Source(src).Destination(dst).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"dest":{"index":"new_twitter"},"size":10000,"source":{"index":"twitter","sort":[{"date":{"order":"desc"}}]}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithScript(t *testing.T) {
+ client := setupTestClient(t)
+ src := NewReindexSource().Index("twitter")
+ dst := NewReindexDestination().Index("new_twitter").VersionType("external")
+ scr := NewScriptInline("if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}")
+ out, err := client.Reindex().Source(src).Destination(dst).Script(scr).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"dest":{"index":"new_twitter","version_type":"external"},"script":{"source":"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}"},"source":{"index":"twitter"}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindexSourceWithRouting(t *testing.T) {
+ client := setupTestClient(t)
+ src := NewReindexSource().Index("source").Query(NewMatchQuery("company", "cat"))
+ dst := NewReindexDestination().Index("dest").Routing("=cat")
+ out, err := client.Reindex().Source(src).Destination(dst).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"dest":{"index":"dest","routing":"=cat"},"source":{"index":"source","query":{"match":{"company":{"query":"cat"}}}}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestReindex(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if esversion < "2.3.0" {
+ t.Skipf("Elasticsearch %v does not support Reindex API yet", esversion)
+ }
+
+ sourceCount, err := client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if sourceCount <= 0 {
+ t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
+ }
+
+ targetCount, err := client.Count(testIndexName2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if targetCount != 0 {
+ t.Fatalf("expected %d documents; got: %d", 0, targetCount)
+ }
+
+ // Simple copying
+ src := NewReindexSource().Index(testIndexName)
+ dst := NewReindexDestination().Index(testIndexName2)
+ res, err := client.Reindex().Source(src).Destination(dst).Refresh("true").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected result != nil")
+ }
+ if res.Total != sourceCount {
+ t.Errorf("expected %d, got %d", sourceCount, res.Total)
+ }
+ if res.Updated != 0 {
+ t.Errorf("expected %d, got %d", 0, res.Updated)
+ }
+ if res.Created != sourceCount {
+ t.Errorf("expected %d, got %d", sourceCount, res.Created)
+ }
+
+ targetCount, err = client.Count(testIndexName2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if targetCount != sourceCount {
+ t.Fatalf("expected %d documents; got: %d", sourceCount, targetCount)
+ }
+}
+
+func TestReindexAsync(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if esversion < "2.3.0" {
+ t.Skipf("Elasticsearch %v does not support Reindex API yet", esversion)
+ }
+
+ sourceCount, err := client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if sourceCount <= 0 {
+ t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
+ }
+
+ targetCount, err := client.Count(testIndexName2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if targetCount != 0 {
+ t.Fatalf("expected %d documents; got: %d", 0, targetCount)
+ }
+
+ // Simple copying
+ src := NewReindexSource().Index(testIndexName)
+ dst := NewReindexDestination().Index(testIndexName2)
+ res, err := client.Reindex().Source(src).Destination(dst).DoAsync(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected result != nil")
+ }
+ if res.TaskId == "" {
+ t.Errorf("expected a task id, got %+v", res)
+ }
+
+ tasksGetTask := client.TasksGetTask()
+ taskStatus, err := tasksGetTask.TaskId(res.TaskId).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if taskStatus == nil {
+ t.Fatal("expected task status result != nil")
+ }
+}
+
+func TestReindexWithWaitForCompletionTrueCannotBeStarted(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if esversion < "2.3.0" {
+ t.Skipf("Elasticsearch %v does not support Reindex API yet", esversion)
+ }
+
+ sourceCount, err := client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if sourceCount <= 0 {
+ t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
+ }
+
+ targetCount, err := client.Count(testIndexName2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if targetCount != 0 {
+ t.Fatalf("expected %d documents; got: %d", 0, targetCount)
+ }
+
+ // DoAsync should fail when WaitForCompletion is true
+ src := NewReindexSource().Index(testIndexName)
+ dst := NewReindexDestination().Index(testIndexName2)
+ _, err = client.Reindex().Source(src).Destination(dst).WaitForCompletion(true).DoAsync(context.TODO())
+ if err == nil {
+ t.Fatal("error should have been returned")
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/request.go b/vendor/github.com/olivere/elastic/request.go
new file mode 100644
index 000000000..87d191965
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/request.go
@@ -0,0 +1,79 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "runtime"
+ "strings"
+)
+
+// Request is an Elasticsearch-specific HTTP request.
+type Request http.Request
+
+// NewRequest creates a new Request. It wraps http.Request and adds
+// features such as body encoding and default headers.
+func NewRequest(method, url string) (*Request, error) {
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")")
+ req.Header.Add("Accept", "application/json")
+ req.Header.Set("Content-Type", "application/json")
+ return (*Request)(req), nil
+}
+
+// SetBasicAuth wraps http.Request's SetBasicAuth.
+func (r *Request) SetBasicAuth(username, password string) {
+ ((*http.Request)(r)).SetBasicAuth(username, password)
+}
+
+// SetBody encodes the body in the request.
+func (r *Request) SetBody(body interface{}) error {
+ switch b := body.(type) {
+ case string:
+ return r.setBodyString(b)
+ default:
+ return r.setBodyJson(body)
+ }
+}
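+
+// Example (editor's sketch, not part of the upstream file): SetBody accepts
+// either a raw string, which is sent as-is, or any JSON-serializable value:
+//
+//    req, _ := NewRequest("GET", "/")
+//    _ = req.SetBody(`{"query":{"match_all":{}}}`) // string body, sent verbatim
+//    body := map[string]interface{}{
+//        "query": map[string]interface{}{"match_all": map[string]interface{}{}},
+//    }
+//    _ = req.SetBody(body) // marshaled via json.Marshal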
+
+// setBodyJson marshals the given data via json.Marshal and uses the
+// result as the request body.
+func (r *Request) setBodyJson(data interface{}) error {
+ body, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ r.Header.Set("Content-Type", "application/json")
+ r.setBodyReader(bytes.NewReader(body))
+ return nil
+}
+
+// setBodyString encodes the body as a string.
+func (r *Request) setBodyString(body string) error {
+ return r.setBodyReader(strings.NewReader(body))
+}
+
+// setBodyReader sets the request body from an io.Reader and, where the
+// length is known, sets ContentLength accordingly.
+func (r *Request) setBodyReader(body io.Reader) error {
+ rc, ok := body.(io.ReadCloser)
+ if !ok && body != nil {
+ rc = ioutil.NopCloser(body)
+ }
+ r.Body = rc
+ if body != nil {
+ switch v := body.(type) {
+ case *strings.Reader:
+ r.ContentLength = int64(v.Len())
+ case *bytes.Buffer:
+ r.ContentLength = int64(v.Len())
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/olivere/elastic/request_test.go b/vendor/github.com/olivere/elastic/request_test.go
new file mode 100644
index 000000000..04fbecbab
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/request_test.go
@@ -0,0 +1,72 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+var testReq *Request // used as a temporary variable to avoid compiler optimizations in tests/benchmarks
+
+func TestRequestSetContentType(t *testing.T) {
+ req, err := NewRequest("GET", "/")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, have := "application/json", req.Header.Get("Content-Type"); want != have {
+ t.Fatalf("want %q, have %q", want, have)
+ }
+ req.Header.Set("Content-Type", "application/x-ndjson")
+ if want, have := "application/x-ndjson", req.Header.Get("Content-Type"); want != have {
+ t.Fatalf("want %q, have %q", want, have)
+ }
+}
+
+func BenchmarkRequestSetBodyString(b *testing.B) {
+ req, err := NewRequest("GET", "/")
+ if err != nil {
+ b.Fatal(err)
+ }
+ for i := 0; i < b.N; i++ {
+ body := `{"query":{"match_all":{}}}`
+ err = req.SetBody(body)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ testReq = req
+}
+
+func BenchmarkRequestSetBodyBytes(b *testing.B) {
+ req, err := NewRequest("GET", "/")
+ if err != nil {
+ b.Fatal(err)
+ }
+ for i := 0; i < b.N; i++ {
+ body := []byte(`{"query":{"match_all":{}}}`)
+ err = req.SetBody(body)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ testReq = req
+}
+
+func BenchmarkRequestSetBodyMap(b *testing.B) {
+ req, err := NewRequest("GET", "/")
+ if err != nil {
+ b.Fatal(err)
+ }
+ for i := 0; i < b.N; i++ {
+ body := map[string]interface{}{
+ "query": map[string]interface{}{
+ "match_all": map[string]interface{}{},
+ },
+ }
+ err = req.SetBody(body)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ testReq = req
+}
diff --git a/vendor/github.com/olivere/elastic/rescore.go b/vendor/github.com/olivere/elastic/rescore.go
new file mode 100644
index 000000000..9b7eaee1d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/rescore.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+type Rescore struct {
+ rescorer Rescorer
+ windowSize *int
+ defaultRescoreWindowSize *int
+}
+
+func NewRescore() *Rescore {
+ return &Rescore{}
+}
+
+func (r *Rescore) WindowSize(windowSize int) *Rescore {
+ r.windowSize = &windowSize
+ return r
+}
+
+func (r *Rescore) IsEmpty() bool {
+ return r.rescorer == nil
+}
+
+func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore {
+ r.rescorer = rescorer
+ return r
+}
+
+func (r *Rescore) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if r.windowSize != nil {
+ source["window_size"] = *r.windowSize
+ } else if r.defaultRescoreWindowSize != nil {
+ source["window_size"] = *r.defaultRescoreWindowSize
+ }
+ rescorerSrc, err := r.rescorer.Source()
+ if err != nil {
+ return nil, err
+ }
+ source[r.rescorer.Name()] = rescorerSrc
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/rescorer.go b/vendor/github.com/olivere/elastic/rescorer.go
new file mode 100644
index 000000000..ccd4bb854
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/rescorer.go
@@ -0,0 +1,64 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+type Rescorer interface {
+ Name() string
+ Source() (interface{}, error)
+}
+
+// -- Query Rescorer --
+
+type QueryRescorer struct {
+ query Query
+ rescoreQueryWeight *float64
+ queryWeight *float64
+ scoreMode string
+}
+
+func NewQueryRescorer(query Query) *QueryRescorer {
+ return &QueryRescorer{
+ query: query,
+ }
+}
+
+func (r *QueryRescorer) Name() string {
+ return "query"
+}
+
+func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer {
+ r.rescoreQueryWeight = &rescoreQueryWeight
+ return r
+}
+
+func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer {
+ r.queryWeight = &queryWeight
+ return r
+}
+
+func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer {
+ r.scoreMode = scoreMode
+ return r
+}
+
+func (r *QueryRescorer) Source() (interface{}, error) {
+ rescoreQuery, err := r.query.Source()
+ if err != nil {
+ return nil, err
+ }
+
+ source := make(map[string]interface{})
+ source["rescore_query"] = rescoreQuery
+ if r.queryWeight != nil {
+ source["query_weight"] = *r.queryWeight
+ }
+ if r.rescoreQueryWeight != nil {
+ source["rescore_query_weight"] = *r.rescoreQueryWeight
+ }
+ if r.scoreMode != "" {
+ source["score_mode"] = r.scoreMode
+ }
+ return source, nil
+}
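+
+// Example (editor's sketch): a QueryRescorer wrapped in a Rescore, using the
+// builders defined above; NewMatchQuery is assumed from this package's query
+// builders.
+//
+//    rescore := NewRescore().
+//        WindowSize(50).
+//        Rescorer(NewQueryRescorer(NewMatchQuery("message", "cycling")).
+//            QueryWeight(0.7).
+//            RescoreQueryWeight(1.2))
+//    src, err := rescore.Source() // map ready for json.Marshal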
diff --git a/vendor/github.com/olivere/elastic/response.go b/vendor/github.com/olivere/elastic/response.go
new file mode 100644
index 000000000..4fcdc32d6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/response.go
@@ -0,0 +1,41 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+)
+
+// Response represents a response from Elasticsearch.
+type Response struct {
+ // StatusCode is the HTTP status code, e.g. 200.
+ StatusCode int
+ // Header is the HTTP header from the HTTP response.
+ // Keys in the map are canonicalized (see http.CanonicalHeaderKey).
+ Header http.Header
+ // Body is the deserialized response body.
+ Body json.RawMessage
+}
+
+// newResponse creates a new response from the HTTP response.
+func (c *Client) newResponse(res *http.Response) (*Response, error) {
+ r := &Response{
+ StatusCode: res.StatusCode,
+ Header: res.Header,
+ }
+ if res.Body != nil {
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ // HEAD requests return a body but no content
+ if len(slurp) > 0 {
+ r.Body = json.RawMessage(slurp)
+ }
+ }
+ return r, nil
+}
diff --git a/vendor/github.com/olivere/elastic/response_test.go b/vendor/github.com/olivere/elastic/response_test.go
new file mode 100644
index 000000000..e62773403
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/response_test.go
@@ -0,0 +1,48 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "testing"
+)
+
+func BenchmarkResponse(b *testing.B) {
+ c := &Client{
+ decoder: &DefaultDecoder{},
+ }
+
+ var resp *Response
+ for n := 0; n < b.N; n++ {
+ iteration := fmt.Sprint(n)
+ body := fmt.Sprintf(`{"n":%d}`, n)
+ res := &http.Response{
+ Header: http.Header{
+ "X-Iteration": []string{iteration},
+ },
+ Body: ioutil.NopCloser(bytes.NewBufferString(body)),
+ StatusCode: http.StatusOK,
+ }
+ var err error
+ resp, err = c.newResponse(res)
+ if err != nil {
+ b.Fatal(err)
+ }
+ /*
+ if want, have := body, string(resp.Body); want != have {
+ b.Fatalf("want %q, have %q", want, have)
+ }
+ //*/
+ /*
+ if want, have := iteration, resp.Header.Get("X-Iteration"); want != have {
+ b.Fatalf("want %q, have %q", want, have)
+ }
+ //*/
+ }
+ _ = resp
+}
diff --git a/vendor/github.com/olivere/elastic/retrier.go b/vendor/github.com/olivere/elastic/retrier.go
new file mode 100644
index 000000000..46d3adfcb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/retrier.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "net/http"
+ "time"
+)
+
+// RetrierFunc specifies the signature of a Retry function.
+type RetrierFunc func(context.Context, int, *http.Request, *http.Response, error) (time.Duration, bool, error)
+
+// Retrier decides whether to retry a failed HTTP request with Elasticsearch.
+type Retrier interface {
+ // Retry is called when a request has failed. It decides whether to retry
+ // the call, how long to wait for the next call, or whether to return an
+ // error (which will be returned to the service that started the HTTP
+ // request in the first place).
+ //
+ // Callers may also use this to inspect the HTTP request/response and
+ // the error that happened. Additional data can be passed through via
+ // the context.
+ Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error)
+}
+
+// -- StopRetrier --
+
+// StopRetrier is an implementation that does no retries.
+type StopRetrier struct {
+}
+
+// NewStopRetrier returns a retrier that does no retries.
+func NewStopRetrier() *StopRetrier {
+ return &StopRetrier{}
+}
+
+// Retry does not retry.
+func (r *StopRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) {
+ return 0, false, nil
+}
+
+// -- BackoffRetrier --
+
+// BackoffRetrier is an implementation that uses the given backoff
+// strategy to decide whether and how long to wait before retrying.
+type BackoffRetrier struct {
+ backoff Backoff
+}
+
+// NewBackoffRetrier returns a retrier that uses the given backoff strategy.
+func NewBackoffRetrier(backoff Backoff) *BackoffRetrier {
+ return &BackoffRetrier{backoff: backoff}
+}
+
+// Retry calls into the backoff strategy and its wait interval.
+func (r *BackoffRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) {
+ wait, goahead := r.backoff.Next(retry)
+ return wait, goahead, nil
+}
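+
+// Example (editor's sketch, mirroring retrier_test.go): install a retrier on
+// the client via the SetRetrier option.
+//
+//    backoff := NewExponentialBackoff(10*time.Millisecond, 8*time.Second)
+//    client, err := NewClient(SetRetrier(NewBackoffRetrier(backoff)))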
diff --git a/vendor/github.com/olivere/elastic/retrier_test.go b/vendor/github.com/olivere/elastic/retrier_test.go
new file mode 100644
index 000000000..c1c5ff524
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/retrier_test.go
@@ -0,0 +1,174 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+type testRetrier struct {
+ Retrier
+ N int64
+ Err error
+}
+
+func (r *testRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) {
+ atomic.AddInt64(&r.N, 1)
+ if r.Err != nil {
+ return 0, false, r.Err
+ }
+ return r.Retrier.Retry(ctx, retry, req, resp, err)
+}
+
+func TestStopRetrier(t *testing.T) {
+ r := NewStopRetrier()
+ wait, ok, err := r.Retry(context.TODO(), 1, nil, nil, nil)
+ if want, got := 0*time.Second, wait; want != got {
+ t.Fatalf("expected %v, got %v", want, got)
+ }
+ if want, got := false, ok; want != got {
+ t.Fatalf("expected %v, got %v", want, got)
+ }
+ if err != nil {
+ t.Fatalf("expected nil, got %v", err)
+ }
+}
+
+func TestRetrier(t *testing.T) {
+ var numFailedReqs int
+ fail := func(r *http.Request) (*http.Response, error) {
+ numFailedReqs += 1
+ //return &http.Response{Request: r, StatusCode: 400}, nil
+ return nil, errors.New("request failed")
+ }
+
+ tr := &failingTransport{path: "/fail", fail: fail}
+ httpClient := &http.Client{Transport: tr}
+
+ retrier := &testRetrier{
+ Retrier: NewBackoffRetrier(NewSimpleBackoff(100, 100, 100, 100, 100)),
+ }
+
+ client, err := NewClient(
+ SetHttpClient(httpClient),
+ SetMaxRetries(5),
+ SetHealthcheck(false),
+ SetRetrier(retrier))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/fail",
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if res != nil {
+ t.Fatal("expected no response")
+ }
+	// The request should have been attempted five times before giving up
+ if numFailedReqs != 5 {
+ t.Errorf("expected %d failed requests; got: %d", 5, numFailedReqs)
+ }
+ if retrier.N != 5 {
+ t.Errorf("expected %d Retrier calls; got: %d", 5, retrier.N)
+ }
+}
+
+func TestRetrierWithError(t *testing.T) {
+ var numFailedReqs int
+ fail := func(r *http.Request) (*http.Response, error) {
+ numFailedReqs += 1
+ //return &http.Response{Request: r, StatusCode: 400}, nil
+ return nil, errors.New("request failed")
+ }
+
+ tr := &failingTransport{path: "/fail", fail: fail}
+ httpClient := &http.Client{Transport: tr}
+
+ kaboom := errors.New("kaboom")
+ retrier := &testRetrier{
+ Err: kaboom,
+ Retrier: NewBackoffRetrier(NewSimpleBackoff(100, 100, 100, 100, 100)),
+ }
+
+ client, err := NewClient(
+ SetHttpClient(httpClient),
+ SetMaxRetries(5),
+ SetHealthcheck(false),
+ SetRetrier(retrier))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/fail",
+ })
+ if err != kaboom {
+ t.Fatalf("expected %v, got %v", kaboom, err)
+ }
+ if res != nil {
+ t.Fatal("expected no response")
+ }
+ if numFailedReqs != 1 {
+ t.Errorf("expected %d failed requests; got: %d", 1, numFailedReqs)
+ }
+ if retrier.N != 1 {
+ t.Errorf("expected %d Retrier calls; got: %d", 1, retrier.N)
+ }
+}
+
+func TestRetrierOnPerformRequest(t *testing.T) {
+ var numFailedReqs int
+ fail := func(r *http.Request) (*http.Response, error) {
+ numFailedReqs += 1
+ //return &http.Response{Request: r, StatusCode: 400}, nil
+ return nil, errors.New("request failed")
+ }
+
+ tr := &failingTransport{path: "/fail", fail: fail}
+ httpClient := &http.Client{Transport: tr}
+
+ defaultRetrier := &testRetrier{
+ Retrier: NewStopRetrier(),
+ }
+ requestRetrier := &testRetrier{
+ Retrier: NewStopRetrier(),
+ }
+
+ client, err := NewClient(
+ SetHttpClient(httpClient),
+ SetHealthcheck(false),
+ SetRetrier(defaultRetrier))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/fail",
+ Retrier: requestRetrier,
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if res != nil {
+ t.Fatal("expected no response")
+ }
+ if want, have := int64(0), defaultRetrier.N; want != have {
+ t.Errorf("defaultRetrier: expected %d calls; got: %d", want, have)
+ }
+ if want, have := int64(1), requestRetrier.N; want != have {
+ t.Errorf("requestRetrier: expected %d calls; got: %d", want, have)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/retry.go b/vendor/github.com/olivere/elastic/retry.go
new file mode 100644
index 000000000..3571a3b7a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/retry.go
@@ -0,0 +1,56 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// This file is based on code (c) 2014 Cenk Altı and governed by the MIT license.
+// See https://github.com/cenkalti/backoff for original source.
+
+package elastic
+
+import "time"
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+// Notify is a notify-on-error function. It receives the error returned
+// by an operation.
+//
+// Notice that if the backoff policy decides to stop retrying,
+// the notify function isn't called.
+type Notify func(error)
+
+// Retry runs the operation o until it does not return an error or until
+// the backoff policy b stops. o is guaranteed to run at least once.
+// It is the caller's responsibility to reset b after Retry returns.
+//
+// Retry sleeps the goroutine for the duration returned by b after a
+// failed operation returns.
+func Retry(o Operation, b Backoff) error { return RetryNotify(o, b, nil) }
+
+// RetryNotify calls the notify function with the error of each failed
+// attempt before sleeping for the backoff duration.
+func RetryNotify(operation Operation, b Backoff, notify Notify) error {
+ var err error
+ var wait time.Duration
+ var retry bool
+ var n int
+
+ for {
+ if err = operation(); err == nil {
+ return nil
+ }
+
+ n++
+ wait, retry = b.Next(n)
+ if !retry {
+ return err
+ }
+
+ if notify != nil {
+ notify(err)
+ }
+
+ time.Sleep(wait)
+ }
+}
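+
+// Example (editor's sketch): retry a flaky operation with exponential
+// backoff; doSomethingFlaky is a hypothetical stand-in.
+//
+//    op := func() error { return doSomethingFlaky() }
+//    err := Retry(op, NewExponentialBackoff(8*time.Millisecond, 256*time.Millisecond))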
diff --git a/vendor/github.com/olivere/elastic/retry_test.go b/vendor/github.com/olivere/elastic/retry_test.go
new file mode 100644
index 000000000..804313095
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/retry_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// This file is based on code that is (c) 2014 Cenk Altı and governed
+// by the MIT license.
+// See https://github.com/cenkalti/backoff for original source.
+
+package elastic
+
+import (
+ "errors"
+ "testing"
+ "time"
+)
+
+func TestRetry(t *testing.T) {
+ const successOn = 3
+ var i = 0
+
+	// This function succeeds on its "successOn"-th call.
+ f := func() error {
+ i++
+ // t.Logf("function is called %d. time\n", i)
+
+ if i == successOn {
+ // t.Log("OK")
+ return nil
+ }
+
+ // t.Log("error")
+ return errors.New("error")
+ }
+
+ min := time.Duration(8) * time.Millisecond
+ max := time.Duration(256) * time.Millisecond
+ err := Retry(f, NewExponentialBackoff(min, max))
+ if err != nil {
+ t.Errorf("unexpected error: %s", err.Error())
+ }
+ if i != successOn {
+ t.Errorf("invalid number of retries: %d", i)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/run-es.sh b/vendor/github.com/olivere/elastic/run-es.sh
new file mode 100755
index 000000000..1f4a851d4
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/run-es.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+VERSION=${VERSION:=6.1.2}
+docker run --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:$VERSION elasticsearch -Expack.security.enabled=false -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_
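+# Usage note (editor's addition): the image tag can be overridden via the
+# environment, e.g. `VERSION=6.0.0 ./run-es.sh`.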
diff --git a/vendor/github.com/olivere/elastic/script.go b/vendor/github.com/olivere/elastic/script.go
new file mode 100644
index 000000000..273473950
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/script.go
@@ -0,0 +1,127 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// Script holds all the parameters necessary to compile or find in cache
+// and then execute a script.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html
+// for details of scripting.
+type Script struct {
+ script string
+ typ string
+ lang string
+ params map[string]interface{}
+}
+
+// NewScript creates and initializes a new Script.
+func NewScript(script string) *Script {
+ return &Script{
+ script: script,
+ typ: "inline",
+ params: make(map[string]interface{}),
+ }
+}
+
+// NewScriptInline creates and initializes a new inline script, i.e. code.
+func NewScriptInline(script string) *Script {
+ return NewScript(script).Type("inline")
+}
+
+// NewScriptStored creates and initializes a new stored script.
+func NewScriptStored(script string) *Script {
+ return NewScript(script).Type("id")
+}
+
+// Script is either the cache key of the script to be compiled/executed
+// or the actual script source code for inline scripts. For indexed
+// scripts this is the id used in the request. For file scripts this is
+// the file name.
+func (s *Script) Script(script string) *Script {
+ s.script = script
+ return s
+}
+
+// Type sets the type of script: "inline" or "id".
+func (s *Script) Type(typ string) *Script {
+ s.typ = typ
+ return s
+}
+
+// Lang sets the language of the script. In Elasticsearch 6.x the default
+// scripting language is "painless"; others such as "expression" and
+// "mustache" are also supported.
+// To use certain languages, you need to configure your server and/or
+// add plugins. See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html
+// for details.
+func (s *Script) Lang(lang string) *Script {
+ s.lang = lang
+ return s
+}
+
+// Param adds a key/value pair to the parameters that this script will be executed with.
+func (s *Script) Param(name string, value interface{}) *Script {
+ if s.params == nil {
+ s.params = make(map[string]interface{})
+ }
+ s.params[name] = value
+ return s
+}
+
+// Params sets the map of parameters this script will be executed with.
+func (s *Script) Params(params map[string]interface{}) *Script {
+ s.params = params
+ return s
+}
+
+// Source returns the JSON serializable data for this Script.
+func (s *Script) Source() (interface{}, error) {
+ if s.typ == "" && s.lang == "" && len(s.params) == 0 {
+ return s.script, nil
+ }
+ source := make(map[string]interface{})
+	// Beginning with 6.0, scripts are serialized under either "source" or "id"
+ if s.typ == "" || s.typ == "inline" {
+ source["source"] = s.script
+ } else {
+ source["id"] = s.script
+ }
+ if s.lang != "" {
+ source["lang"] = s.lang
+ }
+ if len(s.params) > 0 {
+ source["params"] = s.params
+ }
+ return source, nil
+}
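+
+// Example (editor's sketch): an inline script with a parameter; on
+// Elasticsearch 6.x Source() serializes it under "source" with "params".
+//
+//    script := NewScriptInline("doc['retweets'].value * params.factor").
+//        Lang("painless").
+//        Param("factor", 2)
+//    src, err := script.Source() // ready for json.Marshal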
+
+// -- Script Field --
+
+// ScriptField is a single script field.
+type ScriptField struct {
+ FieldName string // name of the field
+
+ script *Script
+}
+
+// NewScriptField creates and initializes a new ScriptField.
+func NewScriptField(fieldName string, script *Script) *ScriptField {
+ return &ScriptField{FieldName: fieldName, script: script}
+}
+
+// Source returns the serializable JSON for the ScriptField.
+func (f *ScriptField) Source() (interface{}, error) {
+ if f.script == nil {
+ return nil, errors.New("ScriptField expects script")
+ }
+ source := make(map[string]interface{})
+ src, err := f.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["script"] = src
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/script_test.go b/vendor/github.com/olivere/elastic/script_test.go
new file mode 100644
index 000000000..aa475d7eb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/script_test.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestScriptingDefault(t *testing.T) {
+ builder := NewScript("doc['field'].value * 2")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"source":"doc['field'].value * 2"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestScriptingInline(t *testing.T) {
+ builder := NewScriptInline("doc['field'].value * factor").Param("factor", 2.0)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"params":{"factor":2},"source":"doc['field'].value * factor"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestScriptingStored(t *testing.T) {
+ builder := NewScriptStored("script-with-id").Param("factor", 2.0)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"id":"script-with-id","params":{"factor":2}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/scroll.go b/vendor/github.com/olivere/elastic/scroll.go
new file mode 100644
index 000000000..ac51a8c00
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/scroll.go
@@ -0,0 +1,470 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/url"
+ "strings"
+ "sync"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+const (
+ // DefaultScrollKeepAlive is the default time a scroll cursor will be kept alive.
+ DefaultScrollKeepAlive = "5m"
+)
+
+// ScrollService iterates over pages of search results from Elasticsearch.
+type ScrollService struct {
+ client *Client
+ retrier Retrier
+ indices []string
+ types []string
+ keepAlive string
+ body interface{}
+ ss *SearchSource
+ size *int
+ pretty bool
+ routing string
+ preference string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+
+ mu sync.RWMutex
+ scrollId string
+}
+
+// NewScrollService initializes and returns a new ScrollService.
+func NewScrollService(client *Client) *ScrollService {
+ builder := &ScrollService{
+ client: client,
+ ss: NewSearchSource(),
+ keepAlive: DefaultScrollKeepAlive,
+ }
+ return builder
+}
+
+// Retrier allows setting specific retry logic for this ScrollService.
+// If not specified, it will use the client's default retrier.
+func (s *ScrollService) Retrier(retrier Retrier) *ScrollService {
+ s.retrier = retrier
+ return s
+}
+
+// Index sets the name of one or more indices to iterate over.
+func (s *ScrollService) Index(indices ...string) *ScrollService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Type sets the name of one or more types to iterate over.
+func (s *ScrollService) Type(types ...string) *ScrollService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
+// Scroll is an alias for KeepAlive, the time to keep
+// the cursor alive (e.g. "5m" for 5 minutes).
+func (s *ScrollService) Scroll(keepAlive string) *ScrollService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+// KeepAlive sets the maximum time after which the cursor will expire.
+// It is "2m" by default.
+func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+// Size specifies the number of documents Elasticsearch should return
+// from each shard, per page.
+func (s *ScrollService) Size(size int) *ScrollService {
+ s.size = &size
+ return s
+}
+
+// Body sets the raw body to send to Elasticsearch. This can be e.g. a string,
+// a map[string]interface{} or anything that can be serialized into JSON.
+// Notice that setting the body disables the use of SearchSource and many
+// other properties of the ScrollService.
+func (s *ScrollService) Body(body interface{}) *ScrollService {
+ s.body = body
+ return s
+}
+
+// SearchSource sets the search source builder to use with this iterator.
+// Notice that only a certain number of properties can be used when scrolling,
+// e.g. query and sorting.
+func (s *ScrollService) SearchSource(searchSource *SearchSource) *ScrollService {
+ s.ss = searchSource
+ if s.ss == nil {
+ s.ss = NewSearchSource()
+ }
+ return s
+}
+
+// Query sets the query to perform, e.g. a MatchAllQuery.
+func (s *ScrollService) Query(query Query) *ScrollService {
+ s.ss = s.ss.Query(query)
+ return s
+}
+
+// PostFilter is executed as the last filter. It only affects the
+// search hits but not facets. See
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-post-filter.html
+// for details.
+func (s *ScrollService) PostFilter(postFilter Query) *ScrollService {
+ s.ss = s.ss.PostFilter(postFilter)
+ return s
+}
+
+// Slice allows slicing the scroll request into several batches.
+// This is supported in Elasticsearch 5.0 or later.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
+// for details.
+func (s *ScrollService) Slice(sliceQuery Query) *ScrollService {
+ s.ss = s.ss.Slice(sliceQuery)
+ return s
+}
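+
+// Example (editor's sketch, mirroring TestScrollWithSlice further below):
+// scroll over one of two disjoint slices of the same index.
+//
+//    svc := client.Scroll("twitter").
+//        Slice(NewSliceQuery().Id(0).Max(2)).
+//        Size(100)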
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *ScrollService) FetchSource(fetchSource bool) *ScrollService {
+ s.ss = s.ss.FetchSource(fetchSource)
+ return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *ScrollService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScrollService {
+ s.ss = s.ss.FetchSourceContext(fetchSourceContext)
+ return s
+}
+
+// Version can be set to true to return a version for each search hit.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-version.html.
+func (s *ScrollService) Version(version bool) *ScrollService {
+ s.ss = s.ss.Version(version)
+ return s
+}
+
+// Sort adds a sort order. This can have negative effects on the performance
+// of the scroll operation as Elasticsearch needs to sort first.
+func (s *ScrollService) Sort(field string, ascending bool) *ScrollService {
+ s.ss = s.ss.Sort(field, ascending)
+ return s
+}
+
+// SortWithInfo specifies a sort order. Notice that sorting can have a
+// negative impact on scroll performance.
+func (s *ScrollService) SortWithInfo(info SortInfo) *ScrollService {
+ s.ss = s.ss.SortWithInfo(info)
+ return s
+}
+
+// SortBy specifies a sort order. Notice that sorting can have a
+// negative impact on scroll performance.
+func (s *ScrollService) SortBy(sorter ...Sorter) *ScrollService {
+ s.ss = s.ss.SortBy(sorter...)
+ return s
+}
+
+// Pretty asks Elasticsearch to pretty-print the returned JSON.
+func (s *ScrollService) Pretty(pretty bool) *ScrollService {
+ s.pretty = pretty
+ return s
+}
+
+// Routing is a list of specific routing values to control the shards
+// the search will be executed on.
+func (s *ScrollService) Routing(routings ...string) *ScrollService {
+ s.routing = strings.Join(routings, ",")
+ return s
+}
+
+// Preference sets the preference to execute the search. Defaults to
+// randomize across shards ("random"). Can be set to "_local" to prefer
+// local shards, "_primary" to execute on primary shards only,
+// or a custom value which guarantees that the same order will be used
+// across different requests.
+func (s *ScrollService) Preference(preference string) *ScrollService {
+ s.preference = preference
+ return s
+}
+
+// IgnoreUnavailable indicates whether the specified concrete indices
+// should be ignored when unavailable (missing or closed).
+func (s *ScrollService) IgnoreUnavailable(ignoreUnavailable bool) *ScrollService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices. (This includes `_all` string
+// or when no indices have been specified).
+func (s *ScrollService) AllowNoIndices(allowNoIndices bool) *ScrollService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *ScrollService) ExpandWildcards(expandWildcards string) *ScrollService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// ScrollId specifies the identifier of a scroll in action.
+func (s *ScrollService) ScrollId(scrollId string) *ScrollService {
+ s.mu.Lock()
+ s.scrollId = scrollId
+ s.mu.Unlock()
+ return s
+}
+
+// Do returns the next search result. It will return io.EOF as error if there
+// are no more search results.
+func (s *ScrollService) Do(ctx context.Context) (*SearchResult, error) {
+ s.mu.RLock()
+ nextScrollId := s.scrollId
+ s.mu.RUnlock()
+ if len(nextScrollId) == 0 {
+ return s.first(ctx)
+ }
+ return s.next(ctx)
+}
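+
+// Example (editor's sketch, following the pattern used in scroll_test.go;
+// ctx is an assumed context.Context): page through all results until Do
+// returns io.EOF.
+//
+//    svc := client.Scroll("twitter").Size(100)
+//    for {
+//        res, err := svc.Do(ctx)
+//        if err == io.EOF {
+//            break // no more pages
+//        }
+//        if err != nil {
+//            return err
+//        }
+//        for _, hit := range res.Hits.Hits {
+//            _ = hit // process each hit
+//        }
+//    }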
+
+// Clear cancels the current scroll operation. If you don't do this manually,
+// the scroll will be expired automatically by Elasticsearch. You can control
+// how long a scroll cursor is kept alive with the KeepAlive func.
+func (s *ScrollService) Clear(ctx context.Context) error {
+ s.mu.RLock()
+ scrollId := s.scrollId
+ s.mu.RUnlock()
+ if len(scrollId) == 0 {
+ return nil
+ }
+
+ path := "/_search/scroll"
+ params := url.Values{}
+ body := struct {
+ ScrollId []string `json:"scroll_id,omitempty"`
+ }{
+ ScrollId: []string{scrollId},
+ }
+
+ _, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ Body: body,
+ Retrier: s.retrier,
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// -- First --
+
+// first retrieves the first page of search results.
+func (s *ScrollService) first(ctx context.Context) (*SearchResult, error) {
+ // Get URL and parameters for request
+ path, params, err := s.buildFirstURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP request body
+ body, err := s.bodyFirst()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ Retrier: s.retrier,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(SearchResult)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ s.mu.Lock()
+ s.scrollId = ret.ScrollId
+ s.mu.Unlock()
+ if ret.Hits == nil || len(ret.Hits.Hits) == 0 {
+ return nil, io.EOF
+ }
+ return ret, nil
+}
+
+// buildFirstURL builds the URL for retrieving the first page.
+func (s *ScrollService) buildFirstURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.indices) == 0 && len(s.types) == 0 {
+ path = "/_search"
+ } else if len(s.indices) > 0 && len(s.types) == 0 {
+ path, err = uritemplates.Expand("/{index}/_search", map[string]string{
+ "index": strings.Join(s.indices, ","),
+ })
+ } else if len(s.indices) == 0 && len(s.types) > 0 {
+ path, err = uritemplates.Expand("/_all/{typ}/_search", map[string]string{
+ "typ": strings.Join(s.types, ","),
+ })
+ } else {
+ path, err = uritemplates.Expand("/{index}/{typ}/_search", map[string]string{
+ "index": strings.Join(s.indices, ","),
+ "typ": strings.Join(s.types, ","),
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.size != nil && *s.size > 0 {
+ params.Set("size", fmt.Sprintf("%d", *s.size))
+ }
+ if len(s.keepAlive) > 0 {
+ params.Set("scroll", s.keepAlive)
+ }
+ if len(s.routing) > 0 {
+ params.Set("routing", s.routing)
+ }
+ if len(s.preference) > 0 {
+ params.Set("preference", s.preference)
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if len(s.expandWildcards) > 0 {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+
+ return path, params, nil
+}
+
+// bodyFirst returns the request body used to fetch the first batch of results.
+func (s *ScrollService) bodyFirst() (interface{}, error) {
+ var err error
+ var body interface{}
+
+ if s.body != nil {
+ body = s.body
+ } else {
+ // Use _doc sort by default if none is specified
+ if !s.ss.hasSort() {
+ // Use efficient sorting when no user-defined query/body is specified
+ s.ss = s.ss.SortBy(SortByDoc{})
+ }
+
+ // Body from search source
+ body, err = s.ss.Source()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return body, nil
+}
+
+// -- Next --
+
+func (s *ScrollService) next(ctx context.Context) (*SearchResult, error) {
+ // Get URL for request
+ path, params, err := s.buildNextURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ body, err := s.bodyNext()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ Retrier: s.retrier,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(SearchResult)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ s.mu.Lock()
+ s.scrollId = ret.ScrollId
+ s.mu.Unlock()
+ if ret.Hits == nil || len(ret.Hits.Hits) == 0 {
+ return nil, io.EOF
+ }
+ return ret, nil
+}
+
+// buildNextURL builds the URL for the operation.
+func (s *ScrollService) buildNextURL() (string, url.Values, error) {
+ path := "/_search/scroll"
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+
+ return path, params, nil
+}
+
+// bodyNext returns the request body used to fetch the next batch of results.
+func (s *ScrollService) bodyNext() (interface{}, error) {
+ s.mu.RLock()
+ body := struct {
+ Scroll string `json:"scroll"`
+ ScrollId string `json:"scroll_id,omitempty"`
+ }{
+ Scroll: s.keepAlive,
+ ScrollId: s.scrollId,
+ }
+ s.mu.RUnlock()
+ return body, nil
+}
diff --git a/vendor/github.com/olivere/elastic/scroll_test.go b/vendor/github.com/olivere/elastic/scroll_test.go
new file mode 100644
index 000000000..c94e5f92f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/scroll_test.go
@@ -0,0 +1,387 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "io"
+ _ "net/http"
+ "testing"
+)
+
+func TestScroll(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Should return all documents. Just don't call Do yet!
+ svc := client.Scroll(testIndexName).Size(1)
+
+ pages := 0
+ docs := 0
+
+ for {
+ res, err := svc.Do(context.TODO())
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected results != nil; got nil")
+ }
+ if res.Hits == nil {
+ t.Fatal("expected results.Hits != nil; got nil")
+ }
+ if want, have := int64(3), res.Hits.TotalHits; want != have {
+ t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have)
+ }
+ if want, have := 1, len(res.Hits.Hits); want != have {
+ t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", want, have)
+ }
+
+ pages++
+
+ for _, hit := range res.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ docs++
+ }
+
+ if len(res.ScrollId) == 0 {
+ t.Fatalf("expected scrollId in results; got %q", res.ScrollId)
+ }
+ }
+
+ if want, have := 3, pages; want != have {
+ t.Fatalf("expected to retrieve %d pages; got %d", want, have)
+ }
+ if want, have := 3, docs; want != have {
+ t.Fatalf("expected to retrieve %d hits; got %d", want, have)
+ }
+
+ err = svc.Clear(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = svc.Do(context.TODO())
+ if err == nil {
+ t.Fatal("expected to fail")
+ }
+}
+
+func TestScrollWithQueryAndSort(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+ // client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a scroll service that returns tweets from user olivere
+ // and returns them sorted by "message", in reverse order.
+ //
+ // Just don't call Do yet!
+ svc := client.Scroll(testIndexName).
+ Query(NewTermQuery("user", "olivere")).
+ Sort("message", false).
+ Size(1)
+
+ docs := 0
+ pages := 0
+ for {
+ res, err := svc.Do(context.TODO())
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected results != nil; got nil")
+ }
+ if res.Hits == nil {
+ t.Fatal("expected results.Hits != nil; got nil")
+ }
+ if want, have := int64(2), res.Hits.TotalHits; want != have {
+ t.Fatalf("expected results.Hits.TotalHits = %d; got %d", want, have)
+ }
+ if want, have := 1, len(res.Hits.Hits); want != have {
+ t.Fatalf("expected len(results.Hits.Hits) = %d; got %d", want, have)
+ }
+
+ pages++
+
+ for _, hit := range res.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ docs++
+ }
+ }
+
+ if want, have := 2, pages; want != have {
+ t.Fatalf("expected to retrieve %d pages; got %d", want, have)
+ }
+ if want, have := 2, docs; want != have {
+ t.Fatalf("expected to retrieve %d hits; got %d", want, have)
+ }
+}
+
+func TestScrollWithBody(t *testing.T) {
+ // client := setupTestClientAndCreateIndexAndLog(t)
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch.", Retweets: 4}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic.", Retweets: 10}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Test with simple strings and a map
+ var tests = []struct {
+ Body interface{}
+ ExpectedTotalHits int64
+ ExpectedDocs int
+ ExpectedPages int
+ }{
+ {
+ Body: `{"query":{"match_all":{}}}`,
+ ExpectedTotalHits: 3,
+ ExpectedDocs: 3,
+ ExpectedPages: 3,
+ },
+ {
+ Body: `{"query":{"term":{"user":"olivere"}},"sort":["_doc"]}`,
+ ExpectedTotalHits: 2,
+ ExpectedDocs: 2,
+ ExpectedPages: 2,
+ },
+ {
+ Body: `{"query":{"term":{"user":"olivere"}},"sort":[{"retweets":"desc"}]}`,
+ ExpectedTotalHits: 2,
+ ExpectedDocs: 2,
+ ExpectedPages: 2,
+ },
+ {
+ Body: map[string]interface{}{
+ "query": map[string]interface{}{
+ "term": map[string]interface{}{
+ "user": "olivere",
+ },
+ },
+ "sort": []interface{}{"_doc"},
+ },
+ ExpectedTotalHits: 2,
+ ExpectedDocs: 2,
+ ExpectedPages: 2,
+ },
+ }
+
+ for i, tt := range tests {
+ // Should return all documents. Just don't call Do yet!
+ svc := client.Scroll(testIndexName).Size(1).Body(tt.Body)
+
+ pages := 0
+ docs := 0
+
+ for {
+ res, err := svc.Do(context.TODO())
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatalf("#%d: expected results != nil; got nil", i)
+ }
+ if res.Hits == nil {
+ t.Fatalf("#%d: expected results.Hits != nil; got nil", i)
+ }
+ if want, have := tt.ExpectedTotalHits, res.Hits.TotalHits; want != have {
+ t.Fatalf("#%d: expected results.Hits.TotalHits = %d; got %d", i, want, have)
+ }
+ if want, have := 1, len(res.Hits.Hits); want != have {
+ t.Fatalf("#%d: expected len(results.Hits.Hits) = %d; got %d", i, want, have)
+ }
+
+ pages++
+
+ for _, hit := range res.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Fatalf("#%d: expected SearchResult.Hits.Hit.Index = %q; got %q", i, testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatalf("#%d: %v", i, err)
+ }
+ docs++
+ }
+
+ if len(res.ScrollId) == 0 {
+ t.Fatalf("#%d: expected scrollId in results; got %q", i, res.ScrollId)
+ }
+ }
+
+ if want, have := tt.ExpectedPages, pages; want != have {
+ t.Fatalf("#%d: expected to retrieve %d pages; got %d", i, want, have)
+ }
+ if want, have := tt.ExpectedDocs, docs; want != have {
+ t.Fatalf("#%d: expected to retrieve %d hits; got %d", i, want, have)
+ }
+
+ err = svc.Clear(context.TODO())
+ if err != nil {
+ t.Fatalf("#%d: failed to clear scroll context: %v", i, err)
+ }
+
+ _, err = svc.Do(context.TODO())
+ if err == nil {
+ t.Fatalf("#%d: expected to fail", i)
+ }
+ }
+}
+
+func TestScrollWithSlice(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ // Should return all documents. Just don't call Do yet!
+ sliceQuery := NewSliceQuery().Id(0).Max(2)
+ svc := client.Scroll(testIndexName).Slice(sliceQuery).Size(1)
+
+ pages := 0
+ docs := 0
+
+ for {
+ res, err := svc.Do(context.TODO())
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected results != nil; got nil")
+ }
+ if res.Hits == nil {
+ t.Fatal("expected results.Hits != nil; got nil")
+ }
+
+ pages++
+
+ for _, hit := range res.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ docs++
+ }
+
+ if len(res.ScrollId) == 0 {
+ t.Fatalf("expected scrollId in results; got %q", res.ScrollId)
+ }
+ }
+
+ if pages == 0 {
+ t.Fatal("expected to retrieve some pages")
+ }
+ if docs == 0 {
+ t.Fatal("expected to retrieve some hits")
+ }
+
+ if err := svc.Clear(context.TODO()); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := svc.Do(context.TODO()); err == nil {
+ t.Fatal("expected to fail")
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search.go b/vendor/github.com/olivere/elastic/search.go
new file mode 100644
index 000000000..12d51bf1f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search.go
@@ -0,0 +1,581 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "reflect"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// Search for documents in Elasticsearch.
+type SearchService struct {
+ client *Client
+ searchSource *SearchSource
+ source interface{}
+ pretty bool
+ filterPath []string
+ searchType string
+ index []string
+ typ []string
+ routing string
+ preference string
+ requestCache *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+// NewSearchService creates a new service for searching in Elasticsearch.
+func NewSearchService(client *Client) *SearchService {
+ builder := &SearchService{
+ client: client,
+ searchSource: NewSearchSource(),
+ }
+ return builder
+}
+
+// SearchSource sets the search source builder to use with this service.
+func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService {
+ s.searchSource = searchSource
+ if s.searchSource == nil {
+ s.searchSource = NewSearchSource()
+ }
+ return s
+}
+
+// Source allows the user to set the request body manually without using
+// any of the structs and interfaces in Elastic.
+func (s *SearchService) Source(source interface{}) *SearchService {
+ s.source = source
+ return s
+}
+
+// FilterPath allows reducing the response, a mechanism known as
+// response filtering and described here:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/common-options.html#common-options-response-filtering.
+func (s *SearchService) FilterPath(filterPath ...string) *SearchService {
+ s.filterPath = append(s.filterPath, filterPath...)
+ return s
+}
+
+// Index sets the names of the indices to use for search.
+func (s *SearchService) Index(index ...string) *SearchService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Type adds search restrictions for a list of types.
+func (s *SearchService) Type(typ ...string) *SearchService {
+ s.typ = append(s.typ, typ...)
+ return s
+}
+
+// Pretty enables the caller to indent the JSON output.
+func (s *SearchService) Pretty(pretty bool) *SearchService {
+ s.pretty = pretty
+ return s
+}
+
+// Timeout sets the timeout to use, e.g. "1s" or "1000ms".
+func (s *SearchService) Timeout(timeout string) *SearchService {
+ s.searchSource = s.searchSource.Timeout(timeout)
+ return s
+}
+
+// Profile sets the Profile API flag on the search source.
+// When enabled, a search executed by this service will return query
+// profiling data.
+func (s *SearchService) Profile(profile bool) *SearchService {
+ s.searchSource = s.searchSource.Profile(profile)
+ return s
+}
+
+// Collapse adds field collapsing.
+func (s *SearchService) Collapse(collapse *CollapseBuilder) *SearchService {
+ s.searchSource = s.searchSource.Collapse(collapse)
+ return s
+}
+
+// TimeoutInMillis sets the timeout in milliseconds.
+func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService {
+ s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis)
+ return s
+}
+
+// SearchType sets the search operation type. Valid values are:
+// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch",
+// "dfs_query_and_fetch", "count", "scan".
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-search-type.html
+// for details.
+func (s *SearchService) SearchType(searchType string) *SearchService {
+ s.searchType = searchType
+ return s
+}
+
+// Routing is a list of specific routing values to control the shards
+// the search will be executed on.
+func (s *SearchService) Routing(routings ...string) *SearchService {
+ s.routing = strings.Join(routings, ",")
+ return s
+}
+
+// Preference sets the preference to execute the search. Defaults to
+// randomize across shards ("random"). Can be set to "_local" to prefer
+// local shards, "_primary" to execute on primary shards only,
+// or a custom value which guarantees that the same order will be used
+// across different requests.
+func (s *SearchService) Preference(preference string) *SearchService {
+ s.preference = preference
+ return s
+}
+
+// RequestCache indicates whether the cache should be used for this
+// request or not, defaults to index level setting.
+func (s *SearchService) RequestCache(requestCache bool) *SearchService {
+ s.requestCache = &requestCache
+ return s
+}
+
+// Query sets the query to perform, e.g. MatchAllQuery.
+func (s *SearchService) Query(query Query) *SearchService {
+ s.searchSource = s.searchSource.Query(query)
+ return s
+}
+
+// PostFilter will be executed after the query has been executed and
+// only affects the search hits, not the aggregations.
+// This filter is always executed as the last filtering mechanism.
+func (s *SearchService) PostFilter(postFilter Query) *SearchService {
+ s.searchSource = s.searchSource.PostFilter(postFilter)
+ return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *SearchService) FetchSource(fetchSource bool) *SearchService {
+ s.searchSource = s.searchSource.FetchSource(fetchSource)
+ return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *SearchService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchService {
+ s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext)
+ return s
+}
+
+// Highlight adds highlighting to the search.
+func (s *SearchService) Highlight(highlight *Highlight) *SearchService {
+ s.searchSource = s.searchSource.Highlight(highlight)
+ return s
+}
+
+// GlobalSuggestText defines the global text to use with all suggesters.
+// This avoids repetition.
+func (s *SearchService) GlobalSuggestText(globalText string) *SearchService {
+ s.searchSource = s.searchSource.GlobalSuggestText(globalText)
+ return s
+}
+
+// Suggester adds a suggester to the search.
+func (s *SearchService) Suggester(suggester Suggester) *SearchService {
+ s.searchSource = s.searchSource.Suggester(suggester)
+ return s
+}
+
+// Aggregation adds an aggregation to perform as part of the search.
+func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService {
+ s.searchSource = s.searchSource.Aggregation(name, aggregation)
+ return s
+}
+
+// MinScore sets the minimum score below which docs will be filtered out.
+func (s *SearchService) MinScore(minScore float64) *SearchService {
+ s.searchSource = s.searchSource.MinScore(minScore)
+ return s
+}
+
+// From defines the index to start the search from. Defaults to 0.
+func (s *SearchService) From(from int) *SearchService {
+ s.searchSource = s.searchSource.From(from)
+ return s
+}
+
+// Size is the number of search hits to return. Defaults to 10.
+func (s *SearchService) Size(size int) *SearchService {
+ s.searchSource = s.searchSource.Size(size)
+ return s
+}
+
+// Explain indicates whether each search hit should be returned with
+// an explanation of the hit (ranking).
+func (s *SearchService) Explain(explain bool) *SearchService {
+ s.searchSource = s.searchSource.Explain(explain)
+ return s
+}
+
+// Version indicates whether each search hit should be returned with
+// a version associated to it.
+func (s *SearchService) Version(version bool) *SearchService {
+ s.searchSource = s.searchSource.Version(version)
+ return s
+}
+
+// Sort adds a sort order.
+func (s *SearchService) Sort(field string, ascending bool) *SearchService {
+ s.searchSource = s.searchSource.Sort(field, ascending)
+ return s
+}
+
+// SortWithInfo adds a sort order.
+func (s *SearchService) SortWithInfo(info SortInfo) *SearchService {
+ s.searchSource = s.searchSource.SortWithInfo(info)
+ return s
+}
+
+// SortBy adds a sort order.
+func (s *SearchService) SortBy(sorter ...Sorter) *SearchService {
+ s.searchSource = s.searchSource.SortBy(sorter...)
+ return s
+}
+
+// NoStoredFields indicates that no stored fields should be loaded, resulting
+// in only the id and type being returned for each hit.
+func (s *SearchService) NoStoredFields() *SearchService {
+ s.searchSource = s.searchSource.NoStoredFields()
+ return s
+}
+
+// StoredField adds a single field to load and return (note, must be stored) as
+// part of the search request. If none are specified, the source of the
+// document will be returned.
+func (s *SearchService) StoredField(fieldName string) *SearchService {
+ s.searchSource = s.searchSource.StoredField(fieldName)
+ return s
+}
+
+// StoredFields sets the fields to load and return as part of the search request.
+// If none are specified, the source of the document will be returned.
+func (s *SearchService) StoredFields(fields ...string) *SearchService {
+ s.searchSource = s.searchSource.StoredFields(fields...)
+ return s
+}
+
+// TrackScores is applied when sorting and controls if scores will be
+// tracked as well. Defaults to false.
+func (s *SearchService) TrackScores(trackScores bool) *SearchService {
+ s.searchSource = s.searchSource.TrackScores(trackScores)
+ return s
+}
+
+// SearchAfter allows a different form of pagination by using a live cursor,
+// using the results of the previous page to help the retrieval of the next.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-search-after.html
+func (s *SearchService) SearchAfter(sortValues ...interface{}) *SearchService {
+ s.searchSource = s.searchSource.SearchAfter(sortValues...)
+ return s
+}
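+
+// Editorial sketch (not part of the upstream file) of cursor-style paging
+// with SearchAfter: sort the first page, then feed the last hit's Sort
+// values into the next request. Assumes client, ctx and a non-empty page.
+//
+//	page, err := NewSearchService(client).
+//		Index("tweets").
+//		Sort("created", true).
+//		Size(100).
+//		Do(ctx)
+//	if err != nil { ... }
+//	last := page.Hits.Hits[len(page.Hits.Hits)-1]
+//	next, err := NewSearchService(client).
+//		Index("tweets").
+//		Sort("created", true).
+//		Size(100).
+//		SearchAfter(last.Sort...).
+//		Do(ctx)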
+
+// IgnoreUnavailable indicates whether the specified concrete indices
+// should be ignored when unavailable (missing or closed).
+func (s *SearchService) IgnoreUnavailable(ignoreUnavailable bool) *SearchService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices. (This includes the `_all`
+// string and the case where no indices have been specified.)
+func (s *SearchService) AllowNoIndices(allowNoIndices bool) *SearchService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed or both.
+func (s *SearchService) ExpandWildcards(expandWildcards string) *SearchService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *SearchService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ if len(s.index) > 0 && len(s.typ) > 0 {
+ path, err = uritemplates.Expand("/{index}/{type}/_search", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "type": strings.Join(s.typ, ","),
+ })
+ } else if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_search", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else if len(s.typ) > 0 {
+ path, err = uritemplates.Expand("/_all/{type}/_search", map[string]string{
+ "type": strings.Join(s.typ, ","),
+ })
+ } else {
+ path = "/_search"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.searchType != "" {
+ params.Set("search_type", s.searchType)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.requestCache != nil {
+ params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if len(s.filterPath) > 0 {
+ params.Set("filter_path", strings.Join(s.filterPath, ","))
+ }
+ return path, params, nil
+}
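+
+// For illustration (editorial note): with index=["a","b"] and type=["t"],
+// buildURL yields "/a,b/t/_search"; with only indices, "/a,b/_search";
+// with only types, "/_all/t/_search"; with neither, "/_search".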
+
+// Validate checks if the operation is valid.
+func (s *SearchService) Validate() error {
+ return nil
+}
+
+// Do executes the search and returns a SearchResult.
+func (s *SearchService) Do(ctx context.Context) (*SearchResult, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Perform request
+ var body interface{}
+ if s.source != nil {
+ body = s.source
+ } else {
+ src, err := s.searchSource.Source()
+ if err != nil {
+ return nil, err
+ }
+ body = src
+ }
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return search results
+ ret := new(SearchResult)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
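+
+// A minimal end-to-end sketch (editorial example, not part of the upstream
+// file). It assumes an existing *Client named client and the match-all
+// query constructor NewMatchAllQuery from this package; everything else is
+// defined above.
+//
+//	ctx := context.Background()
+//	res, err := NewSearchService(client).
+//		Index("tweets").
+//		Query(NewMatchAllQuery()).
+//		From(0).Size(10).
+//		Pretty(true).
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Printf("found %d hits in %d ms\n", res.TotalHits(), res.TookInMillis)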
+
+// SearchResult is the result of a search in Elasticsearch.
+type SearchResult struct {
+ TookInMillis int64 `json:"took"` // search time in milliseconds
+ ScrollId string `json:"_scroll_id"` // only used with Scroll and Scan operations
+ Hits *SearchHits `json:"hits"` // the actual search hits
+ Suggest SearchSuggest `json:"suggest"` // results from suggesters
+ Aggregations Aggregations `json:"aggregations"` // results from aggregations
+ TimedOut bool `json:"timed_out"` // true if the search timed out
+ Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet
+ Profile *SearchProfile `json:"profile,omitempty"` // profiling results, if optional Profile API was active for this search
+ Shards *shardsInfo `json:"_shards,omitempty"` // shard information
+}
+
+// TotalHits is a convenience function to return the number of hits for
+// a search result.
+func (r *SearchResult) TotalHits() int64 {
+ if r.Hits != nil {
+ return r.Hits.TotalHits
+ }
+ return 0
+}
+
+// Each is a utility function to iterate over all hits. It saves you from
+// checking for nil values. Note that Each ignores errors when
+// deserializing JSON, and hits with an empty or nil _source yield a
+// zero value.
+func (r *SearchResult) Each(typ reflect.Type) []interface{} {
+ if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 {
+ return nil
+ }
+ var slice []interface{}
+ for _, hit := range r.Hits.Hits {
+ v := reflect.New(typ).Elem()
+ if hit.Source == nil {
+ slice = append(slice, v.Interface())
+ continue
+ }
+ if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil {
+ slice = append(slice, v.Interface())
+ }
+ }
+ return slice
+}
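+
+// Usage sketch (editorial example; the Tweet type is hypothetical and res
+// is a *SearchResult obtained from Do above):
+//
+//	type Tweet struct {
+//		User    string `json:"user"`
+//		Message string `json:"message"`
+//	}
+//
+//	for _, item := range res.Each(reflect.TypeOf(Tweet{})) {
+//		if t, ok := item.(Tweet); ok {
+//			fmt.Println(t.User, t.Message)
+//		}
+//	}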
+
+// SearchHits specifies the list of search hits.
+type SearchHits struct {
+ TotalHits int64 `json:"total"` // total number of hits found
+ MaxScore *float64 `json:"max_score"` // maximum score of all hits
+ Hits []*SearchHit `json:"hits"` // the actual hits returned
+}
+
+// SearchHit is a single hit.
+type SearchHit struct {
+ Score *float64 `json:"_score"` // computed score
+ Index string `json:"_index"` // index name
+ Type string `json:"_type"` // type meta field
+ Id string `json:"_id"` // external or internal
+ Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields)
+ Routing string `json:"_routing"` // routing meta field
+ Parent string `json:"_parent"` // parent meta field
+ Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService
+ Sort []interface{} `json:"sort"` // sort information
+ Highlight SearchHitHighlight `json:"highlight"` // highlighter information
+ Source *json.RawMessage `json:"_source"` // stored document source
+ Fields map[string]interface{} `json:"fields"` // returned (stored) fields
+ Explanation *SearchExplanation `json:"_explanation"` // explains how the score was computed
+ MatchedQueries []string `json:"matched_queries"` // matched queries
+ InnerHits map[string]*SearchHitInnerHits `json:"inner_hits"` // inner hits with ES >= 1.5.0
+
+ // Shard
+ // HighlightFields
+ // SortValues
+ // MatchedFilters
+}
+
+type SearchHitInnerHits struct {
+ Hits *SearchHits `json:"hits"`
+}
+
+// SearchExplanation explains how the score for a hit was computed.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-explain.html.
+type SearchExplanation struct {
+ Value float64 `json:"value"` // e.g. 1.0
+ Description string `json:"description"` // e.g. "boost" or "ConstantScore(*:*), product of:"
+ Details []SearchExplanation `json:"details,omitempty"` // recursive details
+}
+
+// Suggest
+
+// SearchSuggest is a map of suggestions.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters.html.
+type SearchSuggest map[string][]SearchSuggestion
+
+// SearchSuggestion is a single search suggestion.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters.html.
+type SearchSuggestion struct {
+ Text string `json:"text"`
+ Offset int `json:"offset"`
+ Length int `json:"length"`
+ Options []SearchSuggestionOption `json:"options"`
+}
+
+// SearchSuggestionOption is an option of a SearchSuggestion.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters.html.
+type SearchSuggestionOption struct {
+ Text string `json:"text"`
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id"`
+ Score float64 `json:"score"`
+ Highlighted string `json:"highlighted"`
+ CollateMatch bool `json:"collate_match"`
+ Freq int `json:"freq"` // from TermSuggestion.Option in Java API
+ Source *json.RawMessage `json:"_source"`
+}
+
+// SearchProfile is a list of shard profiling data collected during
+// query execution in the "profile" section of a SearchResult.
+type SearchProfile struct {
+ Shards []SearchProfileShardResult `json:"shards"`
+}
+
+// SearchProfileShardResult holds the profiling data for a single shard
+// accessed during the search query or aggregation.
+type SearchProfileShardResult struct {
+ ID string `json:"id"`
+ Searches []QueryProfileShardResult `json:"searches"`
+ Aggregations []ProfileResult `json:"aggregations"`
+}
+
+// QueryProfileShardResult is a container class to hold the profile results
+// for a single shard in the request. It contains a list of query profiles,
+// a collector tree and a total rewrite tree.
+type QueryProfileShardResult struct {
+ Query []ProfileResult `json:"query,omitempty"`
+ RewriteTime int64 `json:"rewrite_time,omitempty"`
+ Collector []interface{} `json:"collector,omitempty"`
+}
+
+// CollectorResult holds the profile timings of the collectors used in the
+// search. Children's CollectorResults may be embedded inside a parent
+// CollectorResult.
+type CollectorResult struct {
+ Name string `json:"name,omitempty"`
+ Reason string `json:"reason,omitempty"`
+ Time string `json:"time,omitempty"`
+ TimeNanos int64 `json:"time_in_nanos,omitempty"`
+ Children []CollectorResult `json:"children,omitempty"`
+}
+
+// ProfileResult is the internal representation of a profiled query,
+// corresponding to a single node in the query tree.
+type ProfileResult struct {
+ Type string `json:"type"`
+ Description string `json:"description,omitempty"`
+ NodeTime string `json:"time,omitempty"`
+ NodeTimeNanos int64 `json:"time_in_nanos,omitempty"`
+ Breakdown map[string]int64 `json:"breakdown,omitempty"`
+ Children []ProfileResult `json:"children,omitempty"`
+}
+
+// Aggregations (see search_aggs.go)
+
+// Highlighting
+
+// SearchHitHighlight is the highlight information of a search hit.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-highlighting.html
+// for a general discussion of highlighting.
+type SearchHitHighlight map[string][]string
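+
+// Reading highlights is a map lookup (editorial sketch, given a *SearchHit
+// named hit): the key is the field name, the value the list of highlighted
+// fragments returned for that field.
+//
+//	for field, fragments := range hit.Highlight {
+//		fmt.Println(field, strings.Join(fragments, " ... "))
+//	}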
diff --git a/vendor/github.com/olivere/elastic/search_aggs.go b/vendor/github.com/olivere/elastic/search_aggs.go
new file mode 100644
index 000000000..c5082b2b1
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs.go
@@ -0,0 +1,1450 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// Aggregation can be seen as a unit of work that builds
+// analytic information over a set of documents. It is
+// (in many senses) the follow-up of facets in Elasticsearch.
+// For more details about aggregations, visit:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations.html
+type Aggregation interface {
+ // Source returns a JSON-serializable aggregation that is a fragment
+ // of the request sent to Elasticsearch.
+ Source() (interface{}, error)
+}
+
+// Aggregations is a list of aggregations that are part of a search result.
+type Aggregations map[string]*json.RawMessage
+
+// Min returns min aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-min-aggregation.html
+func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
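+
+// All of these accessors follow the same pattern, shown here once as an
+// editorial sketch. NewMinAggregation is the assumed constructor for a min
+// aggregation in this package; client and ctx are assumed to exist.
+//
+//	res, err := NewSearchService(client).
+//		Index("products").
+//		Aggregation("min_price", NewMinAggregation().Field("price")).
+//		Size(0).
+//		Do(ctx)
+//	if err != nil { ... }
+//	if agg, found := res.Aggregations.Min("min_price"); found && agg.Value != nil {
+//		fmt.Println("min price:", *agg.Value)
+//	}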
+
+// Max returns max aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-max-aggregation.html
+func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Sum returns sum aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-sum-aggregation.html
+func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Avg returns average aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-avg-aggregation.html
+func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// ValueCount returns value-count aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-valuecount-aggregation.html
+func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Cardinality returns cardinality aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-cardinality-aggregation.html
+func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Stats returns stats aggregation results.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-stats-aggregation.html
+func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationStatsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// ExtendedStats returns extended stats aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-extendedstats-aggregation.html
+func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationExtendedStatsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// MatrixStats returns matrix stats aggregation results.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-matrix-stats-aggregation.html
+func (a Aggregations) MatrixStats(name string) (*AggregationMatrixStats, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationMatrixStats)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Percentiles returns percentiles results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-aggregation.html
+func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPercentilesMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// PercentileRanks returns percentile ranks results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-rank-aggregation.html
+func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPercentilesMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// TopHits returns top-hits aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-top-hits-aggregation.html
+func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationTopHitsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Global returns global results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-global-aggregation.html
+func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Filter returns filter results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filter-aggregation.html
+func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Filters returns filters results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filters-aggregation.html
+func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketFilters)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Missing returns missing results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-missing-aggregation.html
+func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Nested returns nested results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-nested-aggregation.html
+func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// ReverseNested returns reverse-nested results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-reverse-nested-aggregation.html
+func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Children returns children results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-children-aggregation.html
+func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Terms returns terms aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-terms-aggregation.html
+func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketKeyItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
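+
+// Buckets of a terms aggregation can be walked via the returned
+// AggregationBucketKeyItems (editorial sketch; NewTermsAggregation is the
+// assumed constructor):
+//
+//	res, _ := NewSearchService(client).
+//		Index("tweets").
+//		Aggregation("by_user", NewTermsAggregation().Field("user")).
+//		Size(0).
+//		Do(ctx)
+//	if terms, found := res.Aggregations.Terms("by_user"); found {
+//		for _, b := range terms.Buckets {
+//			fmt.Printf("%v: %d docs\n", b.Key, b.DocCount)
+//		}
+//	}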
+
+// SignificantTerms returns significant terms aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html
+func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketSignificantTerms)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Sampler returns sampler aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-sampler-aggregation.html
+func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Range returns range aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-range-aggregation.html
+func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// KeyedRange returns keyed range aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-range-aggregation.html.
+func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketKeyedRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// DateRange returns date range aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-daterange-aggregation.html
+func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// IPRange returns IP range aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-iprange-aggregation.html
+func (a Aggregations) IPRange(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Histogram returns histogram aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-histogram-aggregation.html
+func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketHistogramItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// DateHistogram returns date histogram aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-datehistogram-aggregation.html
+func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketHistogramItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// GeoBounds returns geo-bounds aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-geobounds-aggregation.html
+func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationGeoBoundsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// GeoHash returns geo-hash aggregation results.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-geohashgrid-aggregation.html
+func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketKeyItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// GeoDistance returns geo distance aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-geodistance-aggregation.html
+func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// AvgBucket returns average bucket pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-avg-bucket-aggregation.html
+func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPipelineSimpleValue)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// SumBucket returns sum bucket pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-sum-bucket-aggregation.html
+func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPipelineSimpleValue)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// StatsBucket returns stats bucket pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-stats-bucket-aggregation.html
+func (a Aggregations) StatsBucket(name string) (*AggregationPipelineStatsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPipelineStatsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// PercentilesBucket returns percentiles bucket pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-percentiles-bucket-aggregation.html
+func (a Aggregations) PercentilesBucket(name string) (*AggregationPipelinePercentilesMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPipelinePercentilesMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// MaxBucket returns maximum bucket pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-max-bucket-aggregation.html
+func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricValue, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPipelineBucketMetricValue)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// MinBucket returns minimum bucket pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-min-bucket-aggregation.html
+func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricValue, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPipelineBucketMetricValue)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// MovAvg returns moving average pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html
+func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPipelineSimpleValue)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Derivative returns derivative pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-derivative-aggregation.html
+func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPipelineDerivative)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// CumulativeSum returns cumulative sum pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-cumulative-sum-aggregation.html
+func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValue, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPipelineSimpleValue)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// BucketScript returns bucket script pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-bucket-script-aggregation.html
+func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPipelineSimpleValue)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// SerialDiff returns serial differencing pipeline aggregation results.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-serialdiff-aggregation.html
+func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPipelineSimpleValue)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// -- Single value metric --
+
+// AggregationValueMetric is a single-value metric, returned e.g. by a
+// Min or Max aggregation.
+type AggregationValueMetric struct {
+ Aggregations
+
+ Value *float64 //`json:"value"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure.
+func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["value"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Value)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Stats metric --
+
+// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation.
+type AggregationStatsMetric struct {
+ Aggregations
+
+ Count int64 // `json:"count"`
+ Min *float64 //`json:"min,omitempty"`
+ Max *float64 //`json:"max,omitempty"`
+ Avg *float64 //`json:"avg,omitempty"`
+ Sum *float64 //`json:"sum,omitempty"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure.
+func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Count)
+ }
+ if v, ok := aggs["min"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Min)
+ }
+ if v, ok := aggs["max"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Max)
+ }
+ if v, ok := aggs["avg"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Avg)
+ }
+ if v, ok := aggs["sum"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Sum)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Extended stats metric --
+
+// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation.
+type AggregationExtendedStatsMetric struct {
+ Aggregations
+
+ Count int64 // `json:"count"`
+ Min *float64 //`json:"min,omitempty"`
+ Max *float64 //`json:"max,omitempty"`
+ Avg *float64 //`json:"avg,omitempty"`
+ Sum *float64 //`json:"sum,omitempty"`
+ SumOfSquares *float64 //`json:"sum_of_squares,omitempty"`
+ Variance *float64 //`json:"variance,omitempty"`
+ StdDeviation *float64 //`json:"std_deviation,omitempty"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure.
+func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Count)
+ }
+ if v, ok := aggs["min"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Min)
+ }
+ if v, ok := aggs["max"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Max)
+ }
+ if v, ok := aggs["avg"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Avg)
+ }
+ if v, ok := aggs["sum"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Sum)
+ }
+ if v, ok := aggs["sum_of_squares"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfSquares)
+ }
+ if v, ok := aggs["variance"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Variance)
+ }
+ if v, ok := aggs["std_deviation"]; ok && v != nil {
+ json.Unmarshal(*v, &a.StdDeviation)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Matrix Stats --
+
+// AggregationMatrixStats is returned by a MatrixStats aggregation.
+type AggregationMatrixStats struct {
+ Aggregations
+
+ Fields []*AggregationMatrixStatsField // `json:"field,omitempty"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// AggregationMatrixStatsField represents running stats of a single field
+// returned from a MatrixStats aggregation.
+type AggregationMatrixStatsField struct {
+ Name string `json:"name"`
+ Count int64 `json:"count"`
+ Mean float64 `json:"mean,omitempty"`
+ Variance float64 `json:"variance,omitempty"`
+ Skewness float64 `json:"skewness,omitempty"`
+ Kurtosis float64 `json:"kurtosis,omitempty"`
+ Covariance map[string]float64 `json:"covariance,omitempty"`
+ Correlation map[string]float64 `json:"correlation,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationMatrixStats structure.
+func (a *AggregationMatrixStats) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["fields"]; ok && v != nil {
+ // RunningStats for every field
+ json.Unmarshal(*v, &a.Fields)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Percentiles metric --
+
+// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation.
+type AggregationPercentilesMetric struct {
+ Aggregations
+
+ Values map[string]float64 // `json:"values"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure.
+func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["values"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Values)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Top-hits metric --
+
+// AggregationTopHitsMetric is a metric returned by a TopHits aggregation.
+type AggregationTopHitsMetric struct {
+ Aggregations
+
+ Hits *SearchHits //`json:"hits"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure.
+func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ a.Aggregations = aggs
+ a.Hits = new(SearchHits)
+ if v, ok := aggs["hits"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Hits)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ return nil
+}
+
+// -- Geo-bounds metric --
+
+// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation.
+type AggregationGeoBoundsMetric struct {
+ Aggregations
+
+ Bounds struct {
+ TopLeft struct {
+ Latitude float64 `json:"lat"`
+ Longitude float64 `json:"lon"`
+ } `json:"top_left"`
+ BottomRight struct {
+ Latitude float64 `json:"lat"`
+ Longitude float64 `json:"lon"`
+ } `json:"bottom_right"`
+ } `json:"bounds"`
+
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure.
+func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["bounds"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Bounds)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Single bucket --
+
+// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global.
+type AggregationSingleBucket struct {
+ Aggregations
+
+ DocCount int64 // `json:"doc_count"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure.
+func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Bucket range items --
+
+// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned
+// with a range aggregation.
+type AggregationBucketRangeItems struct {
+ Aggregations
+
+ DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
+ SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
+ Buckets []*AggregationBucketRangeItem //`json:"buckets"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure.
+func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCountErrorUpperBound)
+ }
+ if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfOtherDocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned
+// with a keyed range aggregation.
+type AggregationBucketKeyedRangeItems struct {
+ Aggregations
+
+ DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
+ SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
+ Buckets map[string]*AggregationBucketRangeItem //`json:"buckets"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyedRangeItems structure.
+func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCountErrorUpperBound)
+ }
+ if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfOtherDocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure.
+type AggregationBucketRangeItem struct {
+ Aggregations
+
+ Key string //`json:"key"`
+ DocCount int64 //`json:"doc_count"`
+ From *float64 //`json:"from"`
+ FromAsString string //`json:"from_as_string"`
+ To *float64 //`json:"to"`
+ ToAsString string //`json:"to_as_string"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure.
+func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ if v, ok := aggs["from"]; ok && v != nil {
+ json.Unmarshal(*v, &a.From)
+ }
+ if v, ok := aggs["from_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.FromAsString)
+ }
+ if v, ok := aggs["to"]; ok && v != nil {
+ json.Unmarshal(*v, &a.To)
+ }
+ if v, ok := aggs["to_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.ToAsString)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Bucket key items --
+
+// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned
+// with a terms aggregation.
+type AggregationBucketKeyItems struct {
+ Aggregations
+
+ DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
+ SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
+ Buckets []*AggregationBucketKeyItem //`json:"buckets"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure.
+func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCountErrorUpperBound)
+ }
+ if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfOtherDocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure.
+type AggregationBucketKeyItem struct {
+ Aggregations
+
+ Key interface{} //`json:"key"`
+ KeyAsString *string //`json:"key_as_string"`
+ KeyNumber json.Number
+ DocCount int64 //`json:"doc_count"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure.
+func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ dec := json.NewDecoder(bytes.NewReader(data))
+ dec.UseNumber()
+ if err := dec.Decode(&aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ json.Unmarshal(*v, &a.KeyNumber)
+ }
+ if v, ok := aggs["key_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.KeyAsString)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ a.Aggregations = aggs
+ return nil
+}
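+
+// Note (editorial): numeric bucket keys are kept verbatim in KeyNumber
+// (a json.Number), so large integer keys survive without float rounding.
+// Given an *AggregationBucketKeyItem named bucket:
+//
+//	if n, err := bucket.KeyNumber.Int64(); err == nil {
+//		fmt.Println("key:", n)
+//	}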
+
+// -- Bucket types for significant terms --
+
+// AggregationBucketSignificantTerms is a bucket aggregation returned
+// with a significant terms aggregation.
+type AggregationBucketSignificantTerms struct {
+ Aggregations
+
+ DocCount int64 //`json:"doc_count"`
+ Buckets []*AggregationBucketSignificantTerm //`json:"buckets"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure.
+func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure.
+type AggregationBucketSignificantTerm struct {
+ Aggregations
+
+ Key string //`json:"key"`
+ DocCount int64 //`json:"doc_count"`
+ BgCount int64 //`json:"bg_count"`
+ Score float64 //`json:"score"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure.
+func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ if v, ok := aggs["bg_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.BgCount)
+ }
+ if v, ok := aggs["score"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Score)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Bucket filters --
+
+// AggregationBucketFilters is a multi-bucket aggregation that is returned
+// with a filters aggregation.
+type AggregationBucketFilters struct {
+ Aggregations
+
+ Buckets []*AggregationBucketKeyItem //`json:"buckets"`
+ NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure.
+func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ json.Unmarshal(*v, &a.NamedBuckets)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
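+
+// Note (editorial): the "buckets" payload is an array for anonymous
+// filters and an object for keyed filters; the code above tries both and
+// silently ignores the unmarshal that fails, so exactly one of Buckets and
+// NamedBuckets ends up populated.
+//
+//	if f, found := res.Aggregations.Filters("my_filters"); found {
+//		for name, b := range f.NamedBuckets { // keyed form
+//			fmt.Println(name, b.DocCount)
+//		}
+//	}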
+
+// -- Bucket histogram items --
+
+// AggregationBucketHistogramItems is a bucket aggregation that is returned
+// with a date histogram aggregation.
+type AggregationBucketHistogramItems struct {
+ Aggregations
+
+ Buckets []*AggregationBucketHistogramItem //`json:"buckets"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure.
+func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure.
+type AggregationBucketHistogramItem struct {
+ Aggregations
+
+ Key float64 //`json:"key"`
+ KeyAsString *string //`json:"key_as_string"`
+ DocCount int64 //`json:"doc_count"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure.
+func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ }
+ if v, ok := aggs["key_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.KeyAsString)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Pipeline simple value --
+
+// AggregationPipelineSimpleValue is a simple value, returned e.g. by a
+// MovAvg aggregation.
+type AggregationPipelineSimpleValue struct {
+ Aggregations
+
+ Value *float64 // `json:"value"`
+ ValueAsString string // `json:"value_as_string"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineSimpleValue structure.
+func (a *AggregationPipelineSimpleValue) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["value"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Value)
+ }
+ if v, ok := aggs["value_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.ValueAsString)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Pipeline bucket metric value --
+
+// AggregationPipelineBucketMetricValue is a value returned e.g. by a
+// MaxBucket aggregation.
+type AggregationPipelineBucketMetricValue struct {
+ Aggregations
+
+ Keys []interface{} // `json:"keys"`
+ Value *float64 // `json:"value"`
+ ValueAsString string // `json:"value_as_string"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineBucketMetricValue structure.
+func (a *AggregationPipelineBucketMetricValue) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["keys"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Keys)
+ }
+ if v, ok := aggs["value"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Value)
+ }
+ if v, ok := aggs["value_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.ValueAsString)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Pipeline derivative --
+
+// AggregationPipelineDerivative is the value returned by a
+// Derivative aggregation.
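+// For illustration, a derivative computed with a unit yields both fields,
+// e.g. {"value": 315.0, "normalized_value": 10.5} decodes into Value and
+// NormalizedValue below.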
+type AggregationPipelineDerivative struct {
+ Aggregations
+
+ Value *float64 // `json:"value"`
+ ValueAsString string // `json:"value_as_string"`
+ NormalizedValue *float64 // `json:"normalized_value"`
+ NormalizedValueAsString string // `json:"normalized_value_as_string"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineDerivative structure.
+func (a *AggregationPipelineDerivative) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["value"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Value)
+ }
+ if v, ok := aggs["value_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.ValueAsString)
+ }
+ if v, ok := aggs["normalized_value"]; ok && v != nil {
+ json.Unmarshal(*v, &a.NormalizedValue)
+ }
+ if v, ok := aggs["normalized_value_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.NormalizedValueAsString)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Pipeline stats metric --
+
+// AggregationPipelineStatsMetric is a set of statistics, returned e.g. by a
+// StatsBucket aggregation.
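+// For illustration, a stats_bucket response fragment such as
+//   {"count": 4, "min": 10.0, "max": 40.0, "avg": 25.0, "sum": 100.0}
+// decodes into the corresponding fields below.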
+type AggregationPipelineStatsMetric struct {
+ Aggregations
+
+ Count int64 // `json:"count"`
+ CountAsString string // `json:"count_as_string"`
+ Min *float64 // `json:"min"`
+ MinAsString string // `json:"min_as_string"`
+ Max *float64 // `json:"max"`
+ MaxAsString string // `json:"max_as_string"`
+ Avg *float64 // `json:"avg"`
+ AvgAsString string // `json:"avg_as_string"`
+ Sum *float64 // `json:"sum"`
+ SumAsString string // `json:"sum_as_string"`
+
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineStatsMetric structure.
+func (a *AggregationPipelineStatsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Count)
+ }
+ if v, ok := aggs["count_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.CountAsString)
+ }
+ if v, ok := aggs["min"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Min)
+ }
+ if v, ok := aggs["min_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.MinAsString)
+ }
+ if v, ok := aggs["max"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Max)
+ }
+ if v, ok := aggs["max_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.MaxAsString)
+ }
+ if v, ok := aggs["avg"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Avg)
+ }
+ if v, ok := aggs["avg_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.AvgAsString)
+ }
+ if v, ok := aggs["sum"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Sum)
+ }
+ if v, ok := aggs["sum_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumAsString)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Pipeline percentiles --
+
+// AggregationPipelinePercentilesMetric is the value returned by a pipeline
+// percentiles metric aggregation, e.g. a PercentilesBucket aggregation.
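+// For illustration, a percentiles_bucket response fragment such as
+//   {"values": {"25.0": 100.0, "50.0": 200.0, "75.0": 300.0}}
+// decodes into Values, keyed by percentile.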
+type AggregationPipelinePercentilesMetric struct {
+ Aggregations
+
+ Values map[string]float64 // `json:"values"`
+ Meta map[string]interface{} // `json:"meta,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPipelinePercentilesMetric structure.
+func (a *AggregationPipelinePercentilesMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["values"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Values)
+ }
+ if v, ok := aggs["meta"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Meta)
+ }
+ a.Aggregations = aggs
+ return nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_children.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_children.go
new file mode 100644
index 000000000..08623a58e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_children.go
@@ -0,0 +1,76 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ChildrenAggregation is a special single bucket aggregation that enables
+// aggregating from buckets on parent document types to buckets on child documents.
+// It is available from 1.4.0.Beta1 upwards.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-children-aggregation.html
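+//
+// A short usage sketch, mirroring the tests below (type and field are
+// illustrative):
+//
+//   agg := NewChildrenAggregation().Type("answer").
+//       SubAggregation("top-names", NewTermsAggregation().Field("owner.display_name"))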
+type ChildrenAggregation struct {
+ typ string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewChildrenAggregation() *ChildrenAggregation {
+ return &ChildrenAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *ChildrenAggregation) Type(typ string) *ChildrenAggregation {
+ a.typ = typ
+ return a
+}
+
+func (a *ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) *ChildrenAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *ChildrenAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "to-answers" : {
+ // "children": {
+ // "type" : "answer"
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "type" : ... } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["children"] = opts
+ opts["type"] = a.typ
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_children_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_children_test.go
new file mode 100644
index 000000000..0486079a9
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_children_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestChildrenAggregation(t *testing.T) {
+ agg := NewChildrenAggregation().Type("answer")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"children":{"type":"answer"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestChildrenAggregationWithSubAggregation(t *testing.T) {
+ subAgg := NewTermsAggregation().Field("owner.display_name").Size(10)
+ agg := NewChildrenAggregation().Type("answer")
+ agg = agg.SubAggregation("top-names", subAgg)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"top-names":{"terms":{"field":"owner.display_name","size":10}}},"children":{"type":"answer"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_count_thresholds.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_count_thresholds.go
new file mode 100644
index 000000000..53efdaf5f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_count_thresholds.go
@@ -0,0 +1,13 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// BucketCountThresholds is used in e.g. terms and significant text aggregations.
+type BucketCountThresholds struct {
+ MinDocCount *int64
+ ShardMinDocCount *int64
+ RequiredSize *int
+ ShardSize *int
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go
new file mode 100644
index 000000000..1e7a1246c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram.go
@@ -0,0 +1,285 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// DateHistogramAggregation is a multi-bucket aggregation similar to the
+// histogram except it can only be applied on date values.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-datehistogram-aggregation.html
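+//
+// A short usage sketch (field, interval, and format are illustrative):
+//
+//   agg := NewDateHistogramAggregation().
+//       Field("date").
+//       Interval("month").
+//       Format("YYYY-MM")
+//
+// which serializes to
+// {"date_histogram":{"field":"date","format":"YYYY-MM","interval":"month"}}.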
+type DateHistogramAggregation struct {
+ field string
+ script *Script
+ missing interface{}
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+
+ interval string
+ order string
+ orderAsc bool
+ minDocCount *int64
+ extendedBoundsMin interface{}
+ extendedBoundsMax interface{}
+ timeZone string
+ format string
+ offset string
+}
+
+// NewDateHistogramAggregation creates a new DateHistogramAggregation.
+func NewDateHistogramAggregation() *DateHistogramAggregation {
+ return &DateHistogramAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+// Field on which the aggregation is processed.
+func (a *DateHistogramAggregation) Field(field string) *DateHistogramAggregation {
+ a.field = field
+ return a
+}
+
+func (a *DateHistogramAggregation) Script(script *Script) *DateHistogramAggregation {
+ a.script = script
+ return a
+}
+
+// Missing configures the value to use when documents miss a value.
+func (a *DateHistogramAggregation) Missing(missing interface{}) *DateHistogramAggregation {
+ a.missing = missing
+ return a
+}
+
+func (a *DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *DateHistogramAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *DateHistogramAggregation) Meta(metaData map[string]interface{}) *DateHistogramAggregation {
+ a.meta = metaData
+ return a
+}
+
+// Interval by which the aggregation gets processed.
+// Allowed values are: "year", "quarter", "month", "week", "day",
+// "hour", "minute". It also supports time settings like "1.5h"
+// (up to "w" for weeks).
+func (a *DateHistogramAggregation) Interval(interval string) *DateHistogramAggregation {
+ a.interval = interval
+ return a
+}
+
+// Order specifies the sort order. Valid values for order are:
+// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
+// with a metric.
+func (a *DateHistogramAggregation) Order(order string, asc bool) *DateHistogramAggregation {
+ a.order = order
+ a.orderAsc = asc
+ return a
+}
+
+func (a *DateHistogramAggregation) OrderByCount(asc bool) *DateHistogramAggregation {
+ // "order" : { "_count" : "asc" }
+ a.order = "_count"
+ a.orderAsc = asc
+ return a
+}
+
+func (a *DateHistogramAggregation) OrderByCountAsc() *DateHistogramAggregation {
+ return a.OrderByCount(true)
+}
+
+func (a *DateHistogramAggregation) OrderByCountDesc() *DateHistogramAggregation {
+ return a.OrderByCount(false)
+}
+
+func (a *DateHistogramAggregation) OrderByKey(asc bool) *DateHistogramAggregation {
+ // "order" : { "_key" : "asc" }
+ a.order = "_key"
+ a.orderAsc = asc
+ return a
+}
+
+func (a *DateHistogramAggregation) OrderByKeyAsc() *DateHistogramAggregation {
+ return a.OrderByKey(true)
+}
+
+func (a *DateHistogramAggregation) OrderByKeyDesc() *DateHistogramAggregation {
+ return a.OrderByKey(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued sub-aggregation.
+func (a *DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) *DateHistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "avg_height" : "desc" }
+ // },
+ // "aggs" : {
+ // "avg_height" : { "avg" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName
+ a.orderAsc = asc
+ return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a metric of a multi-valued sub-aggregation.
+func (a *DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *DateHistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "height_stats.avg" : "desc" }
+ // },
+ // "aggs" : {
+ // "height_stats" : { "stats" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName + "." + metric
+ a.orderAsc = asc
+ return a
+}
+
+// MinDocCount sets the minimum document count per bucket.
+// Buckets with less documents than this min value will not be returned.
+func (a *DateHistogramAggregation) MinDocCount(minDocCount int64) *DateHistogramAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+// TimeZone sets the timezone in which to translate dates before computing buckets.
+func (a *DateHistogramAggregation) TimeZone(timeZone string) *DateHistogramAggregation {
+ a.timeZone = timeZone
+ return a
+}
+
+// Format sets the format to use for dates.
+func (a *DateHistogramAggregation) Format(format string) *DateHistogramAggregation {
+ a.format = format
+ return a
+}
+
+// Offset sets the offset of time intervals in the histogram, e.g. "+6h".
+func (a *DateHistogramAggregation) Offset(offset string) *DateHistogramAggregation {
+ a.offset = offset
+ return a
+}
+
+// ExtendedBounds accepts int, int64, string, or time.Time values.
+// In case the lower value in the histogram would be greater than min or the
+// upper value would be less than max, empty buckets will be generated.
+func (a *DateHistogramAggregation) ExtendedBounds(min, max interface{}) *DateHistogramAggregation {
+ a.extendedBoundsMin = min
+ a.extendedBoundsMax = max
+ return a
+}
+
+// ExtendedBoundsMin accepts int, int64, string, or time.Time values.
+func (a *DateHistogramAggregation) ExtendedBoundsMin(min interface{}) *DateHistogramAggregation {
+ a.extendedBoundsMin = min
+ return a
+}
+
+// ExtendedBoundsMax accepts int, int64, string, or time.Time values.
+func (a *DateHistogramAggregation) ExtendedBoundsMax(max interface{}) *DateHistogramAggregation {
+ a.extendedBoundsMax = max
+ return a
+}
+
+func (a *DateHistogramAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "articles_over_time" : {
+ // "date_histogram" : {
+ // "field" : "date",
+ // "interval" : "month"
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "date_histogram" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["date_histogram"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.missing != nil {
+ opts["missing"] = a.missing
+ }
+
+ opts["interval"] = a.interval
+ if a.minDocCount != nil {
+ opts["min_doc_count"] = *a.minDocCount
+ }
+ if a.order != "" {
+ o := make(map[string]interface{})
+ if a.orderAsc {
+ o[a.order] = "asc"
+ } else {
+ o[a.order] = "desc"
+ }
+ opts["order"] = o
+ }
+ if a.timeZone != "" {
+ opts["time_zone"] = a.timeZone
+ }
+ if a.offset != "" {
+ opts["offset"] = a.offset
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil {
+ bounds := make(map[string]interface{})
+ if a.extendedBoundsMin != nil {
+ bounds["min"] = a.extendedBoundsMin
+ }
+ if a.extendedBoundsMax != nil {
+ bounds["max"] = a.extendedBoundsMax
+ }
+ opts["extended_bounds"] = bounds
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go
new file mode 100644
index 000000000..ddf790834
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_histogram_test.go
@@ -0,0 +1,49 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestDateHistogramAggregation(t *testing.T) {
+ agg := NewDateHistogramAggregation().
+ Field("date").
+ Interval("month").
+ Format("YYYY-MM").
+ TimeZone("UTC").
+ Offset("+6h")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_histogram":{"field":"date","format":"YYYY-MM","interval":"month","offset":"+6h","time_zone":"UTC"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateHistogramAggregationWithMissing(t *testing.T) {
+ agg := NewDateHistogramAggregation().Field("date").Interval("year").Missing("1900")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_histogram":{"field":"date","interval":"year","missing":"1900"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go
new file mode 100644
index 000000000..5407dadb8
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range.go
@@ -0,0 +1,246 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "time"
+)
+
+// DateRangeAggregation is a range aggregation that is dedicated for
+// date values. The main difference between this aggregation and the
+// normal range aggregation is that the from and to values can be expressed
+// in Date Math expressions, and it is also possible to specify a
+// date format by which the from and to response fields will be returned.
+// Note that this aggregation includes the from value and excludes the to
+// value for each range.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-daterange-aggregation.html
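+//
+// A short usage sketch, mirroring the tests below (dates are illustrative):
+//
+//   agg := NewDateRangeAggregation().Field("created_at").
+//       AddUnboundedFrom("2012-12-31").
+//       AddRange("2013-01-01", "2013-12-31").
+//       AddUnboundedTo("2014-01-01")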
+type DateRangeAggregation struct {
+ field string
+ script *Script
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ keyed *bool
+ unmapped *bool
+ format string
+ entries []DateRangeAggregationEntry
+}
+
+type DateRangeAggregationEntry struct {
+ Key string
+ From interface{}
+ To interface{}
+}
+
+func NewDateRangeAggregation() *DateRangeAggregation {
+ return &DateRangeAggregation{
+ subAggregations: make(map[string]Aggregation),
+ entries: make([]DateRangeAggregationEntry, 0),
+ }
+}
+
+func (a *DateRangeAggregation) Field(field string) *DateRangeAggregation {
+ a.field = field
+ return a
+}
+
+func (a *DateRangeAggregation) Script(script *Script) *DateRangeAggregation {
+ a.script = script
+ return a
+}
+
+func (a *DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *DateRangeAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *DateRangeAggregation) Meta(metaData map[string]interface{}) *DateRangeAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *DateRangeAggregation) Keyed(keyed bool) *DateRangeAggregation {
+ a.keyed = &keyed
+ return a
+}
+
+func (a *DateRangeAggregation) Unmapped(unmapped bool) *DateRangeAggregation {
+ a.unmapped = &unmapped
+ return a
+}
+
+func (a *DateRangeAggregation) Format(format string) *DateRangeAggregation {
+ a.format = format
+ return a
+}
+
+func (a *DateRangeAggregation) AddRange(from, to interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a *DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedTo(from interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedFrom(to interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a *DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a *DateRangeAggregation) Lt(to interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a *DateRangeAggregation) LtWithKey(key string, to interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a *DateRangeAggregation) Between(from, to interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a *DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a *DateRangeAggregation) Gt(from interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a *DateRangeAggregation) GtWithKey(key string, from interface{}) *DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a *DateRangeAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "range" : {
+ // "date_range": {
+ // "field": "date",
+ // "format": "MM-yyy",
+ // "ranges": [
+ // { "to": "now-10M/M" },
+ // { "from": "now-10M/M" }
+ // ]
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "date_range" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["date_range"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+
+ if a.keyed != nil {
+ opts["keyed"] = *a.keyed
+ }
+ if a.unmapped != nil {
+ opts["unmapped"] = *a.unmapped
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+
+ var ranges []interface{}
+ for _, ent := range a.entries {
+ r := make(map[string]interface{})
+ if ent.Key != "" {
+ r["key"] = ent.Key
+ }
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case *int, *int16, *int32, *int64, *float32, *float64:
+ r["from"] = from
+ case time.Time:
+ r["from"] = from.Format(time.RFC3339)
+ case *time.Time:
+ r["from"] = from.Format(time.RFC3339)
+ case string:
+ r["from"] = from
+ case *string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case *int, *int16, *int32, *int64, *float32, *float64:
+ r["to"] = to
+ case time.Time:
+ r["to"] = to.Format(time.RFC3339)
+ case *time.Time:
+ r["to"] = to.Format(time.RFC3339)
+ case string:
+ r["to"] = to
+ case *string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go
new file mode 100644
index 000000000..d1c909f3e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_date_range_test.go
@@ -0,0 +1,155 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestDateRangeAggregation(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at")
+ agg = agg.AddRange(nil, "2012-12-31")
+ agg = agg.AddRange("2013-01-01", "2013-12-31")
+ agg = agg.AddRange("2014-01-01", nil)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateRangeAggregationWithPointers(t *testing.T) {
+ d1 := "2012-12-31"
+ d2 := "2013-01-01"
+ d3 := "2013-12-31"
+ d4 := "2014-01-01"
+
+ agg := NewDateRangeAggregation().Field("created_at")
+ agg = agg.AddRange(nil, &d1)
+ agg = agg.AddRange(d2, &d3)
+ agg = agg.AddRange(d4, nil)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateRangeAggregationWithUnbounded(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at").
+ AddUnboundedFrom("2012-12-31").
+ AddRange("2013-01-01", "2013-12-31").
+ AddUnboundedTo("2014-01-01")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateRangeAggregationWithLtAndCo(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at").
+ Lt("2012-12-31").
+ Between("2013-01-01", "2013-12-31").
+ Gt("2014-01-01")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateRangeAggregationWithKeyedFlag(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at").
+ Keyed(true).
+ Lt("2012-12-31").
+ Between("2013-01-01", "2013-12-31").
+ Gt("2014-01-01")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"to":"2012-12-31"},{"from":"2013-01-01","to":"2013-12-31"},{"from":"2014-01-01"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateRangeAggregationWithKeys(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at").
+ Keyed(true).
+ LtWithKey("pre-2012", "2012-12-31").
+ BetweenWithKey("2013", "2013-01-01", "2013-12-31").
+ GtWithKey("post-2013", "2014-01-01")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","keyed":true,"ranges":[{"key":"pre-2012","to":"2012-12-31"},{"from":"2013-01-01","key":"2013","to":"2013-12-31"},{"from":"2014-01-01","key":"post-2013"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestDateRangeAggregationWithSpecialNames(t *testing.T) {
+ agg := NewDateRangeAggregation().Field("created_at").
+ AddRange("now-10M/M", "now+10M/M")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"date_range":{"field":"created_at","ranges":[{"from":"now-10M/M","to":"now+10M/M"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_filter.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_filter.go
new file mode 100644
index 000000000..e4fbc67da
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_filter.go
@@ -0,0 +1,77 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FilterAggregation defines a single bucket of all the documents
+// in the current document set context that match a specified filter.
+// Often this will be used to narrow down the current aggregation context
+// to a specific set of documents.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filter-aggregation.html
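+//
+// A short usage sketch, mirroring the tests below (query and fields are
+// illustrative):
+//
+//   agg := NewFilterAggregation().
+//       Filter(NewRangeQuery("stock").Gt(0)).
+//       SubAggregation("avg_price", NewAvgAggregation().Field("price"))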
+type FilterAggregation struct {
+ filter Query
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewFilterAggregation() *FilterAggregation {
+ return &FilterAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *FilterAggregation) SubAggregation(name string, subAggregation Aggregation) *FilterAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *FilterAggregation) Meta(metaData map[string]interface{}) *FilterAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *FilterAggregation) Filter(filter Query) *FilterAggregation {
+ a.filter = filter
+ return a
+}
+
+func (a *FilterAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "in_stock_products" : {
+ // "filter" : { "range" : { "stock" : { "gt" : 0 } } }
+ // }
+ // }
+ // }
+ // This method returns only the { "filter" : {} } part.
+
+ src, err := a.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ source := make(map[string]interface{})
+ source["filter"] = src
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_filter_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_filter_test.go
new file mode 100644
index 000000000..6aa4fbb7c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_filter_test.go
@@ -0,0 +1,66 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFilterAggregation(t *testing.T) {
+ filter := NewRangeQuery("stock").Gt(0)
+ agg := NewFilterAggregation().Filter(filter)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFilterAggregationWithSubAggregation(t *testing.T) {
+ avgPriceAgg := NewAvgAggregation().Field("price")
+ filter := NewRangeQuery("stock").Gt(0)
+ agg := NewFilterAggregation().Filter(filter).
+ SubAggregation("avg_price", avgPriceAgg)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFilterAggregationWithMeta(t *testing.T) {
+ filter := NewRangeQuery("stock").Gt(0)
+ agg := NewFilterAggregation().Filter(filter).Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"filter":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},"meta":{"name":"Oliver"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_filters.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_filters.go
new file mode 100644
index 000000000..0d128ca17
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_filters.go
@@ -0,0 +1,138 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// FiltersAggregation defines a multi-bucket aggregation where each bucket
+// is associated with a filter. Each bucket will collect all documents that
+// match its associated filter.
+//
+// Notice that the caller has to decide whether to add filters by name
+// (using FilterWithName) or unnamed filters (using Filter or Filters). One cannot
+// use both named and unnamed filters.
+//
+// For details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filters-aggregation.html
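+//
+// A short usage sketch with named filters (names and queries are
+// illustrative):
+//
+//   agg := NewFiltersAggregation().
+//       FilterWithName("errors", NewTermQuery("body", "error")).
+//       FilterWithName("warnings", NewTermQuery("body", "warning"))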
+type FiltersAggregation struct {
+ unnamedFilters []Query
+ namedFilters map[string]Query
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+// NewFiltersAggregation initializes a new FiltersAggregation.
+func NewFiltersAggregation() *FiltersAggregation {
+ return &FiltersAggregation{
+ unnamedFilters: make([]Query, 0),
+ namedFilters: make(map[string]Query),
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+// Filter adds an unnamed filter. Notice that you can
+// either use named or unnamed filters, but not both.
+func (a *FiltersAggregation) Filter(filter Query) *FiltersAggregation {
+ a.unnamedFilters = append(a.unnamedFilters, filter)
+ return a
+}
+
+// Filters adds one or more unnamed filters. Notice that you can
+// either use named or unnamed filters, but not both.
+func (a *FiltersAggregation) Filters(filters ...Query) *FiltersAggregation {
+ if len(filters) > 0 {
+ a.unnamedFilters = append(a.unnamedFilters, filters...)
+ }
+ return a
+}
+
+// FilterWithName adds a filter with a specific name. Notice that you can
+// either use named or unnamed filters, but not both.
+func (a *FiltersAggregation) FilterWithName(name string, filter Query) *FiltersAggregation {
+ a.namedFilters[name] = filter
+ return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) *FiltersAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *FiltersAggregation) Meta(metaData map[string]interface{}) *FiltersAggregation {
+ a.meta = metaData
+ return a
+}
+
+// Source returns a JSON-serializable interface.
+// If the aggregation is invalid, an error is returned. This may e.g. happen
+// if you mixed named and unnamed filters.
+func (a *FiltersAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "messages" : {
+ // "filters" : {
+ // "filters" : {
+ // "errors" : { "term" : { "body" : "error" }},
+ // "warnings" : { "term" : { "body" : "warning" }}
+ // }
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the (outer) { "filters" : {} } part.
+
+ source := make(map[string]interface{})
+ filters := make(map[string]interface{})
+ source["filters"] = filters
+
+ if len(a.unnamedFilters) > 0 && len(a.namedFilters) > 0 {
+ return nil, errors.New("elastic: use either named or unnamed filters with FiltersAggregation but not both")
+ }
+
+ if len(a.unnamedFilters) > 0 {
+ arr := make([]interface{}, len(a.unnamedFilters))
+ for i, filter := range a.unnamedFilters {
+ src, err := filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ arr[i] = src
+ }
+ filters["filters"] = arr
+ } else {
+ dict := make(map[string]interface{})
+ for key, filter := range a.namedFilters {
+ src, err := filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ dict[key] = src
+ }
+ filters["filters"] = dict
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_filters_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_filters_test.go
new file mode 100644
index 000000000..95cc8d7c3
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_filters_test.go
@@ -0,0 +1,99 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFiltersAggregationFilters(t *testing.T) {
+ f1 := NewRangeQuery("stock").Gt(0)
+ f2 := NewTermQuery("symbol", "GOOG")
+ agg := NewFiltersAggregation().Filters(f1, f2)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFiltersAggregationFilterWithName(t *testing.T) {
+ f1 := NewRangeQuery("stock").Gt(0)
+ f2 := NewTermQuery("symbol", "GOOG")
+ agg := NewFiltersAggregation().
+ FilterWithName("f1", f1).
+ FilterWithName("f2", f2)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"filters":{"filters":{"f1":{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},"f2":{"term":{"symbol":"GOOG"}}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFiltersAggregationWithKeyedAndNonKeyedFilters(t *testing.T) {
+ agg := NewFiltersAggregation().
+ Filter(NewTermQuery("symbol", "MSFT")). // unnamed
+ FilterWithName("one", NewTermQuery("symbol", "GOOG")) // named filter
+ _, err := agg.Source()
+ if err == nil {
+ t.Fatal("expected error, got nil")
+ }
+}
+
+func TestFiltersAggregationWithSubAggregation(t *testing.T) {
+ avgPriceAgg := NewAvgAggregation().Field("price")
+ f1 := NewRangeQuery("stock").Gt(0)
+ f2 := NewTermQuery("symbol", "GOOG")
+ agg := NewFiltersAggregation().Filters(f1, f2).SubAggregation("avg_price", avgPriceAgg)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFiltersAggregationWithMetaData(t *testing.T) {
+ f1 := NewRangeQuery("stock").Gt(0)
+ f2 := NewTermQuery("symbol", "GOOG")
+ agg := NewFiltersAggregation().Filters(f1, f2).Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"filters":{"filters":[{"range":{"stock":{"from":0,"include_lower":false,"include_upper":true,"to":null}}},{"term":{"symbol":"GOOG"}}]},"meta":{"name":"Oliver"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go
new file mode 100644
index 000000000..c082fb3f2
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance.go
@@ -0,0 +1,198 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields
+// and conceptually works very similarly to the range aggregation.
+// The user can define a point of origin and a set of distance range buckets.
+// The aggregation evaluates the distance of each document value from
+// the origin point and determines the bucket it belongs to based on
+// the ranges (a document belongs to a bucket if the distance between the
+// document and the origin falls within the distance range of the bucket).
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-geodistance-aggregation.html
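+//
+// A short usage sketch, mirroring the tests below (origin and distances are
+// illustrative):
+//
+//   agg := NewGeoDistanceAggregation().Field("location").
+//       Point("52.3760, 4.894").
+//       AddUnboundedFrom(100).
+//       AddRange(100, 300).
+//       AddUnboundedTo(300)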
+type GeoDistanceAggregation struct {
+ field string
+ unit string
+ distanceType string
+ point string
+ ranges []geoDistAggRange
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+type geoDistAggRange struct {
+ Key string
+ From interface{}
+ To interface{}
+}
+
+func NewGeoDistanceAggregation() *GeoDistanceAggregation {
+ return &GeoDistanceAggregation{
+ subAggregations: make(map[string]Aggregation),
+ ranges: make([]geoDistAggRange, 0),
+ }
+}
+
+func (a *GeoDistanceAggregation) Field(field string) *GeoDistanceAggregation {
+ a.field = field
+ return a
+}
+
+func (a *GeoDistanceAggregation) Unit(unit string) *GeoDistanceAggregation {
+ a.unit = unit
+ return a
+}
+
+func (a *GeoDistanceAggregation) DistanceType(distanceType string) *GeoDistanceAggregation {
+ a.distanceType = distanceType
+ return a
+}
+
+func (a *GeoDistanceAggregation) Point(latLon string) *GeoDistanceAggregation {
+ a.point = latLon
+ return a
+}
+
+func (a *GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoDistanceAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *GeoDistanceAggregation) Meta(metaData map[string]interface{}) *GeoDistanceAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *GeoDistanceAggregation) AddRange(from, to interface{}) *GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+ return a
+}
+
+func (a *GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+ return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedTo(from float64) *GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil})
+ return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) *GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedFrom(to float64) *GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to})
+ return a
+}
+
+func (a *GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) *GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a *GeoDistanceAggregation) Between(from, to interface{}) *GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+ return a
+}
+
+func (a *GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) *GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+ return a
+}
+
+func (a *GeoDistanceAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "rings_around_amsterdam" : {
+ // "geo_distance" : {
+ // "field" : "location",
+ // "origin" : "52.3760, 4.894",
+ // "ranges" : [
+ // { "to" : 100 },
+ // { "from" : 100, "to" : 300 },
+ // { "from" : 300 }
+ // ]
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "range" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["geo_distance"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.unit != "" {
+ opts["unit"] = a.unit
+ }
+ if a.distanceType != "" {
+ opts["distance_type"] = a.distanceType
+ }
+ if a.point != "" {
+ opts["origin"] = a.point
+ }
+
+ var ranges []interface{}
+ for _, ent := range a.ranges {
+ r := make(map[string]interface{})
+ if ent.Key != "" {
+ r["key"] = ent.Key
+ }
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case *int, *int16, *int32, *int64, *float32, *float64:
+ r["from"] = from
+ case string:
+ r["from"] = from
+ case *string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case *int, *int16, *int32, *int64, *float32, *float64:
+ r["to"] = to
+ case string:
+ r["to"] = to
+ case *string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go
new file mode 100644
index 000000000..3918b9dd2
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_geo_distance_test.go
@@ -0,0 +1,93 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoDistanceAggregation(t *testing.T) {
+ agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
+ agg = agg.AddRange(nil, 100)
+ agg = agg.AddRange(100, 300)
+ agg = agg.AddRange(300, nil)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceAggregationWithPointers(t *testing.T) {
+ hundred := 100
+ threeHundred := 300
+ agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
+ agg = agg.AddRange(nil, &hundred)
+ agg = agg.AddRange(hundred, &threeHundred)
+ agg = agg.AddRange(threeHundred, nil)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceAggregationWithUnbounded(t *testing.T) {
+ agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
+ agg = agg.AddUnboundedFrom(100)
+ agg = agg.AddRange(100, 300)
+ agg = agg.AddUnboundedTo(300)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceAggregationWithMetaData(t *testing.T) {
+ agg := NewGeoDistanceAggregation().Field("location").Point("52.3760, 4.894")
+ agg = agg.AddRange(nil, 100)
+ agg = agg.AddRange(100, 300)
+ agg = agg.AddRange(300, nil)
+ agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"field":"location","origin":"52.3760, 4.894","ranges":[{"to":100},{"from":100,"to":300},{"from":300}]},"meta":{"name":"Oliver"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid.go
new file mode 100644
index 000000000..07f61b331
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid.go
@@ -0,0 +1,102 @@
+package elastic
+
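+// GeoHashGridAggregation is a multi-bucket aggregation that groups geo_point
+// values into buckets that represent cells of a geohash grid; Precision
+// controls the cell size. A short usage sketch (field and precision are
+// illustrative):
+//
+//   agg := NewGeoHashGridAggregation().Field("location").Precision(5)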
+type GeoHashGridAggregation struct {
+ field string
+ precision int
+ size int
+ shardSize int
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewGeoHashGridAggregation() *GeoHashGridAggregation {
+ return &GeoHashGridAggregation{
+ subAggregations: make(map[string]Aggregation),
+ precision: -1,
+ size: -1,
+ shardSize: -1,
+ }
+}
+
+func (a *GeoHashGridAggregation) Field(field string) *GeoHashGridAggregation {
+ a.field = field
+ return a
+}
+
+func (a *GeoHashGridAggregation) Precision(precision int) *GeoHashGridAggregation {
+ a.precision = precision
+ return a
+}
+
+func (a *GeoHashGridAggregation) Size(size int) *GeoHashGridAggregation {
+ a.size = size
+ return a
+}
+
+func (a *GeoHashGridAggregation) ShardSize(shardSize int) *GeoHashGridAggregation {
+ a.shardSize = shardSize
+ return a
+}
+
+func (a *GeoHashGridAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoHashGridAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a *GeoHashGridAggregation) Meta(metaData map[string]interface{}) *GeoHashGridAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *GeoHashGridAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs": {
+ // "new_york": {
+ // "geohash_grid": {
+ // "field": "location",
+ // "precision": 5
+ // }
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["geohash_grid"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+
+ if a.precision != -1 {
+ opts["precision"] = a.precision
+ }
+
+ if a.size != -1 {
+ opts["size"] = a.size
+ }
+
+ if a.shardSize != -1 {
+ opts["shard_size"] = a.shardSize
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid_test.go
new file mode 100644
index 000000000..044e211eb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_geohash_grid_test.go
@@ -0,0 +1,84 @@
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoHashGridAggregation(t *testing.T) {
+ agg := NewGeoHashGridAggregation().Field("location").Precision(5)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("Marshalling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geohash_grid":{"field":"location","precision":5}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoHashGridAggregationWithMetaData(t *testing.T) {
+ agg := NewGeoHashGridAggregation().Field("location").Precision(5)
+ agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("Marshalling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geohash_grid":{"field":"location","precision":5},"meta":{"name":"Oliver"}}`
+
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoHashGridAggregationWithSize(t *testing.T) {
+ agg := NewGeoHashGridAggregation().Field("location").Precision(5).Size(5)
+ agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("Marshalling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geohash_grid":{"field":"location","precision":5,"size":5},"meta":{"name":"Oliver"}}`
+
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoHashGridAggregationWithShardSize(t *testing.T) {
+ agg := NewGeoHashGridAggregation().Field("location").Precision(5).ShardSize(5)
+ agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("Marshalling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geohash_grid":{"field":"location","precision":5,"shard_size":5},"meta":{"name":"Oliver"}}`
+
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_global.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_global.go
new file mode 100644
index 000000000..4bf2a63f8
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_global.go
@@ -0,0 +1,71 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GlobalAggregation defines a single bucket of all the documents within
+// the search execution context. This context is defined by the indices
+// and the document types you’re searching on, but is not influenced
+// by the search query itself.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-global-aggregation.html
+type GlobalAggregation struct {
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewGlobalAggregation() *GlobalAggregation {
+ return &GlobalAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) *GlobalAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *GlobalAggregation) Meta(metaData map[string]interface{}) *GlobalAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *GlobalAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "all_products" : {
+ // "global" : {},
+ // "aggs" : {
+ // "avg_price" : { "avg" : { "field" : "price" } }
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "global" : {} } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["global"] = opts
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
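Worth noting alongside the Source() example: the global bucket escapes the query scope. A hedged sketch (the client, index, and field names are assumptions):

    // Average price over ALL products, even though the query narrows the hits.
    all := elastic.NewGlobalAggregation().
        SubAggregation("avg_price", elastic.NewAvgAggregation().Field("price"))
    res, err := client.Search().
        Index("products").
        Query(elastic.NewTermQuery("type", "t-shirt")). // scopes hits, not "all"
        Aggregation("all_products", all).
        Size(0).
        Do(context.Background())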
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_global_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_global_test.go
new file mode 100644
index 000000000..5f1e5e6cb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_global_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGlobalAggregation(t *testing.T) {
+ agg := NewGlobalAggregation()
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"global":{}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGlobalAggregationWithMetaData(t *testing.T) {
+ agg := NewGlobalAggregation().Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"global":{},"meta":{"name":"Oliver"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram.go
new file mode 100644
index 000000000..8b698cff5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram.go
@@ -0,0 +1,265 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// HistogramAggregation is a multi-bucket values source based aggregation
+// that can be applied on numeric values extracted from the documents.
+// It dynamically builds fixed size (a.k.a. interval) buckets over the
+// values.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-histogram-aggregation.html
+type HistogramAggregation struct {
+ field string
+ script *Script
+ missing interface{}
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+
+ interval float64
+ order string
+ orderAsc bool
+ minDocCount *int64
+ minBounds *float64
+ maxBounds *float64
+ offset *float64
+}
+
+func NewHistogramAggregation() *HistogramAggregation {
+ return &HistogramAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *HistogramAggregation) Field(field string) *HistogramAggregation {
+ a.field = field
+ return a
+}
+
+func (a *HistogramAggregation) Script(script *Script) *HistogramAggregation {
+ a.script = script
+ return a
+}
+
+// Missing configures the value to use when documents miss a value.
+func (a *HistogramAggregation) Missing(missing interface{}) *HistogramAggregation {
+ a.missing = missing
+ return a
+}
+
+func (a *HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *HistogramAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *HistogramAggregation) Meta(metaData map[string]interface{}) *HistogramAggregation {
+ a.meta = metaData
+ return a
+}
+
+// Interval sets the bucket interval for this builder; it must be greater than 0.
+func (a *HistogramAggregation) Interval(interval float64) *HistogramAggregation {
+ a.interval = interval
+ return a
+}
+
+// Order specifies the sort order. Valid values for order are:
+// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
+// with a metric.
+func (a *HistogramAggregation) Order(order string, asc bool) *HistogramAggregation {
+ a.order = order
+ a.orderAsc = asc
+ return a
+}
+
+func (a *HistogramAggregation) OrderByCount(asc bool) *HistogramAggregation {
+ // "order" : { "_count" : "asc" }
+ a.order = "_count"
+ a.orderAsc = asc
+ return a
+}
+
+func (a *HistogramAggregation) OrderByCountAsc() *HistogramAggregation {
+ return a.OrderByCount(true)
+}
+
+func (a *HistogramAggregation) OrderByCountDesc() *HistogramAggregation {
+ return a.OrderByCount(false)
+}
+
+func (a *HistogramAggregation) OrderByKey(asc bool) *HistogramAggregation {
+ // "order" : { "_key" : "asc" }
+ a.order = "_key"
+ a.orderAsc = asc
+ return a
+}
+
+func (a *HistogramAggregation) OrderByKeyAsc() *HistogramAggregation {
+ return a.OrderByKey(true)
+}
+
+func (a *HistogramAggregation) OrderByKeyDesc() *HistogramAggregation {
+ return a.OrderByKey(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued sub-aggregation.
+func (a *HistogramAggregation) OrderByAggregation(aggName string, asc bool) *HistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "avg_height" : "desc" }
+ // },
+ // "aggs" : {
+ // "avg_height" : { "avg" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName
+ a.orderAsc = asc
+ return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a metric of a multi-valued sub-aggregation.
+func (a *HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *HistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "height_stats.avg" : "desc" }
+ // },
+ // "aggs" : {
+ // "height_stats" : { "stats" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName + "." + metric
+ a.orderAsc = asc
+ return a
+}
+
+func (a *HistogramAggregation) MinDocCount(minDocCount int64) *HistogramAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+func (a *HistogramAggregation) ExtendedBounds(min, max float64) *HistogramAggregation {
+ a.minBounds = &min
+ a.maxBounds = &max
+ return a
+}
+
+func (a *HistogramAggregation) ExtendedBoundsMin(min float64) *HistogramAggregation {
+ a.minBounds = &min
+ return a
+}
+
+func (a *HistogramAggregation) MinBounds(min float64) *HistogramAggregation {
+ a.minBounds = &min
+ return a
+}
+
+func (a *HistogramAggregation) ExtendedBoundsMax(max float64) *HistogramAggregation {
+ a.maxBounds = &max
+ return a
+}
+
+func (a *HistogramAggregation) MaxBounds(max float64) *HistogramAggregation {
+ a.maxBounds = &max
+ return a
+}
+
+// Offset into the histogram
+func (a *HistogramAggregation) Offset(offset float64) *HistogramAggregation {
+ a.offset = &offset
+ return a
+}
+
+func (a *HistogramAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "prices" : {
+ // "histogram" : {
+ // "field" : "price",
+ // "interval" : 50
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "histogram" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["histogram"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.missing != nil {
+ opts["missing"] = a.missing
+ }
+
+ opts["interval"] = a.interval
+ if a.order != "" {
+ o := make(map[string]interface{})
+ if a.orderAsc {
+ o[a.order] = "asc"
+ } else {
+ o[a.order] = "desc"
+ }
+ opts["order"] = o
+ }
+ if a.offset != nil {
+ opts["offset"] = *a.offset
+ }
+ if a.minDocCount != nil {
+ opts["min_doc_count"] = *a.minDocCount
+ }
+ if a.minBounds != nil || a.maxBounds != nil {
+ bounds := make(map[string]interface{})
+ if a.minBounds != nil {
+ bounds["min"] = a.minBounds
+ }
+ if a.maxBounds != nil {
+ bounds["max"] = a.maxBounds
+ }
+ opts["extended_bounds"] = bounds
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
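Note that interval is always serialized (Source() has no unset sentinel for it), so callers should set it explicitly. A builder-only sketch of a histogram that also emits empty buckets (field name assumed):

    hist := elastic.NewHistogramAggregation().
        Field("price").
        Interval(50).           // bucket width; always serialized
        MinDocCount(0).         // emit empty buckets too
        ExtendedBounds(0, 500). // pad the bucket domain even without matches
        OrderByKeyAsc()
    src, err := hist.Source() // the {"histogram":{...}} object tested above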
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go
new file mode 100644
index 000000000..aeb7eec54
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_histogram_test.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestHistogramAggregation(t *testing.T) {
+ agg := NewHistogramAggregation().Field("price").Interval(50)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"histogram":{"field":"price","interval":50}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHistogramAggregationWithMetaData(t *testing.T) {
+ agg := NewHistogramAggregation().Field("price").Offset(10).Interval(50).Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"histogram":{"field":"price","interval":50,"offset":10},"meta":{"name":"Oliver"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestHistogramAggregationWithMissing(t *testing.T) {
+ agg := NewHistogramAggregation().Field("price").Interval(50).Missing("n/a")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"histogram":{"field":"price","interval":50,"missing":"n/a"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range.go
new file mode 100644
index 000000000..3615e29c3
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range.go
@@ -0,0 +1,195 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// IPRangeAggregation is a range aggregation dedicated to IP addresses.
+//
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-iprange-aggregation.html
+type IPRangeAggregation struct {
+ field string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ keyed *bool
+ entries []IPRangeAggregationEntry
+}
+
+type IPRangeAggregationEntry struct {
+ Key string
+ Mask string
+ From string
+ To string
+}
+
+func NewIPRangeAggregation() *IPRangeAggregation {
+ return &IPRangeAggregation{
+ subAggregations: make(map[string]Aggregation),
+ entries: make([]IPRangeAggregationEntry, 0),
+ }
+}
+
+func (a *IPRangeAggregation) Field(field string) *IPRangeAggregation {
+ a.field = field
+ return a
+}
+
+func (a *IPRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *IPRangeAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *IPRangeAggregation) Meta(metaData map[string]interface{}) *IPRangeAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *IPRangeAggregation) Keyed(keyed bool) *IPRangeAggregation {
+ a.keyed = &keyed
+ return a
+}
+
+func (a *IPRangeAggregation) AddMaskRange(mask string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Mask: mask})
+ return a
+}
+
+func (a *IPRangeAggregation) AddMaskRangeWithKey(key, mask string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, Mask: mask})
+ return a
+}
+
+func (a *IPRangeAggregation) AddRange(from, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) AddRangeWithKey(key, from, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) AddUnboundedTo(from string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: ""})
+ return a
+}
+
+func (a *IPRangeAggregation) AddUnboundedToWithKey(key, from string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: ""})
+ return a
+}
+
+func (a *IPRangeAggregation) AddUnboundedFrom(to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: "", To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) AddUnboundedFromWithKey(key, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: "", To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) Lt(to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: "", To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) LtWithKey(key, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: "", To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) Between(from, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) BetweenWithKey(key, from, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) Gt(from string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: ""})
+ return a
+}
+
+func (a *IPRangeAggregation) GtWithKey(key, from string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: ""})
+ return a
+}
+
+func (a *IPRangeAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "range" : {
+ // "ip_range": {
+ // "field": "ip",
+ // "ranges": [
+ // { "to": "10.0.0.5" },
+ // { "from": "10.0.0.5" }
+ // ]
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "ip_range" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["ip_range"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+
+ if a.keyed != nil {
+ opts["keyed"] = *a.keyed
+ }
+
+ var ranges []interface{}
+ for _, ent := range a.entries {
+ r := make(map[string]interface{})
+ if ent.Key != "" {
+ r["key"] = ent.Key
+ }
+ if ent.Mask != "" {
+ r["mask"] = ent.Mask
+ } else {
+ if ent.From != "" {
+ r["from"] = ent.From
+ }
+ if ent.To != "" {
+ r["to"] = ent.To
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
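One behavior worth calling out from Source(): when an entry carries a Mask, its From/To are ignored for that entry. A brief builder-only sketch (field name assumed):

    agg := elastic.NewIPRangeAggregation().
        Field("remote_ip"). // assumed field mapped as type "ip"
        Keyed(true).
        AddMaskRangeWithKey("office", "10.0.0.0/25"). // CIDR bucket; From/To unused
        LtWithKey("low", "10.0.0.5").                 // open-ended: emits only "to"
        GtWithKey("high", "10.0.0.5")                 // open-ended: emits only "from"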
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range_test.go
new file mode 100644
index 000000000..7a2b49f4c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_ip_range_test.go
@@ -0,0 +1,90 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestIPRangeAggregation(t *testing.T) {
+ agg := NewIPRangeAggregation().Field("remote_ip")
+ agg = agg.AddRange("", "10.0.0.0")
+ agg = agg.AddRange("10.1.0.0", "10.1.255.255")
+ agg = agg.AddRange("10.2.0.0", "")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ip_range":{"field":"remote_ip","ranges":[{"to":"10.0.0.0"},{"from":"10.1.0.0","to":"10.1.255.255"},{"from":"10.2.0.0"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestIPRangeAggregationMask(t *testing.T) {
+ agg := NewIPRangeAggregation().Field("remote_ip")
+ agg = agg.AddMaskRange("10.0.0.0/25")
+ agg = agg.AddMaskRange("10.0.0.127/25")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ip_range":{"field":"remote_ip","ranges":[{"mask":"10.0.0.0/25"},{"mask":"10.0.0.127/25"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestIPRangeAggregationWithKeyedFlag(t *testing.T) {
+ agg := NewIPRangeAggregation().Field("remote_ip")
+ agg = agg.Keyed(true)
+ agg = agg.AddRange("", "10.0.0.0")
+ agg = agg.AddRange("10.1.0.0", "10.1.255.255")
+ agg = agg.AddRange("10.2.0.0", "")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ip_range":{"field":"remote_ip","keyed":true,"ranges":[{"to":"10.0.0.0"},{"from":"10.1.0.0","to":"10.1.255.255"},{"from":"10.2.0.0"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestIPRangeAggregationWithKeys(t *testing.T) {
+ agg := NewIPRangeAggregation().Field("remote_ip")
+ agg = agg.Keyed(true)
+ agg = agg.LtWithKey("infinity", "10.0.0.5")
+ agg = agg.GtWithKey("and-beyond", "10.0.0.5")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ip_range":{"field":"remote_ip","keyed":true,"ranges":[{"key":"infinity","to":"10.0.0.5"},{"from":"10.0.0.5","key":"and-beyond"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_missing.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_missing.go
new file mode 100644
index 000000000..7ba3cb636
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_missing.go
@@ -0,0 +1,81 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MissingAggregation is a field data based single bucket aggregation,
+// that creates a bucket of all documents in the current document set context
+// that are missing a field value (effectively, missing a field or having
+// the configured NULL value set). This aggregator will often be used in
+// conjunction with other field data bucket aggregators (such as ranges)
+// to return information for all the documents that could not be placed
+// in any of the other buckets due to missing field data values.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-missing-aggregation.html
+type MissingAggregation struct {
+ field string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewMissingAggregation() *MissingAggregation {
+ return &MissingAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *MissingAggregation) Field(field string) *MissingAggregation {
+ a.field = field
+ return a
+}
+
+func (a *MissingAggregation) SubAggregation(name string, subAggregation Aggregation) *MissingAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *MissingAggregation) Meta(metaData map[string]interface{}) *MissingAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *MissingAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "products_without_a_price" : {
+ // "missing" : { "field" : "price" }
+ // }
+ // }
+ // }
+ // This method returns only the { "missing" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["missing"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
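A sketch of the pattern the doc comment describes, pairing a missing bucket with a sibling range aggregation (client and names are illustrative; RangeAggregation is added later in this patch):

    byPrice := elastic.NewRangeAggregation().Field("price").
        Lt(50).Between(50, 100).Gt(100)
    noPrice := elastic.NewMissingAggregation().Field("price")
    res, err := client.Search().
        Index("products").
        Aggregation("price_ranges", byPrice).
        Aggregation("products_without_a_price", noPrice).
        Size(0). // skip hits; only the buckets are of interest
        Do(context.Background())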
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_missing_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_missing_test.go
new file mode 100644
index 000000000..179c3084f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_missing_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMissingAggregation(t *testing.T) {
+ agg := NewMissingAggregation().Field("price")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"missing":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMissingAggregationWithMetaData(t *testing.T) {
+ agg := NewMissingAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"missing":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_nested.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_nested.go
new file mode 100644
index 000000000..926d493a1
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_nested.go
@@ -0,0 +1,82 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// NestedAggregation is a special single bucket aggregation that enables
+// aggregating nested documents.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-nested-aggregation.html
+type NestedAggregation struct {
+ path string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewNestedAggregation() *NestedAggregation {
+ return &NestedAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *NestedAggregation) SubAggregation(name string, subAggregation Aggregation) *NestedAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *NestedAggregation) Meta(metaData map[string]interface{}) *NestedAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *NestedAggregation) Path(path string) *NestedAggregation {
+ a.path = path
+ return a
+}
+
+func (a *NestedAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "query" : {
+ // "match" : { "name" : "led tv" }
+ // },
+ // "aggs" : {
+ // "resellers" : {
+ // "nested" : {
+ // "path" : "resellers"
+ // },
+ // "aggs" : {
+ // "min_price" : { "min" : { "field" : "resellers.price" } }
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "nested" : {} } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["nested"] = opts
+
+ opts["path"] = a.path
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
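A builder-only sketch matching the Source() example above (path and field as in that example; NewMinAggregation is part of the same package):

    nested := elastic.NewNestedAggregation().
        Path("resellers").
        SubAggregation("min_price",
            elastic.NewMinAggregation().Field("resellers.price"))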
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_nested_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_nested_test.go
new file mode 100644
index 000000000..219943e3d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_nested_test.go
@@ -0,0 +1,62 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestNestedAggregation(t *testing.T) {
+ agg := NewNestedAggregation().Path("resellers")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"nested":{"path":"resellers"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNestedAggregationWithSubAggregation(t *testing.T) {
+ minPriceAgg := NewMinAggregation().Field("resellers.price")
+ agg := NewNestedAggregation().Path("resellers").SubAggregation("min_price", minPriceAgg)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"min_price":{"min":{"field":"resellers.price"}}},"nested":{"path":"resellers"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNestedAggregationWithMetaData(t *testing.T) {
+ agg := NewNestedAggregation().Path("resellers").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"nested":{"path":"resellers"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_range.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_range.go
new file mode 100644
index 000000000..28c3df78e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_range.go
@@ -0,0 +1,244 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "time"
+)
+
+// RangeAggregation is a multi-bucket value source based aggregation that
+// enables the user to define a set of ranges - each representing a bucket.
+// During the aggregation process, the values extracted from each document
+// will be checked against each bucket range, "bucketing" the
+// relevant/matching documents. Note that this aggregation includes the
+// from value and excludes the to value for each range.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-range-aggregation.html
+type RangeAggregation struct {
+ field string
+ script *Script
+ missing interface{}
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ keyed *bool
+ unmapped *bool
+ entries []rangeAggregationEntry
+}
+
+type rangeAggregationEntry struct {
+ Key string
+ From interface{}
+ To interface{}
+}
+
+func NewRangeAggregation() *RangeAggregation {
+ return &RangeAggregation{
+ subAggregations: make(map[string]Aggregation),
+ entries: make([]rangeAggregationEntry, 0),
+ }
+}
+
+func (a *RangeAggregation) Field(field string) *RangeAggregation {
+ a.field = field
+ return a
+}
+
+func (a *RangeAggregation) Script(script *Script) *RangeAggregation {
+ a.script = script
+ return a
+}
+
+// Missing configures the value to use when documents miss a value.
+func (a *RangeAggregation) Missing(missing interface{}) *RangeAggregation {
+ a.missing = missing
+ return a
+}
+
+func (a *RangeAggregation) SubAggregation(name string, subAggregation Aggregation) *RangeAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *RangeAggregation) Meta(metaData map[string]interface{}) *RangeAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *RangeAggregation) Keyed(keyed bool) *RangeAggregation {
+ a.keyed = &keyed
+ return a
+}
+
+func (a *RangeAggregation) Unmapped(unmapped bool) *RangeAggregation {
+ a.unmapped = &unmapped
+ return a
+}
+
+func (a *RangeAggregation) AddRange(from, to interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a *RangeAggregation) AddRangeWithKey(key string, from, to interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a *RangeAggregation) AddUnboundedTo(from interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a *RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a *RangeAggregation) AddUnboundedFrom(to interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a *RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a *RangeAggregation) Lt(to interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a *RangeAggregation) LtWithKey(key string, to interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a *RangeAggregation) Between(from, to interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a *RangeAggregation) BetweenWithKey(key string, from, to interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a *RangeAggregation) Gt(from interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a *RangeAggregation) GtWithKey(key string, from interface{}) *RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a *RangeAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "price_ranges" : {
+ // "range" : {
+ // "field" : "price",
+ // "ranges" : [
+ // { "to" : 50 },
+ // { "from" : 50, "to" : 100 },
+ // { "from" : 100 }
+ // ]
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "range" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["range"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.missing != nil {
+ opts["missing"] = a.missing
+ }
+
+ if a.keyed != nil {
+ opts["keyed"] = *a.keyed
+ }
+ if a.unmapped != nil {
+ opts["unmapped"] = *a.unmapped
+ }
+
+ var ranges []interface{}
+ for _, ent := range a.entries {
+ r := make(map[string]interface{})
+ if ent.Key != "" {
+ r["key"] = ent.Key
+ }
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case *int, *int16, *int32, *int64, *float32, *float64:
+ r["from"] = from
+ case time.Time:
+ r["from"] = from.Format(time.RFC3339)
+ case *time.Time:
+ r["from"] = from.Format(time.RFC3339)
+ case string:
+ r["from"] = from
+ case *string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case *int, *int16, *int32, *int64, *float32, *float64:
+ r["to"] = to
+ case time.Time:
+ r["to"] = to.Format(time.RFC3339)
+ case *time.Time:
+ r["to"] = to.Format(time.RFC3339)
+ case string:
+ r["to"] = to
+ case *string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+ return source, nil
+}
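The From/To type switch above is what makes mixed range types work; in particular, time.Time values are emitted in RFC3339 form. A sketch (field names assumed; requires import "time"):

    // Keyed numeric ranges.
    byPrice := elastic.NewRangeAggregation().Field("price").
        LtWithKey("cheap", 50).
        BetweenWithKey("mid", 50, 100).
        GtWithKey("expensive", 100)

    // A date boundary: serialized as {"to":"2018-01-01T00:00:00Z"} and so on.
    cutoff := time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC)
    byDate := elastic.NewRangeAggregation().Field("created").
        Lt(cutoff).
        Gt(cutoff)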
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_range_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_range_test.go
new file mode 100644
index 000000000..17fbcecf3
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_range_test.go
@@ -0,0 +1,178 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestRangeAggregation(t *testing.T) {
+ agg := NewRangeAggregation().Field("price")
+ agg = agg.AddRange(nil, 50)
+ agg = agg.AddRange(50, 100)
+ agg = agg.AddRange(100, nil)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeAggregationWithPointers(t *testing.T) {
+ fifty := 50
+ hundred := 100
+ agg := NewRangeAggregation().Field("price")
+ agg = agg.AddRange(nil, &fifty)
+ agg = agg.AddRange(fifty, &hundred)
+ agg = agg.AddRange(hundred, nil)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeAggregationWithUnbounded(t *testing.T) {
+ agg := NewRangeAggregation().Field("field_name").
+ AddUnboundedFrom(50).
+ AddRange(20, 70).
+ AddRange(70, 120).
+ AddUnboundedTo(150)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeAggregationWithLtAndCo(t *testing.T) {
+ agg := NewRangeAggregation().Field("field_name").
+ Lt(50).
+ Between(20, 70).
+ Between(70, 120).
+ Gt(150)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"field_name","ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeAggregationWithKeyedFlag(t *testing.T) {
+ agg := NewRangeAggregation().Field("field_name").
+ Keyed(true).
+ Lt(50).
+ Between(20, 70).
+ Between(70, 120).
+ Gt(150)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"to":50},{"from":20,"to":70},{"from":70,"to":120},{"from":150}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeAggregationWithKeys(t *testing.T) {
+ agg := NewRangeAggregation().Field("field_name").
+ Keyed(true).
+ LtWithKey("cheap", 50).
+ BetweenWithKey("affordable", 20, 70).
+ BetweenWithKey("average", 70, 120).
+ GtWithKey("expensive", 150)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"field_name","keyed":true,"ranges":[{"key":"cheap","to":50},{"from":20,"key":"affordable","to":70},{"from":70,"key":"average","to":120},{"from":150,"key":"expensive"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeAggregationWithMetaData(t *testing.T) {
+ agg := NewRangeAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
+ agg = agg.AddRange(nil, 50)
+ agg = agg.AddRange(50, 100)
+ agg = agg.AddRange(100, nil)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeAggregationWithMissing(t *testing.T) {
+ agg := NewRangeAggregation().Field("price").Missing(0)
+ agg = agg.AddRange(nil, 50)
+ agg = agg.AddRange(50, 100)
+ agg = agg.AddRange(100, nil)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"field":"price","missing":0,"ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested.go
new file mode 100644
index 000000000..9e4680195
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested.go
@@ -0,0 +1,86 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ReverseNestedAggregation defines a special single bucket aggregation
+// that enables aggregating on parent docs from nested documents.
+// Effectively this aggregation can break out of the nested block
+// structure and link to other nested structures or the root document,
+// which allows nesting other aggregations that aren’t part of
+// the nested object in a nested aggregation.
+//
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-reverse-nested-aggregation.html
+type ReverseNestedAggregation struct {
+ path string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+// NewReverseNestedAggregation initializes a new ReverseNestedAggregation
+// bucket aggregation.
+func NewReverseNestedAggregation() *ReverseNestedAggregation {
+ return &ReverseNestedAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+// Path sets the path to use for this nested aggregation. The path must match
+// the path to a nested object in the mappings. If it is not specified
+// then this aggregation will go back to the root document.
+func (a *ReverseNestedAggregation) Path(path string) *ReverseNestedAggregation {
+ a.path = path
+ return a
+}
+
+func (a *ReverseNestedAggregation) SubAggregation(name string, subAggregation Aggregation) *ReverseNestedAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *ReverseNestedAggregation) Meta(metaData map[string]interface{}) *ReverseNestedAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *ReverseNestedAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "reverse_nested" : {
+ // "path": "..."
+ // }
+ // }
+ // }
+ // This method returns only the { "reverse_nested" : {} } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["reverse_nested"] = opts
+
+ if a.path != "" {
+ opts["path"] = a.path
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
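Since reverse_nested only makes sense inside a nested scope, a sketch of the usual shape (the comments/tags layout is illustrative; NewTermsAggregation is part of the same package):

    backToRoot := elastic.NewReverseNestedAggregation(). // no Path: back to the root document
        SubAggregation("top_tags", elastic.NewTermsAggregation().Field("tags"))
    agg := elastic.NewNestedAggregation().
        Path("comments").
        SubAggregation("top_usernames",
            elastic.NewTermsAggregation().Field("comments.username").
                SubAggregation("comment_to_issue", backToRoot))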
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested_test.go
new file mode 100644
index 000000000..dc50bbc28
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_reverse_nested_test.go
@@ -0,0 +1,83 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestReverseNestedAggregation(t *testing.T) {
+ agg := NewReverseNestedAggregation()
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"reverse_nested":{}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestReverseNestedAggregationWithPath(t *testing.T) {
+ agg := NewReverseNestedAggregation().Path("comments")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"reverse_nested":{"path":"comments"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestReverseNestedAggregationWithSubAggregation(t *testing.T) {
+ avgPriceAgg := NewAvgAggregation().Field("price")
+ agg := NewReverseNestedAggregation().
+ Path("a_path").
+ SubAggregation("avg_price", avgPriceAgg)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"avg_price":{"avg":{"field":"price"}}},"reverse_nested":{"path":"a_path"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestReverseNestedAggregationWithMeta(t *testing.T) {
+ agg := NewReverseNestedAggregation().
+ Path("a_path").
+ Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"reverse_nested":{"path":"a_path"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler.go
new file mode 100644
index 000000000..0fd729dfd
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler.go
@@ -0,0 +1,111 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SamplerAggregation is a filtering aggregation used to limit any
+// sub aggregations' processing to a sample of the top-scoring documents.
+// Optionally, diversity settings can be used to limit the number of matches
+// that share a common value such as an "author".
+//
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-sampler-aggregation.html
+type SamplerAggregation struct {
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+
+ shardSize int
+ maxDocsPerValue int
+ executionHint string
+}
+
+func NewSamplerAggregation() *SamplerAggregation {
+ return &SamplerAggregation{
+ shardSize: -1,
+ maxDocsPerValue: -1,
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *SamplerAggregation) SubAggregation(name string, subAggregation Aggregation) *SamplerAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *SamplerAggregation) Meta(metaData map[string]interface{}) *SamplerAggregation {
+ a.meta = metaData
+ return a
+}
+
+// ShardSize sets the maximum number of docs returned from each shard.
+func (a *SamplerAggregation) ShardSize(shardSize int) *SamplerAggregation {
+ a.shardSize = shardSize
+ return a
+}
+
+func (a *SamplerAggregation) MaxDocsPerValue(maxDocsPerValue int) *SamplerAggregation {
+ a.maxDocsPerValue = maxDocsPerValue
+ return a
+}
+
+func (a *SamplerAggregation) ExecutionHint(hint string) *SamplerAggregation {
+ a.executionHint = hint
+ return a
+}
+
+func (a *SamplerAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "sample" : {
+ // "sampler" : {
+ // "shard_size" : 200
+ // },
+ // "aggs": {
+ // "keywords": {
+ // "significant_terms": {
+ // "field": "text"
+ // }
+ // }
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "sampler" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["sampler"] = opts
+
+ if a.shardSize >= 0 {
+ opts["shard_size"] = a.shardSize
+ }
+ if a.maxDocsPerValue >= 0 {
+ opts["max_docs_per_value"] = a.maxDocsPerValue
+ }
+ if a.executionHint != "" {
+ opts["execution_hint"] = a.executionHint
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
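A builder-only sketch of exactly the shape in the Source() comment (field name as in that example):

    sampler := elastic.NewSamplerAggregation().
        ShardSize(200). // sample at most the 200 top-scoring docs per shard
        SubAggregation("keywords",
            elastic.NewSignificantTermsAggregation().Field("text"))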
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go
new file mode 100644
index 000000000..c4dc1c7cc
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_sampler_test.go
@@ -0,0 +1,30 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSamplerAggregation(t *testing.T) {
+ keywordsAgg := NewSignificantTermsAggregation().Field("text")
+ agg := NewSamplerAggregation().
+ ShardSize(200).
+ SubAggregation("keywords", keywordsAgg)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"keywords":{"significant_terms":{"field":"text"}}},"sampler":{"shard_size":200}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go
new file mode 100644
index 000000000..571a91217
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms.go
@@ -0,0 +1,389 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SignificantTermsAggregation is an aggregation that returns interesting
+// or unusual occurrences of terms in a set.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html
+type SignificantTermsAggregation struct {
+ field string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+
+ minDocCount *int
+ shardMinDocCount *int
+ requiredSize *int
+ shardSize *int
+ filter Query
+ executionHint string
+ significanceHeuristic SignificanceHeuristic
+}
+
+func NewSignificantTermsAggregation() *SignificantTermsAggregation {
+ return &SignificantTermsAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *SignificantTermsAggregation) Field(field string) *SignificantTermsAggregation {
+ a.field = field
+ return a
+}
+
+func (a *SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTermsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *SignificantTermsAggregation) Meta(metaData map[string]interface{}) *SignificantTermsAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *SignificantTermsAggregation) MinDocCount(minDocCount int) *SignificantTermsAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+func (a *SignificantTermsAggregation) ShardMinDocCount(shardMinDocCount int) *SignificantTermsAggregation {
+ a.shardMinDocCount = &shardMinDocCount
+ return a
+}
+
+func (a *SignificantTermsAggregation) RequiredSize(requiredSize int) *SignificantTermsAggregation {
+ a.requiredSize = &requiredSize
+ return a
+}
+
+func (a *SignificantTermsAggregation) ShardSize(shardSize int) *SignificantTermsAggregation {
+ a.shardSize = &shardSize
+ return a
+}
+
+func (a *SignificantTermsAggregation) BackgroundFilter(filter Query) *SignificantTermsAggregation {
+ a.filter = filter
+ return a
+}
+
+func (a *SignificantTermsAggregation) ExecutionHint(hint string) *SignificantTermsAggregation {
+ a.executionHint = hint
+ return a
+}
+
+func (a *SignificantTermsAggregation) SignificanceHeuristic(heuristic SignificanceHeuristic) *SignificantTermsAggregation {
+ a.significanceHeuristic = heuristic
+ return a
+}
+
+func (a *SignificantTermsAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "query" : {
+ // "terms" : {"force" : [ "British Transport Police" ]}
+ // },
+ // "aggregations" : {
+ // "significantCrimeTypes" : {
+ // "significant_terms" : { "field" : "crime_type" }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the
+ // { "significant_terms" : { "field" : "crime_type" }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["significant_terms"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.requiredSize != nil {
+ opts["size"] = *a.requiredSize // not a typo!
+ }
+ if a.shardSize != nil {
+ opts["shard_size"] = *a.shardSize
+ }
+ if a.minDocCount != nil {
+ opts["min_doc_count"] = *a.minDocCount
+ }
+ if a.shardMinDocCount != nil {
+ opts["shard_min_doc_count"] = *a.shardMinDocCount
+ }
+ if a.executionHint != "" {
+ opts["execution_hint"] = a.executionHint
+ }
+ if a.filter != nil {
+ src, err := a.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["background_filter"] = src
+ }
+ if a.significanceHeuristic != nil {
+ name := a.significanceHeuristic.Name()
+ src, err := a.significanceHeuristic.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts[name] = src
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
+
+// -- Significance heuristics --
+
+type SignificanceHeuristic interface {
+ Name() string
+ Source() (interface{}, error)
+}
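+
+// Each concrete heuristic below serializes under the JSON key returned by
+// Name(), with Source() supplying the body; e.g. a GND heuristic with no
+// options renders as {"gnd":{}} inside the significant_terms options (see
+// the accompanying tests).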
+
+// -- Chi Square --
+
+// ChiSquareSignificanceHeuristic implements Chi square as described
+// in "Information Retrieval", Manning et al., Chapter 13.5.2.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_chi_square
+// for details.
+type ChiSquareSignificanceHeuristic struct {
+ backgroundIsSuperset *bool
+ includeNegatives *bool
+}
+
+// NewChiSquareSignificanceHeuristic initializes a new ChiSquareSignificanceHeuristic.
+func NewChiSquareSignificanceHeuristic() *ChiSquareSignificanceHeuristic {
+ return &ChiSquareSignificanceHeuristic{}
+}
+
+// Name returns the name of the heuristic in the REST interface.
+func (sh *ChiSquareSignificanceHeuristic) Name() string {
+ return "chi_square"
+}
+
+// BackgroundIsSuperset indicates whether you defined a custom background
+// filter that represents a different set of documents that you want to
+// compare to.
+func (sh *ChiSquareSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *ChiSquareSignificanceHeuristic {
+ sh.backgroundIsSuperset = &backgroundIsSuperset
+ return sh
+}
+
+// IncludeNegatives indicates whether to filter out the terms that appear
+// much less in the subset than in the background without the subset.
+func (sh *ChiSquareSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *ChiSquareSignificanceHeuristic {
+ sh.includeNegatives = &includeNegatives
+ return sh
+}
+
+// Source returns the parameters that need to be added to the REST parameters.
+func (sh *ChiSquareSignificanceHeuristic) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if sh.backgroundIsSuperset != nil {
+ source["background_is_superset"] = *sh.backgroundIsSuperset
+ }
+ if sh.includeNegatives != nil {
+ source["include_negatives"] = *sh.includeNegatives
+ }
+ return source, nil
+}
+
+// -- GND --
+
+// GNDSignificanceHeuristic implements the "Google Normalized Distance"
+// as described in "The Google Similarity Distance", Cilibrasi and Vitanyi,
+// 2007.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_google_normalized_distance
+// for details.
+type GNDSignificanceHeuristic struct {
+ backgroundIsSuperset *bool
+}
+
+// NewGNDSignificanceHeuristic initializes a new GNDSignificanceHeuristic.
+func NewGNDSignificanceHeuristic() *GNDSignificanceHeuristic {
+ return &GNDSignificanceHeuristic{}
+}
+
+// Name returns the name of the heuristic in the REST interface.
+func (sh *GNDSignificanceHeuristic) Name() string {
+ return "gnd"
+}
+
+// BackgroundIsSuperset indicates whether you defined a custom background
+// filter that represents a different set of documents that you want to
+// compare to.
+func (sh *GNDSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *GNDSignificanceHeuristic {
+ sh.backgroundIsSuperset = &backgroundIsSuperset
+ return sh
+}
+
+// Source returns the parameters that need to be added to the REST parameters.
+func (sh *GNDSignificanceHeuristic) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if sh.backgroundIsSuperset != nil {
+ source["background_is_superset"] = *sh.backgroundIsSuperset
+ }
+ return source, nil
+}
+
+// -- JLH Score --
+
+// JLHScoreSignificanceHeuristic implements the JLH score as described in
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_jlh_score.
+type JLHScoreSignificanceHeuristic struct{}
+
+// NewJLHScoreSignificanceHeuristic initializes a new JLHScoreSignificanceHeuristic.
+func NewJLHScoreSignificanceHeuristic() *JLHScoreSignificanceHeuristic {
+ return &JLHScoreSignificanceHeuristic{}
+}
+
+// Name returns the name of the heuristic in the REST interface.
+func (sh *JLHScoreSignificanceHeuristic) Name() string {
+ return "jlh"
+}
+
+// Source returns the parameters that need to be added to the REST parameters.
+func (sh *JLHScoreSignificanceHeuristic) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ return source, nil
+}
+
+// -- Mutual Information --
+
+// MutualInformationSignificanceHeuristic implements Mutual information
+// as described in "Information Retrieval", Manning et al., Chapter 13.5.1.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_mutual_information
+// for details.
+type MutualInformationSignificanceHeuristic struct {
+ backgroundIsSuperset *bool
+ includeNegatives *bool
+}
+
+// NewMutualInformationSignificanceHeuristic initializes a new instance of
+// MutualInformationSignificanceHeuristic.
+func NewMutualInformationSignificanceHeuristic() *MutualInformationSignificanceHeuristic {
+ return &MutualInformationSignificanceHeuristic{}
+}
+
+// Name returns the name of the heuristic in the REST interface.
+func (sh *MutualInformationSignificanceHeuristic) Name() string {
+ return "mutual_information"
+}
+
+// BackgroundIsSuperset indicates whether you defined a custom background
+// filter that represents a different set of documents that you want to
+// compare to.
+func (sh *MutualInformationSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *MutualInformationSignificanceHeuristic {
+ sh.backgroundIsSuperset = &backgroundIsSuperset
+ return sh
+}
+
+// IncludeNegatives indicates whether to filter out the terms that appear
+// much less in the subset than in the background without the subset.
+func (sh *MutualInformationSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *MutualInformationSignificanceHeuristic {
+ sh.includeNegatives = &includeNegatives
+ return sh
+}
+
+// Source returns the parameters that need to be added to the REST parameters.
+func (sh *MutualInformationSignificanceHeuristic) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if sh.backgroundIsSuperset != nil {
+ source["background_is_superset"] = *sh.backgroundIsSuperset
+ }
+ if sh.includeNegatives != nil {
+ source["include_negatives"] = *sh.includeNegatives
+ }
+ return source, nil
+}
+
+// -- Percentage Score --
+
+// PercentageScoreSignificanceHeuristic implements the algorithm described
+// in https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_percentage.
+type PercentageScoreSignificanceHeuristic struct{}
+
+// NewPercentageScoreSignificanceHeuristic initializes a new instance of
+// PercentageScoreSignificanceHeuristic.
+func NewPercentageScoreSignificanceHeuristic() *PercentageScoreSignificanceHeuristic {
+ return &PercentageScoreSignificanceHeuristic{}
+}
+
+// Name returns the name of the heuristic in the REST interface.
+func (sh *PercentageScoreSignificanceHeuristic) Name() string {
+ return "percentage"
+}
+
+// Source returns the parameters that need to be added to the REST parameters.
+func (sh *PercentageScoreSignificanceHeuristic) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ return source, nil
+}
+
+// -- Script --
+
+// ScriptSignificanceHeuristic implements a scripted significance heuristic.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_scripted
+// for details.
+type ScriptSignificanceHeuristic struct {
+ script *Script
+}
+
+// NewScriptSignificanceHeuristic initializes a new instance of
+// ScriptSignificanceHeuristic.
+func NewScriptSignificanceHeuristic() *ScriptSignificanceHeuristic {
+ return &ScriptSignificanceHeuristic{}
+}
+
+// Name returns the name of the heuristic in the REST interface.
+func (sh *ScriptSignificanceHeuristic) Name() string {
+ return "script_heuristic"
+}
+
+// Script specifies the script to use to get custom scores. The following
+// parameters are available in the script: `_subset_freq`, `_superset_freq`,
+// `_subset_size`, and `_superset_size`.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_scripted
+// for details.
+func (sh *ScriptSignificanceHeuristic) Script(script *Script) *ScriptSignificanceHeuristic {
+ sh.script = script
+ return sh
+}
+
+// Source returns the parameters that need to be added to the REST parameters.
+func (sh *ScriptSignificanceHeuristic) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if sh.script != nil {
+ src, err := sh.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["script"] = src
+ }
+ return source, nil
+}
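
Since Source() emits only the {"significant_terms": ...} fragment, a caller attaches the aggregation to a search under a name of its choosing. A minimal sketch, assuming the Client and SearchService APIs defined elsewhere in this package; the index, query, and heuristic choices are illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	elastic "github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient() // assumes a node on localhost:9200
	if err != nil {
		log.Fatal(err)
	}
	agg := elastic.NewSignificantTermsAggregation().
		Field("crime_type").
		SignificanceHeuristic(
			elastic.NewChiSquareSignificanceHeuristic().BackgroundIsSuperset(true),
		)
	res, err := client.Search().
		Index("crimes").
		Query(elastic.NewTermQuery("force", "British Transport Police")).
		Aggregation("significantCrimeTypes", agg).
		Size(0). // aggregation buckets only, no hits
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if sig, found := res.Aggregations.SignificantTerms("significantCrimeTypes"); found {
		for _, b := range sig.Buckets {
			fmt.Println(b.Key, b.DocCount, b.Score)
		}
	}
}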
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go
new file mode 100644
index 000000000..a5b269671
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_terms_test.go
@@ -0,0 +1,211 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSignificantTermsAggregation(t *testing.T) {
+ agg := NewSignificantTermsAggregation().Field("crime_type")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_terms":{"field":"crime_type"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTermsAggregationWithArgs(t *testing.T) {
+ agg := NewSignificantTermsAggregation().
+ Field("crime_type").
+ ExecutionHint("map").
+ ShardSize(5).
+ MinDocCount(10).
+ BackgroundFilter(NewTermQuery("city", "London"))
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_terms":{"background_filter":{"term":{"city":"London"}},"execution_hint":"map","field":"crime_type","min_doc_count":10,"shard_size":5}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTermsAggregationSubAggregation(t *testing.T) {
+ crimeTypesAgg := NewSignificantTermsAggregation().Field("crime_type")
+ agg := NewTermsAggregation().Field("force")
+ agg = agg.SubAggregation("significantCrimeTypes", crimeTypesAgg)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"significantCrimeTypes":{"significant_terms":{"field":"crime_type"}}},"terms":{"field":"force"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTermsAggregationWithMetaData(t *testing.T) {
+ agg := NewSignificantTermsAggregation().Field("crime_type")
+ agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"significant_terms":{"field":"crime_type"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTermsAggregationWithChiSquare(t *testing.T) {
+ agg := NewSignificantTermsAggregation().Field("crime_type")
+ agg = agg.SignificanceHeuristic(
+ NewChiSquareSignificanceHeuristic().
+ BackgroundIsSuperset(true).
+ IncludeNegatives(false),
+ )
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_terms":{"chi_square":{"background_is_superset":true,"include_negatives":false},"field":"crime_type"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTermsAggregationWithGND(t *testing.T) {
+ agg := NewSignificantTermsAggregation().Field("crime_type")
+ agg = agg.SignificanceHeuristic(
+ NewGNDSignificanceHeuristic(),
+ )
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_terms":{"field":"crime_type","gnd":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTermsAggregationWithJLH(t *testing.T) {
+ agg := NewSignificantTermsAggregation().Field("crime_type")
+ agg = agg.SignificanceHeuristic(
+ NewJLHScoreSignificanceHeuristic(),
+ )
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_terms":{"field":"crime_type","jlh":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTermsAggregationWithMutualInformation(t *testing.T) {
+ agg := NewSignificantTermsAggregation().Field("crime_type")
+ agg = agg.SignificanceHeuristic(
+ NewMutualInformationSignificanceHeuristic().
+ BackgroundIsSuperset(false).
+ IncludeNegatives(true),
+ )
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_terms":{"field":"crime_type","mutual_information":{"background_is_superset":false,"include_negatives":true}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTermsAggregationWithPercentageScore(t *testing.T) {
+ agg := NewSignificantTermsAggregation().Field("crime_type")
+ agg = agg.SignificanceHeuristic(
+ NewPercentageScoreSignificanceHeuristic(),
+ )
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_terms":{"field":"crime_type","percentage":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTermsAggregationWithScript(t *testing.T) {
+ agg := NewSignificantTermsAggregation().Field("crime_type")
+ agg = agg.SignificanceHeuristic(
+ NewScriptSignificanceHeuristic().
+ Script(NewScript("_subset_freq/(_superset_freq - _subset_freq + 1)")),
+ )
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_terms":{"field":"crime_type","script_heuristic":{"script":{"source":"_subset_freq/(_superset_freq - _subset_freq + 1)"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text.go
new file mode 100644
index 000000000..de761613c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text.go
@@ -0,0 +1,245 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SignificantTextAggregation returns interesting or unusual occurrences
+// of free-text terms in a set.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significanttext-aggregation.html
+type SignificantTextAggregation struct {
+ field string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+
+ sourceFieldNames []string
+ filterDuplicateText *bool
+ includeExclude *TermsAggregationIncludeExclude
+ filter Query
+ bucketCountThresholds *BucketCountThresholds
+ significanceHeuristic SignificanceHeuristic
+}
+
+func NewSignificantTextAggregation() *SignificantTextAggregation {
+ return &SignificantTextAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *SignificantTextAggregation) Field(field string) *SignificantTextAggregation {
+ a.field = field
+ return a
+}
+
+func (a *SignificantTextAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTextAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *SignificantTextAggregation) Meta(metaData map[string]interface{}) *SignificantTextAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *SignificantTextAggregation) SourceFieldNames(names ...string) *SignificantTextAggregation {
+ a.sourceFieldNames = names
+ return a
+}
+
+func (a *SignificantTextAggregation) FilterDuplicateText(filter bool) *SignificantTextAggregation {
+ a.filterDuplicateText = &filter
+ return a
+}
+
+func (a *SignificantTextAggregation) MinDocCount(minDocCount int64) *SignificantTextAggregation {
+ if a.bucketCountThresholds == nil {
+ a.bucketCountThresholds = &BucketCountThresholds{}
+ }
+ a.bucketCountThresholds.MinDocCount = &minDocCount
+ return a
+}
+
+func (a *SignificantTextAggregation) ShardMinDocCount(shardMinDocCount int64) *SignificantTextAggregation {
+ if a.bucketCountThresholds == nil {
+ a.bucketCountThresholds = &BucketCountThresholds{}
+ }
+ a.bucketCountThresholds.ShardMinDocCount = &shardMinDocCount
+ return a
+}
+
+func (a *SignificantTextAggregation) Size(size int) *SignificantTextAggregation {
+ if a.bucketCountThresholds == nil {
+ a.bucketCountThresholds = &BucketCountThresholds{}
+ }
+ a.bucketCountThresholds.RequiredSize = &size
+ return a
+}
+
+func (a *SignificantTextAggregation) ShardSize(shardSize int) *SignificantTextAggregation {
+ if a.bucketCountThresholds == nil {
+ a.bucketCountThresholds = &BucketCountThresholds{}
+ }
+ a.bucketCountThresholds.ShardSize = &shardSize
+ return a
+}
+
+func (a *SignificantTextAggregation) BackgroundFilter(filter Query) *SignificantTextAggregation {
+ a.filter = filter
+ return a
+}
+
+func (a *SignificantTextAggregation) SignificanceHeuristic(heuristic SignificanceHeuristic) *SignificantTextAggregation {
+ a.significanceHeuristic = heuristic
+ return a
+}
+
+func (a *SignificantTextAggregation) Include(regexp string) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.Include = regexp
+ return a
+}
+
+func (a *SignificantTextAggregation) IncludeValues(values ...interface{}) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.IncludeValues = append(a.includeExclude.IncludeValues, values...)
+ return a
+}
+
+func (a *SignificantTextAggregation) Exclude(regexp string) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.Exclude = regexp
+ return a
+}
+
+func (a *SignificantTextAggregation) ExcludeValues(values ...interface{}) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.ExcludeValues = append(a.includeExclude.ExcludeValues, values...)
+ return a
+}
+
+func (a *SignificantTextAggregation) Partition(p int) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.Partition = p
+ return a
+}
+
+func (a *SignificantTextAggregation) NumPartitions(n int) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.NumPartitions = n
+ return a
+}
+
+func (a *SignificantTextAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "query" : {
+ // "match" : {"content" : "Bird flu"}
+ // },
+ // "aggregations" : {
+ // "my_sample" : {
+ // "sampler": {
+ // "shard_size" : 100
+ // },
+ // "aggregations": {
+ // "keywords" : {
+ // "significant_text" : { "field" : "content" }
+ // }
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the
+ // { "significant_text" : { "field" : "content" }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["significant_text"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.bucketCountThresholds != nil {
+ if a.bucketCountThresholds.RequiredSize != nil {
+ opts["size"] = (*a.bucketCountThresholds).RequiredSize
+ }
+ if a.bucketCountThresholds.ShardSize != nil {
+ opts["shard_size"] = (*a.bucketCountThresholds).ShardSize
+ }
+ if a.bucketCountThresholds.MinDocCount != nil {
+ opts["min_doc_count"] = (*a.bucketCountThresholds).MinDocCount
+ }
+ if a.bucketCountThresholds.ShardMinDocCount != nil {
+ opts["shard_min_doc_count"] = (*a.bucketCountThresholds).ShardMinDocCount
+ }
+ }
+ if a.filter != nil {
+ src, err := a.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["background_filter"] = src
+ }
+ if a.significanceHeuristic != nil {
+ name := a.significanceHeuristic.Name()
+ src, err := a.significanceHeuristic.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts[name] = src
+ }
+ // Include/Exclude
+ if ie := a.includeExclude; ie != nil {
+ // Include
+ if ie.Include != "" {
+ opts["include"] = ie.Include
+ } else if len(ie.IncludeValues) > 0 {
+ opts["include"] = ie.IncludeValues
+ } else if ie.NumPartitions > 0 {
+ inc := make(map[string]interface{})
+ inc["partition"] = ie.Partition
+ inc["num_partitions"] = ie.NumPartitions
+ opts["include"] = inc
+ }
+ // Exclude
+ if ie.Exclude != "" {
+ opts["exclude"] = ie.Exclude
+ } else if len(ie.ExcludeValues) > 0 {
+ opts["exclude"] = ie.ExcludeValues
+ }
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
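
The Source() doc comment above nests significant_text inside a sampler so only a bounded number of documents per shard feed the analysis. The same composition can be built with this package's builders and inspected without a cluster; a sketch (the "keywords" name and "content" field are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "github.com/olivere/elastic"
)

func main() {
	// Sample up to 100 docs per shard, then look for significant free-text
	// terms within the sample, mirroring the doc-comment example above.
	keywords := elastic.NewSignificantTextAggregation().Field("content")
	sampled := elastic.NewSamplerAggregation().
		ShardSize(100).
		SubAggregation("keywords", keywords)
	src, err := sampled.Source()
	if err != nil {
		log.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
	// {"aggregations":{"keywords":{"significant_text":{"field":"content"}}},"sampler":{"shard_size":100}}
}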
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text_test.go
new file mode 100644
index 000000000..53ac4461d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_significant_text_test.go
@@ -0,0 +1,66 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSignificantTextAggregation(t *testing.T) {
+ agg := NewSignificantTextAggregation().Field("content")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_text":{"field":"content"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTextAggregationWithArgs(t *testing.T) {
+ agg := NewSignificantTextAggregation().
+ Field("content").
+ ShardSize(5).
+ MinDocCount(10).
+ BackgroundFilter(NewTermQuery("city", "London"))
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_text":{"background_filter":{"term":{"city":"London"}},"field":"content","min_doc_count":10,"shard_size":5}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTextAggregationWithMetaData(t *testing.T) {
+ agg := NewSignificantTextAggregation().Field("content")
+ agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"significant_text":{"field":"content"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_terms.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_terms.go
new file mode 100644
index 000000000..6bcc322d0
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_terms.go
@@ -0,0 +1,368 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermsAggregation is a multi-bucket, value-source-based aggregation
+// where buckets are built dynamically, one per unique value.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html
+type TermsAggregation struct {
+ field string
+ script *Script
+ missing interface{}
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+
+ size *int
+ shardSize *int
+ requiredSize *int
+ minDocCount *int
+ shardMinDocCount *int
+ valueType string
+ includeExclude *TermsAggregationIncludeExclude
+ executionHint string
+ collectionMode string
+ showTermDocCountError *bool
+ order []TermsOrder
+}
+
+func NewTermsAggregation() *TermsAggregation {
+ return &TermsAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *TermsAggregation) Field(field string) *TermsAggregation {
+ a.field = field
+ return a
+}
+
+func (a *TermsAggregation) Script(script *Script) *TermsAggregation {
+ a.script = script
+ return a
+}
+
+// Missing configures the value to use when documents miss a value.
+func (a *TermsAggregation) Missing(missing interface{}) *TermsAggregation {
+ a.missing = missing
+ return a
+}
+
+func (a *TermsAggregation) SubAggregation(name string, subAggregation Aggregation) *TermsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *TermsAggregation) Meta(metaData map[string]interface{}) *TermsAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *TermsAggregation) Size(size int) *TermsAggregation {
+ a.size = &size
+ return a
+}
+
+func (a *TermsAggregation) RequiredSize(requiredSize int) *TermsAggregation {
+ a.requiredSize = &requiredSize
+ return a
+}
+
+func (a *TermsAggregation) ShardSize(shardSize int) *TermsAggregation {
+ a.shardSize = &shardSize
+ return a
+}
+
+func (a *TermsAggregation) MinDocCount(minDocCount int) *TermsAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+func (a *TermsAggregation) ShardMinDocCount(shardMinDocCount int) *TermsAggregation {
+ a.shardMinDocCount = &shardMinDocCount
+ return a
+}
+
+func (a *TermsAggregation) Include(regexp string) *TermsAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.Include = regexp
+ return a
+}
+
+func (a *TermsAggregation) IncludeValues(values ...interface{}) *TermsAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.IncludeValues = append(a.includeExclude.IncludeValues, values...)
+ return a
+}
+
+func (a *TermsAggregation) Exclude(regexp string) *TermsAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.Exclude = regexp
+ return a
+}
+
+func (a *TermsAggregation) ExcludeValues(values ...interface{}) *TermsAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.ExcludeValues = append(a.includeExclude.ExcludeValues, values...)
+ return a
+}
+
+func (a *TermsAggregation) Partition(p int) *TermsAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.Partition = p
+ return a
+}
+
+func (a *TermsAggregation) NumPartitions(n int) *TermsAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.NumPartitions = n
+ return a
+}
+
+// ValueType can be string, long, or double.
+func (a *TermsAggregation) ValueType(valueType string) *TermsAggregation {
+ a.valueType = valueType
+ return a
+}
+
+func (a *TermsAggregation) Order(order string, asc bool) *TermsAggregation {
+ a.order = append(a.order, TermsOrder{Field: order, Ascending: asc})
+ return a
+}
+
+func (a *TermsAggregation) OrderByCount(asc bool) *TermsAggregation {
+ // "order" : { "_count" : "asc" }
+ a.order = append(a.order, TermsOrder{Field: "_count", Ascending: asc})
+ return a
+}
+
+func (a *TermsAggregation) OrderByCountAsc() *TermsAggregation {
+ return a.OrderByCount(true)
+}
+
+func (a *TermsAggregation) OrderByCountDesc() *TermsAggregation {
+ return a.OrderByCount(false)
+}
+
+func (a *TermsAggregation) OrderByTerm(asc bool) *TermsAggregation {
+ // "order" : { "_term" : "asc" }
+ a.order = append(a.order, TermsOrder{Field: "_term", Ascending: asc})
+ return a
+}
+
+func (a *TermsAggregation) OrderByTermAsc() *TermsAggregation {
+ return a.OrderByTerm(true)
+}
+
+func (a *TermsAggregation) OrderByTermDesc() *TermsAggregation {
+ return a.OrderByTerm(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy that sorts buckets
+// based on a single-valued metric sub-aggregation.
+func (a *TermsAggregation) OrderByAggregation(aggName string, asc bool) *TermsAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "avg_height" : "desc" }
+ // },
+ // "aggs" : {
+ // "avg_height" : { "avg" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = append(a.order, TermsOrder{Field: aggName, Ascending: asc})
+ return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy that sorts
+// buckets based on a metric of a multi-valued sub-aggregation, e.g. "height_stats.avg".
+func (a *TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *TermsAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "height_stats.avg" : "desc" }
+ // },
+ // "aggs" : {
+ // "height_stats" : { "stats" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = append(a.order, TermsOrder{Field: aggName + "." + metric, Ascending: asc})
+ return a
+}
+
+func (a *TermsAggregation) ExecutionHint(hint string) *TermsAggregation {
+ a.executionHint = hint
+ return a
+}
+
+// CollectionMode can be "depth_first" or "breadth_first" (supported by Elasticsearch since 1.4.0).
+func (a *TermsAggregation) CollectionMode(collectionMode string) *TermsAggregation {
+ a.collectionMode = collectionMode
+ return a
+}
+
+func (a *TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) *TermsAggregation {
+ a.showTermDocCountError = &showTermDocCountError
+ return a
+}
+
+func (a *TermsAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : { "field" : "gender" }
+ // }
+ // }
+ // }
+ // This method returns only the { "terms" : { "field" : "gender" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["terms"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.missing != nil {
+ opts["missing"] = a.missing
+ }
+
+ // TermsBuilder
+ if a.size != nil && *a.size >= 0 {
+ opts["size"] = *a.size
+ }
+ if a.shardSize != nil && *a.shardSize >= 0 {
+ opts["shard_size"] = *a.shardSize
+ }
+ if a.requiredSize != nil && *a.requiredSize >= 0 {
+ opts["required_size"] = *a.requiredSize
+ }
+ if a.minDocCount != nil && *a.minDocCount >= 0 {
+ opts["min_doc_count"] = *a.minDocCount
+ }
+ if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 {
+ opts["shard_min_doc_count"] = *a.shardMinDocCount
+ }
+ if a.showTermDocCountError != nil {
+ opts["show_term_doc_count_error"] = *a.showTermDocCountError
+ }
+ if a.collectionMode != "" {
+ opts["collect_mode"] = a.collectionMode
+ }
+ if a.valueType != "" {
+ opts["value_type"] = a.valueType
+ }
+ if len(a.order) > 0 {
+ var orderSlice []interface{}
+ for _, order := range a.order {
+ src, err := order.Source()
+ if err != nil {
+ return nil, err
+ }
+ orderSlice = append(orderSlice, src)
+ }
+ opts["order"] = orderSlice
+ }
+ // Include/Exclude
+ if ie := a.includeExclude; ie != nil {
+ // Include
+ if ie.Include != "" {
+ opts["include"] = ie.Include
+ } else if len(ie.IncludeValues) > 0 {
+ opts["include"] = ie.IncludeValues
+ } else if ie.NumPartitions > 0 {
+ inc := make(map[string]interface{})
+ inc["partition"] = ie.Partition
+ inc["num_partitions"] = ie.NumPartitions
+ opts["include"] = inc
+ }
+ // Exclude
+ if ie.Exclude != "" {
+ opts["exclude"] = ie.Exclude
+ } else if len(ie.ExcludeValues) > 0 {
+ opts["exclude"] = ie.ExcludeValues
+ }
+ }
+
+ if a.executionHint != "" {
+ opts["execution_hint"] = a.executionHint
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
+
+// TermsAggregationIncludeExclude allows for include/exclude in a TermsAggregation.
+type TermsAggregationIncludeExclude struct {
+ Include string
+ Exclude string
+ IncludeValues []interface{}
+ ExcludeValues []interface{}
+ Partition int
+ NumPartitions int
+}
+
+// TermsOrder specifies a single order field for a terms aggregation.
+type TermsOrder struct {
+ Field string
+ Ascending bool
+}
+
+// Source returns serializable JSON of the TermsOrder.
+func (order *TermsOrder) Source() (interface{}, error) {
+ source := make(map[string]string)
+ if order.Ascending {
+ source[order.Field] = "asc"
+ } else {
+ source[order.Field] = "desc"
+ }
+ return source, nil
+}
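
For high-cardinality fields, Partition and NumPartitions let a client walk the term space in slices instead of asking for every term at once. A sketch of issuing one request body per partition; only Source() is exercised, so no cluster is required, and the field name and sizes are illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "github.com/olivere/elastic"
)

func main() {
	const numPartitions = 4
	// Terms are hashed into numPartitions groups; requesting partitions
	// 0..numPartitions-1 covers every term exactly once.
	for p := 0; p < numPartitions; p++ {
		agg := elastic.NewTermsAggregation().
			Field("account_id").
			Partition(p).
			NumPartitions(numPartitions).
			Size(1000) // cap per partition
		src, err := agg.Source()
		if err != nil {
			log.Fatal(err)
		}
		data, err := json.Marshal(src)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("partition %d: %s\n", p, data)
	}
}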
diff --git a/vendor/github.com/olivere/elastic/search_aggs_bucket_terms_test.go b/vendor/github.com/olivere/elastic/search_aggs_bucket_terms_test.go
new file mode 100644
index 000000000..351cbf63b
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_bucket_terms_test.go
@@ -0,0 +1,155 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTermsAggregation(t *testing.T) {
+ agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc()
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"field":"gender","order":[{"_term":"desc"}],"size":10}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsAggregationWithSubAggregation(t *testing.T) {
+ subAgg := NewAvgAggregation().Field("height")
+ agg := NewTermsAggregation().Field("gender").Size(10).
+ OrderByAggregation("avg_height", false)
+ agg = agg.SubAggregation("avg_height", subAgg)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}}},"terms":{"field":"gender","order":[{"avg_height":"desc"}],"size":10}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsAggregationWithMultipleSubAggregation(t *testing.T) {
+ subAgg1 := NewAvgAggregation().Field("height")
+ subAgg2 := NewAvgAggregation().Field("width")
+ agg := NewTermsAggregation().Field("gender").Size(10).
+ OrderByAggregation("avg_height", false)
+ agg = agg.SubAggregation("avg_height", subAgg1)
+ agg = agg.SubAggregation("avg_width", subAgg2)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"avg_height":{"avg":{"field":"height"}},"avg_width":{"avg":{"field":"width"}}},"terms":{"field":"gender","order":[{"avg_height":"desc"}],"size":10}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsAggregationWithMetaData(t *testing.T) {
+ agg := NewTermsAggregation().Field("gender").Size(10).OrderByTermDesc()
+ agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"terms":{"field":"gender","order":[{"_term":"desc"}],"size":10}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsAggregationWithMissing(t *testing.T) {
+ agg := NewTermsAggregation().Field("gender").Size(10).Missing("n/a")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"field":"gender","missing":"n/a","size":10}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsAggregationWithIncludeExclude(t *testing.T) {
+ agg := NewTermsAggregation().Field("tags").Include(".*sport.*").Exclude("water_.*")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"exclude":"water_.*","field":"tags","include":".*sport.*"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsAggregationWithIncludeExcludeValues(t *testing.T) {
+ agg := NewTermsAggregation().Field("make").IncludeValues("mazda", "honda").ExcludeValues("rover", "jensen")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"exclude":["rover","jensen"],"field":"make","include":["mazda","honda"]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsAggregationWithPartitions(t *testing.T) {
+ agg := NewTermsAggregation().Field("account_id").Partition(0).NumPartitions(20)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"field":"account_id","include":{"num_partitions":20,"partition":0}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
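
The tests above verify the request body; on the response side, the buckets come back under the aggregation's name. A sketch of consuming them, assuming the Client, SearchService, and typed Aggregations accessors defined elsewhere in this package ("users" is an illustrative index):

package main

import (
	"context"
	"fmt"
	"log"

	elastic "github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient() // assumes a node on localhost:9200
	if err != nil {
		log.Fatal(err)
	}
	res, err := client.Search().
		Index("users").
		Aggregation("genders", elastic.NewTermsAggregation().Field("gender").Size(10)).
		Size(0).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Terms(name) returns the typed view of the named aggregation, if present.
	if terms, found := res.Aggregations.Terms("genders"); found {
		for _, bucket := range terms.Buckets {
			fmt.Printf("%v: %d docs\n", bucket.Key, bucket.DocCount)
		}
	}
}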
diff --git a/vendor/github.com/olivere/elastic/search_aggs_matrix_stats.go b/vendor/github.com/olivere/elastic/search_aggs_matrix_stats.go
new file mode 100644
index 000000000..785f392b6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_matrix_stats.go
@@ -0,0 +1,120 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatrixStatsAggregation computes aggregate statistics over a set of
+// numeric document fields, e.g. covariance and correlation matrices.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-matrix-stats-aggregation.html
+// for details.
+type MatrixStatsAggregation struct {
+ fields []string
+ missing interface{}
+ format string
+ valueType interface{}
+ mode string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+// NewMatrixStatsAggregation initializes a new MatrixStatsAggregation.
+func NewMatrixStatsAggregation() *MatrixStatsAggregation {
+ return &MatrixStatsAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *MatrixStatsAggregation) Fields(fields ...string) *MatrixStatsAggregation {
+ a.fields = append(a.fields, fields...)
+ return a
+}
+
+// Missing configures the value to use when documents miss a value.
+func (a *MatrixStatsAggregation) Missing(missing interface{}) *MatrixStatsAggregation {
+ a.missing = missing
+ return a
+}
+
+// Mode specifies how to operate. Valid values are: sum, avg, median, min, or max.
+func (a *MatrixStatsAggregation) Mode(mode string) *MatrixStatsAggregation {
+ a.mode = mode
+ return a
+}
+
+func (a *MatrixStatsAggregation) Format(format string) *MatrixStatsAggregation {
+ a.format = format
+ return a
+}
+
+func (a *MatrixStatsAggregation) ValueType(valueType interface{}) *MatrixStatsAggregation {
+ a.valueType = valueType
+ return a
+}
+
+func (a *MatrixStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *MatrixStatsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *MatrixStatsAggregation) Meta(metaData map[string]interface{}) *MatrixStatsAggregation {
+ a.meta = metaData
+ return a
+}
+
+// Source returns the JSON to serialize into the request, or an error.
+func (a *MatrixStatsAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "matrixstats" : {
+ // "matrix_stats" : {
+ // "fields" : ["poverty", "income"],
+ // "missing": {"income": 50000},
+ // "mode": "avg",
+ // ...
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "matrix_stats" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["matrix_stats"] = opts
+
+ // MatrixStatsAggregationBuilder
+ opts["fields"] = a.fields
+ if a.missing != nil {
+ opts["missing"] = a.missing
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if a.valueType != nil {
+ opts["value_type"] = a.valueType
+ }
+ if a.mode != "" {
+ opts["mode"] = a.mode
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
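
On the response side, matrix_stats reports per-field moments plus pairwise covariance and correlation. A sketch of reading them back, assuming the typed Aggregations accessor defined elsewhere in this package; the index and field names are illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	elastic "github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	agg := elastic.NewMatrixStatsAggregation().Fields("poverty", "income")
	res, err := client.Search().
		Index("census").
		Aggregation("matrixstats", agg).
		Size(0).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if ms, found := res.Aggregations.MatrixStats("matrixstats"); found {
		for _, f := range ms.Fields {
			// Correlation maps the other field names to coefficients in [-1, 1].
			fmt.Printf("%s: mean=%.2f correlation=%v\n", f.Name, f.Mean, f.Correlation)
		}
	}
}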
diff --git a/vendor/github.com/olivere/elastic/search_aggs_matrix_stats_test.go b/vendor/github.com/olivere/elastic/search_aggs_matrix_stats_test.go
new file mode 100644
index 000000000..28138fe02
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_matrix_stats_test.go
@@ -0,0 +1,53 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMatrixStatsAggregation(t *testing.T) {
+ agg := NewMatrixStatsAggregation().
+ Fields("poverty", "income").
+ Missing(map[string]interface{}{
+ "income": 50000,
+ }).
+ Mode("avg").
+ Format("0000.0").
+ ValueType("double")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"matrix_stats":{"fields":["poverty","income"],"format":"0000.0","missing":{"income":50000},"mode":"avg","value_type":"double"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatrixStatsAggregationWithMetaData(t *testing.T) {
+ agg := NewMatrixStatsAggregation().
+ Fields("poverty", "income").
+ Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"matrix_stats":{"fields":["poverty","income"]},"meta":{"name":"Oliver"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_avg.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_avg.go
new file mode 100644
index 000000000..2b764e065
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_avg.go
@@ -0,0 +1,101 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// AvgAggregation is a single-value metrics aggregation that computes
+// the average of numeric values that are extracted from the
+// aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by
+// a provided script.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-avg-aggregation.html
+type AvgAggregation struct {
+ field string
+ script *Script
+ format string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewAvgAggregation() *AvgAggregation {
+ return &AvgAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *AvgAggregation) Field(field string) *AvgAggregation {
+ a.field = field
+ return a
+}
+
+func (a *AvgAggregation) Script(script *Script) *AvgAggregation {
+ a.script = script
+ return a
+}
+
+func (a *AvgAggregation) Format(format string) *AvgAggregation {
+ a.format = format
+ return a
+}
+
+func (a *AvgAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *AvgAggregation) Meta(metaData map[string]interface{}) *AvgAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *AvgAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "avg_grade" : { "avg" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "avg" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["avg"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
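
Besides a field, the average can be computed from a script, as the doc comment notes. A sketch using the Script type from this package; the Painless source and parameter are illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "github.com/olivere/elastic"
)

func main() {
	// Average of grades corrected by a factor supplied as a script parameter.
	agg := elastic.NewAvgAggregation().
		Script(elastic.NewScript("doc['grade'].value * params.correction").
			Param("correction", 1.2))
	src, err := agg.Source()
	if err != nil {
		log.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
	// Emits an "avg" body whose "script" carries the source and params.
}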
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_avg_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_avg_test.go
new file mode 100644
index 000000000..784ff45dd
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_avg_test.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestAvgAggregation(t *testing.T) {
+ agg := NewAvgAggregation().Field("grade")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"avg":{"field":"grade"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestAvgAggregationWithFormat(t *testing.T) {
+ agg := NewAvgAggregation().Field("grade").Format("000.0")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"avg":{"field":"grade","format":"000.0"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestAvgAggregationWithMetaData(t *testing.T) {
+ agg := NewAvgAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"avg":{"field":"grade"},"meta":{"name":"Oliver"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality.go
new file mode 100644
index 000000000..3b999c849
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality.go
@@ -0,0 +1,120 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// CardinalityAggregation is a single-value metrics aggregation that
+// calculates an approximate count of distinct values.
+// Values can be extracted either from specific fields in the document
+// or generated by a script.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-cardinality-aggregation.html
+type CardinalityAggregation struct {
+ field string
+ script *Script
+ format string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ precisionThreshold *int64
+ rehash *bool
+}
+
+func NewCardinalityAggregation() *CardinalityAggregation {
+ return &CardinalityAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *CardinalityAggregation) Field(field string) *CardinalityAggregation {
+ a.field = field
+ return a
+}
+
+func (a *CardinalityAggregation) Script(script *Script) *CardinalityAggregation {
+ a.script = script
+ return a
+}
+
+func (a *CardinalityAggregation) Format(format string) *CardinalityAggregation {
+ a.format = format
+ return a
+}
+
+func (a *CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) *CardinalityAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *CardinalityAggregation) Meta(metaData map[string]interface{}) *CardinalityAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *CardinalityAggregation) PrecisionThreshold(threshold int64) *CardinalityAggregation {
+ a.precisionThreshold = &threshold
+ return a
+}
+
+func (a *CardinalityAggregation) Rehash(rehash bool) *CardinalityAggregation {
+ a.rehash = &rehash
+ return a
+}
+
+func (a *CardinalityAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "author_count" : {
+ // "cardinality" : { "field" : "author" }
+ // }
+ // }
+ // }
+ // This method returns only the "cardinality" : { "field" : "author" } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["cardinality"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if a.precisionThreshold != nil {
+ opts["precision_threshold"] = *a.precisionThreshold
+ }
+ if a.rehash != nil {
+ opts["rehash"] = *a.rehash
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
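
PrecisionThreshold trades memory for accuracy: counts below the threshold are near-exact, while above it the HyperLogLog++ estimate takes over (Elasticsearch caps the setting at 40000). A Source-only sketch, with the field name illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "github.com/olivere/elastic"
)

func main() {
	agg := elastic.NewCardinalityAggregation().
		Field("author.hash").
		PrecisionThreshold(40000) // maximum value supported by Elasticsearch
	src, err := agg.Source()
	if err != nil {
		log.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
	// {"cardinality":{"field":"author.hash","precision_threshold":40000}}
}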
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go
new file mode 100644
index 000000000..b5f8490b5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_cardinality_test.go
@@ -0,0 +1,78 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestCardinalityAggregation(t *testing.T) {
+ agg := NewCardinalityAggregation().Field("author.hash")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"cardinality":{"field":"author.hash"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestCardinalityAggregationWithOptions(t *testing.T) {
+ agg := NewCardinalityAggregation().Field("author.hash").PrecisionThreshold(100).Rehash(true)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"cardinality":{"field":"author.hash","precision_threshold":100,"rehash":true}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestCardinalityAggregationWithFormat(t *testing.T) {
+ agg := NewCardinalityAggregation().Field("author.hash").Format("00000")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"cardinality":{"field":"author.hash","format":"00000"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestCardinalityAggregationWithMetaData(t *testing.T) {
+ agg := NewCardinalityAggregation().Field("author.hash").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"cardinality":{"field":"author.hash"},"meta":{"name":"Oliver"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go
new file mode 100644
index 000000000..4e0bbe65a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats.go
@@ -0,0 +1,99 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExtendedStatsAggregation is a multi-value metrics aggregation that computes
+// an extended set of statistics over numeric values extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-extendedstats-aggregation.html
+type ExtendedStatsAggregation struct {
+ field string
+ script *Script
+ format string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewExtendedStatsAggregation() *ExtendedStatsAggregation {
+ return &ExtendedStatsAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *ExtendedStatsAggregation) Field(field string) *ExtendedStatsAggregation {
+ a.field = field
+ return a
+}
+
+func (a *ExtendedStatsAggregation) Script(script *Script) *ExtendedStatsAggregation {
+ a.script = script
+ return a
+}
+
+func (a *ExtendedStatsAggregation) Format(format string) *ExtendedStatsAggregation {
+ a.format = format
+ return a
+}
+
+func (a *ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *ExtendedStatsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *ExtendedStatsAggregation) Meta(metaData map[string]interface{}) *ExtendedStatsAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *ExtendedStatsAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "grades_stats" : { "extended_stats" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "extended_stats" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["extended_stats"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
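
A sketch of reading the extended metrics back, under the same assumptions as before (initialized client, illustrative "exams" index); Aggregations.ExtendedStats exposes variance and standard deviation as pointer fields:

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// gradeSpread reads extended stats; "exams" and "grade" are illustrative
// names, and client is assumed to be set up elsewhere.
func gradeSpread(ctx context.Context, client *elastic.Client) error {
	res, err := client.Search().
		Index("exams").
		Size(0).
		Aggregation("grades_stats", elastic.NewExtendedStatsAggregation().Field("grade")).
		Do(ctx)
	if err != nil {
		return err
	}
	// ExtendedStats adds variance and standard deviation on top of the
	// basic stats metrics.
	if s, found := res.Aggregations.ExtendedStats("grades_stats"); found &&
		s.Variance != nil && s.StdDeviation != nil {
		fmt.Printf("count=%d variance=%.2f stddev=%.2f\n", s.Count, *s.Variance, *s.StdDeviation)
	}
	return nil
}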
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go
new file mode 100644
index 000000000..76489630d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_extended_stats_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestExtendedStatsAggregation(t *testing.T) {
+ agg := NewExtendedStatsAggregation().Field("grade")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"extended_stats":{"field":"grade"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestExtendedStatsAggregationWithFormat(t *testing.T) {
+ agg := NewExtendedStatsAggregation().Field("grade").Format("000.0")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"extended_stats":{"field":"grade","format":"000.0"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go
new file mode 100644
index 000000000..406f2d000
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds.go
@@ -0,0 +1,105 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoBoundsAggregation is a metric aggregation that computes the
+// bounding box containing all geo_point values for a field.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-geobounds-aggregation.html
+type GeoBoundsAggregation struct {
+ field string
+ script *Script
+ wrapLongitude *bool
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewGeoBoundsAggregation() *GeoBoundsAggregation {
+ return &GeoBoundsAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *GeoBoundsAggregation) Field(field string) *GeoBoundsAggregation {
+ a.field = field
+ return a
+}
+
+func (a *GeoBoundsAggregation) Script(script *Script) *GeoBoundsAggregation {
+ a.script = script
+ return a
+}
+
+func (a *GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) *GeoBoundsAggregation {
+ a.wrapLongitude = &wrapLongitude
+ return a
+}
+
+func (a *GeoBoundsAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoBoundsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *GeoBoundsAggregation) Meta(metaData map[string]interface{}) *GeoBoundsAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *GeoBoundsAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "query" : {
+ // "match" : { "business_type" : "shop" }
+ // },
+ // "aggs" : {
+ // "viewport" : {
+ // "geo_bounds" : {
+	//          "field" : "location",
+	//          "wrap_longitude" : true
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "geo_bounds" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["geo_bounds"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.wrapLongitude != nil {
+ opts["wrap_longitude"] = *a.wrapLongitude
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
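
A hedged usage sketch: assuming an initialized client and an illustrative "shops" index with a geo_point field "location", the bounding box comes back via the library's Aggregations.GeoBounds accessor:

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// shopViewport computes a bounding box over all matching documents; the
// index and field names are illustrative.
func shopViewport(ctx context.Context, client *elastic.Client) error {
	agg := elastic.NewGeoBoundsAggregation().Field("location").WrapLongitude(true)
	res, err := client.Search().Index("shops").Size(0).
		Aggregation("viewport", agg).Do(ctx)
	if err != nil {
		return err
	}
	if vp, found := res.Aggregations.GeoBounds("viewport"); found {
		fmt.Printf("top-left (%f, %f), bottom-right (%f, %f)\n",
			vp.Bounds.TopLeft.Latitude, vp.Bounds.TopLeft.Longitude,
			vp.Bounds.BottomRight.Latitude, vp.Bounds.BottomRight.Longitude)
	}
	return nil
}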
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go
new file mode 100644
index 000000000..ea713c604
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_geo_bounds_test.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoBoundsAggregation(t *testing.T) {
+ agg := NewGeoBoundsAggregation().Field("location")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_bounds":{"field":"location"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoBoundsAggregationWithWrapLongitude(t *testing.T) {
+ agg := NewGeoBoundsAggregation().Field("location").WrapLongitude(true)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_bounds":{"field":"location","wrap_longitude":true}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoBoundsAggregationWithMetaData(t *testing.T) {
+ agg := NewGeoBoundsAggregation().Field("location").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_bounds":{"field":"location"},"meta":{"name":"Oliver"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_max.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_max.go
new file mode 100644
index 000000000..acdfa14a8
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_max.go
@@ -0,0 +1,100 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MaxAggregation is a single-value metrics aggregation that keeps track and
+// returns the maximum value among the numeric values extracted from
+// the aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by
+// a provided script.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-max-aggregation.html
+type MaxAggregation struct {
+ field string
+ script *Script
+ format string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewMaxAggregation() *MaxAggregation {
+ return &MaxAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *MaxAggregation) Field(field string) *MaxAggregation {
+ a.field = field
+ return a
+}
+
+func (a *MaxAggregation) Script(script *Script) *MaxAggregation {
+ a.script = script
+ return a
+}
+
+func (a *MaxAggregation) Format(format string) *MaxAggregation {
+ a.format = format
+ return a
+}
+
+func (a *MaxAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *MaxAggregation) Meta(metaData map[string]interface{}) *MaxAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *MaxAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "max_price" : { "max" : { "field" : "price" } }
+ // }
+ // }
+ // This method returns only the { "max" : { "field" : "price" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["max"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
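
A minimal sketch of the single-value lookup, assuming an initialized client and illustrative "products"/"price" names; Max is read back as a simple value metric:

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// highestPrice fetches the maximum of a numeric field; names are
// illustrative only.
func highestPrice(ctx context.Context, client *elastic.Client) error {
	res, err := client.Search().Index("products").Size(0).
		Aggregation("max_price", elastic.NewMaxAggregation().Field("price")).
		Do(ctx)
	if err != nil {
		return err
	}
	if m, found := res.Aggregations.Max("max_price"); found && m.Value != nil {
		fmt.Printf("max price: %.2f\n", *m.Value)
	}
	return nil
}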
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_max_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_max_test.go
new file mode 100644
index 000000000..773cc2e4b
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_max_test.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMaxAggregation(t *testing.T) {
+ agg := NewMaxAggregation().Field("price")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"max":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMaxAggregationWithFormat(t *testing.T) {
+ agg := NewMaxAggregation().Field("price").Format("00000.00")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"max":{"field":"price","format":"00000.00"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMaxAggregationWithMetaData(t *testing.T) {
+ agg := NewMaxAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"max":{"field":"price"},"meta":{"name":"Oliver"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_min.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_min.go
new file mode 100644
index 000000000..af63585da
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_min.go
@@ -0,0 +1,100 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MinAggregation is a single-value metrics aggregation that keeps track and
+// returns the minimum value among numeric values extracted from the
+// aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by a
+// provided script.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-min-aggregation.html
+type MinAggregation struct {
+ field string
+ script *Script
+ format string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewMinAggregation() *MinAggregation {
+ return &MinAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *MinAggregation) Field(field string) *MinAggregation {
+ a.field = field
+ return a
+}
+
+func (a *MinAggregation) Script(script *Script) *MinAggregation {
+ a.script = script
+ return a
+}
+
+func (a *MinAggregation) Format(format string) *MinAggregation {
+ a.format = format
+ return a
+}
+
+func (a *MinAggregation) SubAggregation(name string, subAggregation Aggregation) *MinAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *MinAggregation) Meta(metaData map[string]interface{}) *MinAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *MinAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "min_price" : { "min" : { "field" : "price" } }
+ // }
+ // }
+ // This method returns only the { "min" : { "field" : "price" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["min"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
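
The Script setter above computes the metric from a script rather than a stored field; a sketch with an illustrative Painless expression (the discount factor, index, and client setup are assumptions):

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// lowestDiscounted derives the minimum from a script instead of a field;
// all names and the expression are illustrative.
func lowestDiscounted(ctx context.Context, client *elastic.Client) error {
	agg := elastic.NewMinAggregation().
		Script(elastic.NewScript("doc['price'].value * 0.9"))
	res, err := client.Search().Index("products").Size(0).
		Aggregation("min_discounted", agg).Do(ctx)
	if err != nil {
		return err
	}
	if m, found := res.Aggregations.Min("min_discounted"); found && m.Value != nil {
		fmt.Printf("min discounted price: %.2f\n", *m.Value)
	}
	return nil
}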
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_min_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_min_test.go
new file mode 100644
index 000000000..fcde3817c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_min_test.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMinAggregation(t *testing.T) {
+ agg := NewMinAggregation().Field("price")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"min":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMinAggregationWithFormat(t *testing.T) {
+ agg := NewMinAggregation().Field("price").Format("00000.00")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"min":{"field":"price","format":"00000.00"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMinAggregationWithMetaData(t *testing.T) {
+ agg := NewMinAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"min":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go
new file mode 100644
index 000000000..674fc41f9
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks.go
@@ -0,0 +1,131 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PercentileRanksAggregation is a multi-value metrics aggregation that calculates percentile ranks over numeric values extracted from the aggregated documents.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-rank-aggregation.html
+type PercentileRanksAggregation struct {
+ field string
+ script *Script
+ format string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ values []float64
+ compression *float64
+ estimator string
+}
+
+func NewPercentileRanksAggregation() *PercentileRanksAggregation {
+ return &PercentileRanksAggregation{
+ subAggregations: make(map[string]Aggregation),
+ values: make([]float64, 0),
+ }
+}
+
+func (a *PercentileRanksAggregation) Field(field string) *PercentileRanksAggregation {
+ a.field = field
+ return a
+}
+
+func (a *PercentileRanksAggregation) Script(script *Script) *PercentileRanksAggregation {
+ a.script = script
+ return a
+}
+
+func (a *PercentileRanksAggregation) Format(format string) *PercentileRanksAggregation {
+ a.format = format
+ return a
+}
+
+func (a *PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentileRanksAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *PercentileRanksAggregation) Meta(metaData map[string]interface{}) *PercentileRanksAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *PercentileRanksAggregation) Values(values ...float64) *PercentileRanksAggregation {
+ a.values = append(a.values, values...)
+ return a
+}
+
+func (a *PercentileRanksAggregation) Compression(compression float64) *PercentileRanksAggregation {
+ a.compression = &compression
+ return a
+}
+
+func (a *PercentileRanksAggregation) Estimator(estimator string) *PercentileRanksAggregation {
+ a.estimator = estimator
+ return a
+}
+
+func (a *PercentileRanksAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "load_time_outlier" : {
+ // "percentile_ranks" : {
+	//         "field" : "load_time",
+ // "values" : [15, 30]
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the
+ // { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["percentile_ranks"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.values) > 0 {
+ opts["values"] = a.values
+ }
+ if a.compression != nil {
+ opts["compression"] = *a.compression
+ }
+ if a.estimator != "" {
+ opts["estimator"] = a.estimator
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
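
Values(15, 30) asks what fraction of documents fall at or below those values; a sketch under the usual assumptions (initialized client, illustrative "pages" index), reading the map returned by Aggregations.PercentileRanks:

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// loadTimeRanks reports the percentile rank for each requested value;
// names are illustrative.
func loadTimeRanks(ctx context.Context, client *elastic.Client) error {
	agg := elastic.NewPercentileRanksAggregation().Field("load_time").Values(15, 30)
	res, err := client.Search().Index("pages").Size(0).
		Aggregation("load_time_ranks", agg).Do(ctx)
	if err != nil {
		return err
	}
	if pr, found := res.Aggregations.PercentileRanks("load_time_ranks"); found {
		// Keys are the requested values, entries are the ranks in percent.
		for value, rank := range pr.Values {
			fmt.Printf("load_time <= %s: %.1f%%\n", value, rank)
		}
	}
	return nil
}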
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go
new file mode 100644
index 000000000..a4bac02b5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentile_ranks_test.go
@@ -0,0 +1,78 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestPercentileRanksAggregation(t *testing.T) {
+ agg := NewPercentileRanksAggregation().Field("load_time")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentile_ranks":{"field":"load_time"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercentileRanksAggregationWithCustomValues(t *testing.T) {
+ agg := NewPercentileRanksAggregation().Field("load_time").Values(15, 30)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentile_ranks":{"field":"load_time","values":[15,30]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercentileRanksAggregationWithFormat(t *testing.T) {
+ agg := NewPercentileRanksAggregation().Field("load_time").Format("000.0")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentile_ranks":{"field":"load_time","format":"000.0"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercentileRanksAggregationWithMetaData(t *testing.T) {
+ agg := NewPercentileRanksAggregation().Field("load_time").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"percentile_ranks":{"field":"load_time"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles.go
new file mode 100644
index 000000000..a1d78c8f2
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles.go
@@ -0,0 +1,130 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PercentilesAggregation is a multi-value metrics aggregation that calculates one or more percentiles over numeric values extracted from the aggregated documents.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-aggregation.html
+type PercentilesAggregation struct {
+ field string
+ script *Script
+ format string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ percentiles []float64
+ compression *float64
+ estimator string
+}
+
+func NewPercentilesAggregation() *PercentilesAggregation {
+ return &PercentilesAggregation{
+ subAggregations: make(map[string]Aggregation),
+ percentiles: make([]float64, 0),
+ }
+}
+
+func (a *PercentilesAggregation) Field(field string) *PercentilesAggregation {
+ a.field = field
+ return a
+}
+
+func (a *PercentilesAggregation) Script(script *Script) *PercentilesAggregation {
+ a.script = script
+ return a
+}
+
+func (a *PercentilesAggregation) Format(format string) *PercentilesAggregation {
+ a.format = format
+ return a
+}
+
+func (a *PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentilesAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *PercentilesAggregation) Meta(metaData map[string]interface{}) *PercentilesAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *PercentilesAggregation) Percentiles(percentiles ...float64) *PercentilesAggregation {
+ a.percentiles = append(a.percentiles, percentiles...)
+ return a
+}
+
+func (a *PercentilesAggregation) Compression(compression float64) *PercentilesAggregation {
+ a.compression = &compression
+ return a
+}
+
+func (a *PercentilesAggregation) Estimator(estimator string) *PercentilesAggregation {
+ a.estimator = estimator
+ return a
+}
+
+func (a *PercentilesAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "load_time_outlier" : {
+ // "percentiles" : {
+ // "field" : "load_time"
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the
+ // { "percentiles" : { "field" : "load_time" } }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["percentiles"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.percentiles) > 0 {
+ opts["percents"] = a.percentiles
+ }
+ if a.compression != nil {
+ opts["compression"] = *a.compression
+ }
+ if a.estimator != "" {
+ opts["estimator"] = a.estimator
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
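
A sketch of requesting selected percentiles (note the Percentiles setter is serialized as "percents", as the Source method above shows); client and names are assumptions:

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// loadTimePercentiles reads selected percentiles back as a map keyed by
// percentile; names are illustrative.
func loadTimePercentiles(ctx context.Context, client *elastic.Client) error {
	agg := elastic.NewPercentilesAggregation().Field("load_time").
		Percentiles(50, 95, 99) // sent as "percents" in the request body
	res, err := client.Search().Index("pages").Size(0).
		Aggregation("load_time_outlier", agg).Do(ctx)
	if err != nil {
		return err
	}
	if p, found := res.Aggregations.Percentiles("load_time_outlier"); found {
		for pct, value := range p.Values {
			fmt.Printf("p%s = %.2f\n", pct, value)
		}
	}
	return nil
}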
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go
new file mode 100644
index 000000000..93df1dd29
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_percentiles_test.go
@@ -0,0 +1,78 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestPercentilesAggregation(t *testing.T) {
+ agg := NewPercentilesAggregation().Field("price")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentiles":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercentilesAggregationWithCustomPercents(t *testing.T) {
+ agg := NewPercentilesAggregation().Field("price").Percentiles(0.2, 0.5, 0.9)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentiles":{"field":"price","percents":[0.2,0.5,0.9]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercentilesAggregationWithFormat(t *testing.T) {
+ agg := NewPercentilesAggregation().Field("price").Format("00000.00")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentiles":{"field":"price","format":"00000.00"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercentilesAggregationWithMetaData(t *testing.T) {
+ agg := NewPercentilesAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"percentiles":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_stats.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_stats.go
new file mode 100644
index 000000000..b9bbe7cff
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_stats.go
@@ -0,0 +1,99 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// StatsAggregation is a multi-value metrics aggregation that computes stats
+// over numeric values extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-stats-aggregation.html
+type StatsAggregation struct {
+ field string
+ script *Script
+ format string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewStatsAggregation() *StatsAggregation {
+ return &StatsAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *StatsAggregation) Field(field string) *StatsAggregation {
+ a.field = field
+ return a
+}
+
+func (a *StatsAggregation) Script(script *Script) *StatsAggregation {
+ a.script = script
+ return a
+}
+
+func (a *StatsAggregation) Format(format string) *StatsAggregation {
+ a.format = format
+ return a
+}
+
+func (a *StatsAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *StatsAggregation) Meta(metaData map[string]interface{}) *StatsAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *StatsAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "grades_stats" : { "stats" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "stats" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["stats"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
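
A sketch of the multi-value stats metric under the usual assumptions (initialized client, illustrative "exams" index); Count is a plain int64 while the other metrics are pointers:

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// gradeStats reads the stats metric; names are illustrative.
func gradeStats(ctx context.Context, client *elastic.Client) error {
	res, err := client.Search().Index("exams").Size(0).
		Aggregation("grades_stats", elastic.NewStatsAggregation().Field("grade")).
		Do(ctx)
	if err != nil {
		return err
	}
	if s, found := res.Aggregations.Stats("grades_stats"); found &&
		s.Min != nil && s.Max != nil && s.Avg != nil {
		fmt.Printf("count=%d min=%.1f max=%.1f avg=%.1f\n", s.Count, *s.Min, *s.Max, *s.Avg)
	}
	return nil
}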
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_stats_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_stats_test.go
new file mode 100644
index 000000000..5cff372d4
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_stats_test.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestStatsAggregation(t *testing.T) {
+ agg := NewStatsAggregation().Field("grade")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"stats":{"field":"grade"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestStatsAggregationWithFormat(t *testing.T) {
+ agg := NewStatsAggregation().Field("grade").Format("0000.0")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"stats":{"field":"grade","format":"0000.0"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestStatsAggregationWithMetaData(t *testing.T) {
+ agg := NewStatsAggregation().Field("grade").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"stats":{"field":"grade"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_sum.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_sum.go
new file mode 100644
index 000000000..e1c07c9c1
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_sum.go
@@ -0,0 +1,99 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SumAggregation is a single-value metrics aggregation that sums up
+// numeric values that are extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-sum-aggregation.html
+type SumAggregation struct {
+ field string
+ script *Script
+ format string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewSumAggregation() *SumAggregation {
+ return &SumAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *SumAggregation) Field(field string) *SumAggregation {
+ a.field = field
+ return a
+}
+
+func (a *SumAggregation) Script(script *Script) *SumAggregation {
+ a.script = script
+ return a
+}
+
+func (a *SumAggregation) Format(format string) *SumAggregation {
+ a.format = format
+ return a
+}
+
+func (a *SumAggregation) SubAggregation(name string, subAggregation Aggregation) *SumAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *SumAggregation) Meta(metaData map[string]interface{}) *SumAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *SumAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "intraday_return" : { "sum" : { "field" : "change" } }
+ // }
+ // }
+ // This method returns only the { "sum" : { "field" : "change" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["sum"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
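
Because every builder here supports SubAggregation, the sum is often nested under a bucket aggregation; a sketch using the library's terms aggregation (defined elsewhere in this package), with illustrative names and an assumed client:

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// returnsByTicker nests a sum under a terms bucket; the "stocks" index
// and field names are illustrative.
func returnsByTicker(ctx context.Context, client *elastic.Client) error {
	agg := elastic.NewTermsAggregation().Field("ticker").
		SubAggregation("intraday_return", elastic.NewSumAggregation().Field("change"))
	res, err := client.Search().Index("stocks").Size(0).
		Aggregation("by_ticker", agg).Do(ctx)
	if err != nil {
		return err
	}
	if terms, found := res.Aggregations.Terms("by_ticker"); found {
		// Each bucket embeds Aggregations, so the nested sum is read per bucket.
		for _, bucket := range terms.Buckets {
			if sum, ok := bucket.Sum("intraday_return"); ok && sum.Value != nil {
				fmt.Printf("%v: %.2f\n", bucket.Key, *sum.Value)
			}
		}
	}
	return nil
}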
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_sum_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_sum_test.go
new file mode 100644
index 000000000..ff0e42545
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_sum_test.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSumAggregation(t *testing.T) {
+ agg := NewSumAggregation().Field("price")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"sum":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSumAggregationWithFormat(t *testing.T) {
+ agg := NewSumAggregation().Field("price").Format("00000.00")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"sum":{"field":"price","format":"00000.00"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSumAggregationWithMetaData(t *testing.T) {
+ agg := NewSumAggregation().Field("price").Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"sum":{"field":"price"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits.go
new file mode 100644
index 000000000..2b181895e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits.go
@@ -0,0 +1,143 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TopHitsAggregation keeps track of the most relevant document
+// being aggregated. This aggregator is intended to be used as a
+// sub aggregator, so that the top matching documents
+// can be aggregated per bucket.
+//
+// It can effectively be used to group result sets by certain fields via
+// a bucket aggregator. One or more bucket aggregators determine the
+// properties by which the result set is sliced.
+//
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-top-hits-aggregation.html
+type TopHitsAggregation struct {
+ searchSource *SearchSource
+}
+
+func NewTopHitsAggregation() *TopHitsAggregation {
+ return &TopHitsAggregation{
+ searchSource: NewSearchSource(),
+ }
+}
+
+func (a *TopHitsAggregation) From(from int) *TopHitsAggregation {
+ a.searchSource = a.searchSource.From(from)
+ return a
+}
+
+func (a *TopHitsAggregation) Size(size int) *TopHitsAggregation {
+ a.searchSource = a.searchSource.Size(size)
+ return a
+}
+
+func (a *TopHitsAggregation) TrackScores(trackScores bool) *TopHitsAggregation {
+ a.searchSource = a.searchSource.TrackScores(trackScores)
+ return a
+}
+
+func (a *TopHitsAggregation) Explain(explain bool) *TopHitsAggregation {
+ a.searchSource = a.searchSource.Explain(explain)
+ return a
+}
+
+func (a *TopHitsAggregation) Version(version bool) *TopHitsAggregation {
+ a.searchSource = a.searchSource.Version(version)
+ return a
+}
+
+func (a *TopHitsAggregation) NoStoredFields() *TopHitsAggregation {
+ a.searchSource = a.searchSource.NoStoredFields()
+ return a
+}
+
+func (a *TopHitsAggregation) FetchSource(fetchSource bool) *TopHitsAggregation {
+ a.searchSource = a.searchSource.FetchSource(fetchSource)
+ return a
+}
+
+func (a *TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) *TopHitsAggregation {
+ a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext)
+ return a
+}
+
+func (a *TopHitsAggregation) DocvalueFields(docvalueFields ...string) *TopHitsAggregation {
+ a.searchSource = a.searchSource.DocvalueFields(docvalueFields...)
+ return a
+}
+
+func (a *TopHitsAggregation) DocvalueField(docvalueField string) *TopHitsAggregation {
+ a.searchSource = a.searchSource.DocvalueField(docvalueField)
+ return a
+}
+
+func (a *TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) *TopHitsAggregation {
+ a.searchSource = a.searchSource.ScriptFields(scriptFields...)
+ return a
+}
+
+func (a *TopHitsAggregation) ScriptField(scriptField *ScriptField) *TopHitsAggregation {
+ a.searchSource = a.searchSource.ScriptField(scriptField)
+ return a
+}
+
+func (a *TopHitsAggregation) Sort(field string, ascending bool) *TopHitsAggregation {
+ a.searchSource = a.searchSource.Sort(field, ascending)
+ return a
+}
+
+func (a *TopHitsAggregation) SortWithInfo(info SortInfo) *TopHitsAggregation {
+ a.searchSource = a.searchSource.SortWithInfo(info)
+ return a
+}
+
+func (a *TopHitsAggregation) SortBy(sorter ...Sorter) *TopHitsAggregation {
+ a.searchSource = a.searchSource.SortBy(sorter...)
+ return a
+}
+
+func (a *TopHitsAggregation) Highlight(highlight *Highlight) *TopHitsAggregation {
+ a.searchSource = a.searchSource.Highlight(highlight)
+ return a
+}
+
+func (a *TopHitsAggregation) Highlighter() *Highlight {
+ return a.searchSource.Highlighter()
+}
+
+func (a *TopHitsAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs": {
+ // "top_tag_hits": {
+ // "top_hits": {
+ // "sort": [
+ // {
+ // "last_activity_date": {
+ // "order": "desc"
+ // }
+ // }
+ // ],
+ // "_source": {
+	//                    "includes": [
+ // "title"
+ // ]
+ // },
+ // "size" : 1
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "top_hits" : { ... } } part.
+
+ source := make(map[string]interface{})
+ src, err := a.searchSource.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["top_hits"] = src
+ return source, nil
+}
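
A sketch of the bucket-plus-top_hits pairing the doc comment describes, mirroring the test below; the "posts" index, field names, and client setup are assumptions:

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// latestPostPerUser keeps the single newest hit per terms bucket; all
// names are illustrative.
func latestPostPerUser(ctx context.Context, client *elastic.Client) error {
	top := elastic.NewTopHitsAggregation().
		Sort("last_activity_date", false). // newest first
		FetchSourceContext(elastic.NewFetchSourceContext(true).Include("title")).
		Size(1)
	agg := elastic.NewTermsAggregation().Field("user").
		SubAggregation("latest", top)
	res, err := client.Search().Index("posts").Size(0).
		Aggregation("by_user", agg).Do(ctx)
	if err != nil {
		return err
	}
	if terms, found := res.Aggregations.Terms("by_user"); found {
		for _, bucket := range terms.Buckets {
			if th, ok := bucket.TopHits("latest"); ok && th.Hits != nil {
				for _, hit := range th.Hits.Hits {
					fmt.Printf("%v: %s\n", bucket.Key, *hit.Source)
				}
			}
		}
	}
	return nil
}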
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go
new file mode 100644
index 000000000..861f079fe
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_top_hits_test.go
@@ -0,0 +1,31 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTopHitsAggregation(t *testing.T) {
+ fsc := NewFetchSourceContext(true).Include("title")
+ agg := NewTopHitsAggregation().
+ Sort("last_activity_date", false).
+ FetchSourceContext(fsc).
+ Size(1)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"top_hits":{"_source":{"includes":["title"]},"size":1,"sort":[{"last_activity_date":{"order":"desc"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count.go
new file mode 100644
index 000000000..d56f1f873
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count.go
@@ -0,0 +1,102 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ValueCountAggregation is a single-value metrics aggregation that counts
+// the number of values that are extracted from the aggregated documents.
+// These values can be extracted either from specific fields in the documents,
+// or be generated by a provided script. Typically, this aggregator will be
+// used in conjunction with other single-value aggregations.
+// For example, when computing the avg, one might be interested in the
+// number of values the average is computed over.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-valuecount-aggregation.html
+type ValueCountAggregation struct {
+ field string
+ script *Script
+ format string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+func NewValueCountAggregation() *ValueCountAggregation {
+ return &ValueCountAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *ValueCountAggregation) Field(field string) *ValueCountAggregation {
+ a.field = field
+ return a
+}
+
+func (a *ValueCountAggregation) Script(script *Script) *ValueCountAggregation {
+ a.script = script
+ return a
+}
+
+func (a *ValueCountAggregation) Format(format string) *ValueCountAggregation {
+ a.format = format
+ return a
+}
+
+func (a *ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) *ValueCountAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *ValueCountAggregation) Meta(metaData map[string]interface{}) *ValueCountAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *ValueCountAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "grades_count" : { "value_count" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "value_count" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["value_count"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["script"] = src
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
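
A sketch pairing value_count with the avg aggregation defined elsewhere in this library, per the doc comment's example; index and field names are illustrative and client is assumed initialized:

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// gradeCount reports how many values the average was computed over;
// names are illustrative.
func gradeCount(ctx context.Context, client *elastic.Client) error {
	res, err := client.Search().Index("exams").Size(0).
		Aggregation("grades_count", elastic.NewValueCountAggregation().Field("grade")).
		Aggregation("grades_avg", elastic.NewAvgAggregation().Field("grade")).
		Do(ctx)
	if err != nil {
		return err
	}
	count, _ := res.Aggregations.ValueCount("grades_count")
	avg, _ := res.Aggregations.Avg("grades_avg")
	if count != nil && count.Value != nil && avg != nil && avg.Value != nil {
		fmt.Printf("avg %.1f over %.0f values\n", *avg.Value, *count.Value)
	}
	return nil
}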
diff --git a/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go b/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go
new file mode 100644
index 000000000..18d2ba119
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_metrics_value_count_test.go
@@ -0,0 +1,63 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestValueCountAggregation(t *testing.T) {
+ agg := NewValueCountAggregation().Field("grade")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"value_count":{"field":"grade"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestValueCountAggregationWithFormat(t *testing.T) {
+ // Format comes with 1.5.0+
+ agg := NewValueCountAggregation().Field("grade").Format("0000.0")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"value_count":{"field":"grade","format":"0000.0"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestValueCountAggregationWithMetaData(t *testing.T) {
+ agg := NewValueCountAggregation().Field("grade")
+ agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"value_count":{"field":"grade"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go
new file mode 100644
index 000000000..f37a9bdb8
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket.go
@@ -0,0 +1,113 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// AvgBucketAggregation is a sibling pipeline aggregation which calculates
+// the (mean) average value of a specified metric in a sibling aggregation.
+// The specified metric must be numeric and the sibling aggregation must
+// be a multi-bucket aggregation.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-avg-bucket-aggregation.html
+type AvgBucketAggregation struct {
+ format string
+ gapPolicy string
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ bucketsPaths []string
+}
+
+// NewAvgBucketAggregation creates and initializes a new AvgBucketAggregation.
+func NewAvgBucketAggregation() *AvgBucketAggregation {
+ return &AvgBucketAggregation{
+ subAggregations: make(map[string]Aggregation),
+ bucketsPaths: make([]string, 0),
+ }
+}
+
+func (a *AvgBucketAggregation) Format(format string) *AvgBucketAggregation {
+ a.format = format
+ return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values are "insert_zeros" and "skip". The default is "insert_zeros".
+func (a *AvgBucketAggregation) GapPolicy(gapPolicy string) *AvgBucketAggregation {
+ a.gapPolicy = gapPolicy
+ return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *AvgBucketAggregation) GapInsertZeros() *AvgBucketAggregation {
+ a.gapPolicy = "insert_zeros"
+ return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *AvgBucketAggregation) GapSkip() *AvgBucketAggregation {
+ a.gapPolicy = "skip"
+ return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *AvgBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgBucketAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *AvgBucketAggregation) Meta(metaData map[string]interface{}) *AvgBucketAggregation {
+ a.meta = metaData
+ return a
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (a *AvgBucketAggregation) BucketsPath(bucketsPaths ...string) *AvgBucketAggregation {
+ a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
+ return a
+}
+
+func (a *AvgBucketAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["avg_bucket"] = params
+
+ if a.format != "" {
+ params["format"] = a.format
+ }
+ if a.gapPolicy != "" {
+ params["gap_policy"] = a.gapPolicy
+ }
+
+ // Add buckets paths
+ switch len(a.bucketsPaths) {
+ case 0:
+ case 1:
+ params["buckets_path"] = a.bucketsPaths[0]
+ default:
+ params["buckets_path"] = a.bucketsPaths
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
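
A sketch of the sibling wiring: the pipeline aggregation sits next to a date histogram and addresses the inner metric with "bucket>metric" path syntax. The "sales" index, field names, interval, and client setup are illustrative assumptions:

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic"
)

// avgMonthlySales averages a per-bucket sum across histogram buckets;
// names are illustrative.
func avgMonthlySales(ctx context.Context, client *elastic.Client) error {
	perMonth := elastic.NewDateHistogramAggregation().
		Field("timestamp").
		Interval("month").
		SubAggregation("monthly_sum", elastic.NewSumAggregation().Field("price"))
	// The pipeline aggregation is a sibling of the histogram, not nested
	// inside it, and references its inner metric by path.
	avg := elastic.NewAvgBucketAggregation().BucketsPath("sales_per_month>monthly_sum")
	res, err := client.Search().Index("sales").Size(0).
		Aggregation("sales_per_month", perMonth).
		Aggregation("avg_monthly_sales", avg).
		Do(ctx)
	if err != nil {
		return err
	}
	if v, found := res.Aggregations.AvgBucket("avg_monthly_sales"); found && v.Value != nil {
		fmt.Printf("average monthly sales: %.2f\n", *v.Value)
	}
	return nil
}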
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go
new file mode 100644
index 000000000..019b8f1ad
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_avg_bucket_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestAvgBucketAggregation(t *testing.T) {
+ agg := NewAvgBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"avg_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go
new file mode 100644
index 000000000..34e356964
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script.go
@@ -0,0 +1,132 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// BucketScriptAggregation is a parent pipeline aggregation which executes
+// a script which can perform per bucket computations on specified metrics
+// in the parent multi-bucket aggregation. The specified metric must be
+// numeric and the script must return a numeric value.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-bucket-script-aggregation.html
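+//
+// A sketch of per-bucket math, assuming sibling metrics named "t-shirts>sales"
+// and "total_sales" exist in the enclosing histogram; the Elasticsearch 6.0
+// reference addresses each buckets_path variable via params in painless:
+//
+//    agg := NewBucketScriptAggregation().
+//        AddBucketsPath("tShirtSales", "t-shirts>sales").
+//        AddBucketsPath("totalSales", "total_sales").
+//        Script(NewScript("params.tShirtSales / params.totalSales * 100"))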
+type BucketScriptAggregation struct {
+ format string
+ gapPolicy string
+ script *Script
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ bucketsPathsMap map[string]string
+}
+
+// NewBucketScriptAggregation creates and initializes a new BucketScriptAggregation.
+func NewBucketScriptAggregation() *BucketScriptAggregation {
+ return &BucketScriptAggregation{
+ subAggregations: make(map[string]Aggregation),
+ bucketsPathsMap: make(map[string]string),
+ }
+}
+
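+// Format sets the format to apply to the output value of this aggregation.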
+func (a *BucketScriptAggregation) Format(format string) *BucketScriptAggregation {
+ a.format = format
+ return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "skip".
+func (a *BucketScriptAggregation) GapPolicy(gapPolicy string) *BucketScriptAggregation {
+ a.gapPolicy = gapPolicy
+ return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *BucketScriptAggregation) GapInsertZeros() *BucketScriptAggregation {
+ a.gapPolicy = "insert_zeros"
+ return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *BucketScriptAggregation) GapSkip() *BucketScriptAggregation {
+ a.gapPolicy = "skip"
+ return a
+}
+
+// Script is the script to run.
+func (a *BucketScriptAggregation) Script(script *Script) *BucketScriptAggregation {
+ a.script = script
+ return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *BucketScriptAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketScriptAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *BucketScriptAggregation) Meta(metaData map[string]interface{}) *BucketScriptAggregation {
+ a.meta = metaData
+ return a
+}
+
+// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator.
+func (a *BucketScriptAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketScriptAggregation {
+ a.bucketsPathsMap = bucketsPathsMap
+ return a
+}
+
+// AddBucketsPath adds a bucket path to use for this pipeline aggregator.
+func (a *BucketScriptAggregation) AddBucketsPath(name, path string) *BucketScriptAggregation {
+ if a.bucketsPathsMap == nil {
+ a.bucketsPathsMap = make(map[string]string)
+ }
+ a.bucketsPathsMap[name] = path
+ return a
+}
+
+func (a *BucketScriptAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["bucket_script"] = params
+
+ if a.format != "" {
+ params["format"] = a.format
+ }
+ if a.gapPolicy != "" {
+ params["gap_policy"] = a.gapPolicy
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ params["script"] = src
+ }
+
+ // Add buckets paths
+ if len(a.bucketsPathsMap) > 0 {
+ params["buckets_path"] = a.bucketsPathsMap
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go
new file mode 100644
index 000000000..3c101c706
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_script_test.go
@@ -0,0 +1,30 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestBucketScriptAggregation(t *testing.T) {
+ agg := NewBucketScriptAggregation().
+ AddBucketsPath("tShirtSales", "t-shirts>sales").
+ AddBucketsPath("totalSales", "total_sales").
+ Script(NewScript("tShirtSales / totalSales * 100"))
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"bucket_script":{"buckets_path":{"tShirtSales":"t-shirts\u003esales","totalSales":"total_sales"},"script":{"source":"tShirtSales / totalSales * 100"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go
new file mode 100644
index 000000000..233414d70
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector.go
@@ -0,0 +1,134 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// BucketSelectorAggregation is a parent pipeline aggregation which
+// determines whether the current bucket will be retained in the parent
+// multi-bucket aggregation. The specific metric must be numeric and
+// the script must return a boolean value. If the script language is
+// expression then a numeric return value is permitted. In this case 0.0
+// will be evaluated as false and all other values will evaluate to true.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-bucket-selector-aggregation.html
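+//
+// A sketch of filtering histogram buckets by a metric, with illustrative
+// names following the Elasticsearch reference:
+//
+//    histo := NewDateHistogramAggregation().Field("date").Interval("month").
+//        SubAggregation("total_sales", NewSumAggregation().Field("price")).
+//        SubAggregation("sales_bucket_filter",
+//            NewBucketSelectorAggregation().
+//                AddBucketsPath("totalSales", "total_sales").
+//                Script(NewScript("params.totalSales > 200")))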
+type BucketSelectorAggregation struct {
+ format string
+ gapPolicy string
+ script *Script
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ bucketsPathsMap map[string]string
+}
+
+// NewBucketSelectorAggregation creates and initializes a new BucketSelectorAggregation.
+func NewBucketSelectorAggregation() *BucketSelectorAggregation {
+ return &BucketSelectorAggregation{
+ subAggregations: make(map[string]Aggregation),
+ bucketsPathsMap: make(map[string]string),
+ }
+}
+
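+// Format sets the format to apply to the output value of this aggregation.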
+func (a *BucketSelectorAggregation) Format(format string) *BucketSelectorAggregation {
+ a.format = format
+ return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "skip".
+func (a *BucketSelectorAggregation) GapPolicy(gapPolicy string) *BucketSelectorAggregation {
+ a.gapPolicy = gapPolicy
+ return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *BucketSelectorAggregation) GapInsertZeros() *BucketSelectorAggregation {
+ a.gapPolicy = "insert_zeros"
+ return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *BucketSelectorAggregation) GapSkip() *BucketSelectorAggregation {
+ a.gapPolicy = "skip"
+ return a
+}
+
+// Script is the script to run.
+func (a *BucketSelectorAggregation) Script(script *Script) *BucketSelectorAggregation {
+ a.script = script
+ return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *BucketSelectorAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketSelectorAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *BucketSelectorAggregation) Meta(metaData map[string]interface{}) *BucketSelectorAggregation {
+ a.meta = metaData
+ return a
+}
+
+// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator.
+func (a *BucketSelectorAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketSelectorAggregation {
+ a.bucketsPathsMap = bucketsPathsMap
+ return a
+}
+
+// AddBucketsPath adds a bucket path to use for this pipeline aggregator.
+func (a *BucketSelectorAggregation) AddBucketsPath(name, path string) *BucketSelectorAggregation {
+ if a.bucketsPathsMap == nil {
+ a.bucketsPathsMap = make(map[string]string)
+ }
+ a.bucketsPathsMap[name] = path
+ return a
+}
+
+func (a *BucketSelectorAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["bucket_selector"] = params
+
+ if a.format != "" {
+ params["format"] = a.format
+ }
+ if a.gapPolicy != "" {
+ params["gap_policy"] = a.gapPolicy
+ }
+ if a.script != nil {
+ src, err := a.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ params["script"] = src
+ }
+
+ // Add buckets paths
+ if len(a.bucketsPathsMap) > 0 {
+ params["buckets_path"] = a.bucketsPathsMap
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go
new file mode 100644
index 000000000..e378c2832
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_bucket_selector_test.go
@@ -0,0 +1,29 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestBucketSelectorAggregation(t *testing.T) {
+ agg := NewBucketSelectorAggregation().
+ AddBucketsPath("totalSales", "total_sales").
+ Script(NewScript("totalSales >= 1000"))
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"bucket_selector":{"buckets_path":{"totalSales":"total_sales"},"script":{"source":"totalSales \u003e= 1000"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go
new file mode 100644
index 000000000..80a1db42d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum.go
@@ -0,0 +1,90 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// CumulativeSumAggregation is a parent pipeline aggregation which calculates
+// the cumulative sum of a specified metric in a parent histogram (or date_histogram)
+// aggregation. The specified metric must be numeric and the enclosing
+// histogram must have min_doc_count set to 0 (default for histogram aggregations).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-cumulative-sum-aggregation.html
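+//
+// A sketch of the typical placement inside the histogram it reads from
+// (field and names are illustrative):
+//
+//    histo := NewDateHistogramAggregation().Field("date").Interval("month").
+//        SubAggregation("sales", NewSumAggregation().Field("price")).
+//        SubAggregation("cumulative_sales",
+//            NewCumulativeSumAggregation().BucketsPath("sales"))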
+type CumulativeSumAggregation struct {
+ format string
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ bucketsPaths []string
+}
+
+// NewCumulativeSumAggregation creates and initializes a new CumulativeSumAggregation.
+func NewCumulativeSumAggregation() *CumulativeSumAggregation {
+ return &CumulativeSumAggregation{
+ subAggregations: make(map[string]Aggregation),
+ bucketsPaths: make([]string, 0),
+ }
+}
+
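+// Format sets the format to apply to the output value of this aggregation.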
+func (a *CumulativeSumAggregation) Format(format string) *CumulativeSumAggregation {
+ a.format = format
+ return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *CumulativeSumAggregation) SubAggregation(name string, subAggregation Aggregation) *CumulativeSumAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *CumulativeSumAggregation) Meta(metaData map[string]interface{}) *CumulativeSumAggregation {
+ a.meta = metaData
+ return a
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (a *CumulativeSumAggregation) BucketsPath(bucketsPaths ...string) *CumulativeSumAggregation {
+ a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
+ return a
+}
+
+func (a *CumulativeSumAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["cumulative_sum"] = params
+
+ if a.format != "" {
+ params["format"] = a.format
+ }
+
+ // Add buckets paths
+ switch len(a.bucketsPaths) {
+ case 0:
+ case 1:
+ params["buckets_path"] = a.bucketsPaths[0]
+ default:
+ params["buckets_path"] = a.bucketsPaths
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go
new file mode 100644
index 000000000..69a215d43
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_cumulative_sum_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestCumulativeSumAggregation(t *testing.T) {
+ agg := NewCumulativeSumAggregation().BucketsPath("sales")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"cumulative_sum":{"buckets_path":"sales"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative.go
new file mode 100644
index 000000000..ee7114e25
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative.go
@@ -0,0 +1,124 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// DerivativeAggregation is a parent pipeline aggregation which calculates
+// the derivative of a specified metric in a parent histogram (or date_histogram)
+// aggregation. The specified metric must be numeric and the enclosing
+// histogram must have min_doc_count set to 0 (default for histogram aggregations).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-derivative-aggregation.html
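+//
+// A sketch with a normalized derivative; Unit("1d") additionally reports the
+// derivative as change per day, regardless of the histogram interval (field
+// and names are illustrative):
+//
+//    histo := NewDateHistogramAggregation().Field("date").Interval("month").
+//        SubAggregation("sales", NewSumAggregation().Field("price")).
+//        SubAggregation("sales_deriv",
+//            NewDerivativeAggregation().BucketsPath("sales").Unit("1d"))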
+type DerivativeAggregation struct {
+ format string
+ gapPolicy string
+ unit string
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ bucketsPaths []string
+}
+
+// NewDerivativeAggregation creates and initializes a new DerivativeAggregation.
+func NewDerivativeAggregation() *DerivativeAggregation {
+ return &DerivativeAggregation{
+ subAggregations: make(map[string]Aggregation),
+ bucketsPaths: make([]string, 0),
+ }
+}
+
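+// Format sets the format to apply to the output value of this aggregation.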
+func (a *DerivativeAggregation) Format(format string) *DerivativeAggregation {
+ a.format = format
+ return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "skip".
+func (a *DerivativeAggregation) GapPolicy(gapPolicy string) *DerivativeAggregation {
+ a.gapPolicy = gapPolicy
+ return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *DerivativeAggregation) GapInsertZeros() *DerivativeAggregation {
+ a.gapPolicy = "insert_zeros"
+ return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *DerivativeAggregation) GapSkip() *DerivativeAggregation {
+ a.gapPolicy = "skip"
+ return a
+}
+
+// Unit sets the unit in which the derivative is reported, e.g. "1d" or "1y".
+// It is only useful when calculating the derivative using a date_histogram.
+func (a *DerivativeAggregation) Unit(unit string) *DerivativeAggregation {
+ a.unit = unit
+ return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *DerivativeAggregation) SubAggregation(name string, subAggregation Aggregation) *DerivativeAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *DerivativeAggregation) Meta(metaData map[string]interface{}) *DerivativeAggregation {
+ a.meta = metaData
+ return a
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (a *DerivativeAggregation) BucketsPath(bucketsPaths ...string) *DerivativeAggregation {
+ a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
+ return a
+}
+
+func (a *DerivativeAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["derivative"] = params
+
+ if a.format != "" {
+ params["format"] = a.format
+ }
+ if a.gapPolicy != "" {
+ params["gap_policy"] = a.gapPolicy
+ }
+ if a.unit != "" {
+ params["unit"] = a.unit
+ }
+
+ // Add buckets paths
+ switch len(a.bucketsPaths) {
+ case 0:
+ case 1:
+ params["buckets_path"] = a.bucketsPaths[0]
+ default:
+ params["buckets_path"] = a.bucketsPaths
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go
new file mode 100644
index 000000000..7e7b26749
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_derivative_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestDerivativeAggregation(t *testing.T) {
+ agg := NewDerivativeAggregation().BucketsPath("sales")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"derivative":{"buckets_path":"sales"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go
new file mode 100644
index 000000000..5da049561
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket.go
@@ -0,0 +1,114 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MaxBucketAggregation is a sibling pipeline aggregation which identifies
+// the bucket(s) with the maximum value of a specified metric in a sibling
+// aggregation and outputs both the value and the key(s) of the bucket(s).
+// The specified metric must be numeric and the sibling aggregation must
+// be a multi-bucket aggregation.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-max-bucket-aggregation.html
+type MaxBucketAggregation struct {
+ format string
+ gapPolicy string
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ bucketsPaths []string
+}
+
+// NewMaxBucketAggregation creates and initializes a new MaxBucketAggregation.
+func NewMaxBucketAggregation() *MaxBucketAggregation {
+ return &MaxBucketAggregation{
+ subAggregations: make(map[string]Aggregation),
+ bucketsPaths: make([]string, 0),
+ }
+}
+
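+// Format sets the format to apply to the output value of this aggregation.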
+func (a *MaxBucketAggregation) Format(format string) *MaxBucketAggregation {
+ a.format = format
+ return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "skip".
+func (a *MaxBucketAggregation) GapPolicy(gapPolicy string) *MaxBucketAggregation {
+ a.gapPolicy = gapPolicy
+ return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *MaxBucketAggregation) GapInsertZeros() *MaxBucketAggregation {
+ a.gapPolicy = "insert_zeros"
+ return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *MaxBucketAggregation) GapSkip() *MaxBucketAggregation {
+ a.gapPolicy = "skip"
+ return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *MaxBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxBucketAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *MaxBucketAggregation) Meta(metaData map[string]interface{}) *MaxBucketAggregation {
+ a.meta = metaData
+ return a
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (a *MaxBucketAggregation) BucketsPath(bucketsPaths ...string) *MaxBucketAggregation {
+ a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
+ return a
+}
+
+func (a *MaxBucketAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["max_bucket"] = params
+
+ if a.format != "" {
+ params["format"] = a.format
+ }
+ if a.gapPolicy != "" {
+ params["gap_policy"] = a.gapPolicy
+ }
+
+ // Add buckets paths
+ switch len(a.bucketsPaths) {
+ case 0:
+ case 1:
+ params["buckets_path"] = a.bucketsPaths[0]
+ default:
+ params["buckets_path"] = a.bucketsPaths
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go
new file mode 100644
index 000000000..aa9bf2f6d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_max_bucket_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMaxBucketAggregation(t *testing.T) {
+ agg := NewMaxBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"max_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go
new file mode 100644
index 000000000..463bb919e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket.go
@@ -0,0 +1,114 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MinBucketAggregation is a sibling pipeline aggregation which identifies
+// the bucket(s) with the minimum value of a specified metric in a sibling
+// aggregation and outputs both the value and the key(s) of the bucket(s).
+// The specified metric must be numeric and the sibling aggregation must
+// be a multi-bucket aggregation.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-min-bucket-aggregation.html
+type MinBucketAggregation struct {
+ format string
+ gapPolicy string
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ bucketsPaths []string
+}
+
+// NewMinBucketAggregation creates and initializes a new MinBucketAggregation.
+func NewMinBucketAggregation() *MinBucketAggregation {
+ return &MinBucketAggregation{
+ subAggregations: make(map[string]Aggregation),
+ bucketsPaths: make([]string, 0),
+ }
+}
+
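+// Format sets the format to apply to the output value of this aggregation.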
+func (a *MinBucketAggregation) Format(format string) *MinBucketAggregation {
+ a.format = format
+ return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "skip".
+func (a *MinBucketAggregation) GapPolicy(gapPolicy string) *MinBucketAggregation {
+ a.gapPolicy = gapPolicy
+ return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *MinBucketAggregation) GapInsertZeros() *MinBucketAggregation {
+ a.gapPolicy = "insert_zeros"
+ return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *MinBucketAggregation) GapSkip() *MinBucketAggregation {
+ a.gapPolicy = "skip"
+ return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *MinBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MinBucketAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *MinBucketAggregation) Meta(metaData map[string]interface{}) *MinBucketAggregation {
+ a.meta = metaData
+ return a
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (a *MinBucketAggregation) BucketsPath(bucketsPaths ...string) *MinBucketAggregation {
+ a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
+ return a
+}
+
+func (a *MinBucketAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["min_bucket"] = params
+
+ if a.format != "" {
+ params["format"] = a.format
+ }
+ if a.gapPolicy != "" {
+ params["gap_policy"] = a.gapPolicy
+ }
+
+ // Add buckets paths
+ switch len(a.bucketsPaths) {
+ case 0:
+ case 1:
+ params["buckets_path"] = a.bucketsPaths[0]
+ default:
+ params["buckets_path"] = a.bucketsPaths
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go
new file mode 100644
index 000000000..ff4abf2b2
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_min_bucket_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMinBucketAggregation(t *testing.T) {
+ agg := NewMinBucketAggregation().BucketsPath("sales_per_month>sales").GapPolicy("skip")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"min_bucket":{"buckets_path":"sales_per_month\u003esales","gap_policy":"skip"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go
new file mode 100644
index 000000000..821d73842
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg.go
@@ -0,0 +1,393 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MovAvgAggregation operates on a series of data. It will slide a window
+// across the data and emit the average value of that window.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html
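+//
+// A sketch of smoothing a metric inside a date histogram, assuming a "price"
+// field; the model and window are tuning choices (see the models below):
+//
+//    histo := NewDateHistogramAggregation().Field("date").Interval("day").
+//        SubAggregation("the_sum", NewSumAggregation().Field("price")).
+//        SubAggregation("the_movavg",
+//            NewMovAvgAggregation().BucketsPath("the_sum").Window(30).
+//                Model(NewEWMAMovAvgModel().Alpha(0.5)))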
+type MovAvgAggregation struct {
+ format string
+ gapPolicy string
+ model MovAvgModel
+ window *int
+ predict *int
+ minimize *bool
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ bucketsPaths []string
+}
+
+// NewMovAvgAggregation creates and initializes a new MovAvgAggregation.
+func NewMovAvgAggregation() *MovAvgAggregation {
+ return &MovAvgAggregation{
+ subAggregations: make(map[string]Aggregation),
+ bucketsPaths: make([]string, 0),
+ }
+}
+
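+// Format sets the format to apply to the output value of this aggregation.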
+func (a *MovAvgAggregation) Format(format string) *MovAvgAggregation {
+ a.format = format
+ return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "skip".
+func (a *MovAvgAggregation) GapPolicy(gapPolicy string) *MovAvgAggregation {
+ a.gapPolicy = gapPolicy
+ return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *MovAvgAggregation) GapInsertZeros() *MovAvgAggregation {
+ a.gapPolicy = "insert_zeros"
+ return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *MovAvgAggregation) GapSkip() *MovAvgAggregation {
+ a.gapPolicy = "skip"
+ return a
+}
+
+// Model is used to define what type of moving average you want to use
+// in the series.
+func (a *MovAvgAggregation) Model(model MovAvgModel) *MovAvgAggregation {
+ a.model = model
+ return a
+}
+
+// Window sets the window size for the moving average. This window will
+// "slide" across the series, and the values inside that window will
+// be used to calculate the moving avg value.
+func (a *MovAvgAggregation) Window(window int) *MovAvgAggregation {
+ a.window = &window
+ return a
+}
+
+// Predict sets the number of predictions that should be returned.
+// Each prediction will be spaced at the intervals in the histogram.
+// E.g. a predict of 2 will return two new buckets at the end of the
+// histogram with the predicted values.
+func (a *MovAvgAggregation) Predict(numPredictions int) *MovAvgAggregation {
+ a.predict = &numPredictions
+ return a
+}
+
+// Minimize determines if the model should be fit to the data using a
+// cost minimizing algorithm.
+func (a *MovAvgAggregation) Minimize(minimize bool) *MovAvgAggregation {
+ a.minimize = &minimize
+ return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *MovAvgAggregation) SubAggregation(name string, subAggregation Aggregation) *MovAvgAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *MovAvgAggregation) Meta(metaData map[string]interface{}) *MovAvgAggregation {
+ a.meta = metaData
+ return a
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (a *MovAvgAggregation) BucketsPath(bucketsPaths ...string) *MovAvgAggregation {
+ a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
+ return a
+}
+
+func (a *MovAvgAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["moving_avg"] = params
+
+ if a.format != "" {
+ params["format"] = a.format
+ }
+ if a.gapPolicy != "" {
+ params["gap_policy"] = a.gapPolicy
+ }
+ if a.model != nil {
+ params["model"] = a.model.Name()
+ settings := a.model.Settings()
+ if len(settings) > 0 {
+ params["settings"] = settings
+ }
+ }
+ if a.window != nil {
+ params["window"] = *a.window
+ }
+ if a.predict != nil {
+ params["predict"] = *a.predict
+ }
+ if a.minimize != nil {
+ params["minimize"] = *a.minimize
+ }
+
+ // Add buckets paths
+ switch len(a.bucketsPaths) {
+ case 0:
+ case 1:
+ params["buckets_path"] = a.bucketsPaths[0]
+ default:
+ params["buckets_path"] = a.bucketsPaths
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
+
+// -- Models for moving averages --
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_models
+
+// MovAvgModel specifies the model to use with the MovAvgAggregation.
+type MovAvgModel interface {
+ Name() string
+ Settings() map[string]interface{}
+}
+
+// -- EWMA --
+
+// EWMAMovAvgModel calculates an exponentially weighted moving average.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_ewma_exponentially_weighted
+type EWMAMovAvgModel struct {
+ alpha *float64
+}
+
+// NewEWMAMovAvgModel creates and initializes a new EWMAMovAvgModel.
+func NewEWMAMovAvgModel() *EWMAMovAvgModel {
+ return &EWMAMovAvgModel{}
+}
+
+// Alpha controls the smoothing of the data. Alpha = 1 retains no memory
+// of past values (e.g. a random walk), while alpha = 0 retains infinite
+// memory of past values (e.g. the series mean). Useful values are somewhere
+// in between. Defaults to 0.5.
+func (m *EWMAMovAvgModel) Alpha(alpha float64) *EWMAMovAvgModel {
+ m.alpha = &alpha
+ return m
+}
+
+// Name of the model.
+func (m *EWMAMovAvgModel) Name() string {
+ return "ewma"
+}
+
+// Settings of the model.
+func (m *EWMAMovAvgModel) Settings() map[string]interface{} {
+ settings := make(map[string]interface{})
+ if m.alpha != nil {
+ settings["alpha"] = *m.alpha
+ }
+ return settings
+}
+
+// -- Holt linear --
+
+// HoltLinearMovAvgModel calculates a doubly exponential weighted moving average.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_holt_linear
+type HoltLinearMovAvgModel struct {
+ alpha *float64
+ beta *float64
+}
+
+// NewHoltLinearMovAvgModel creates and initializes a new HoltLinearMovAvgModel.
+func NewHoltLinearMovAvgModel() *HoltLinearMovAvgModel {
+ return &HoltLinearMovAvgModel{}
+}
+
+// Alpha controls the smoothing of the data. Alpha = 1 retains no memory
+// of past values (e.g. a random walk), while alpha = 0 retains infinite
+// memory of past values (e.g. the series mean). Useful values are somewhere
+// in between. Defaults to 0.5.
+func (m *HoltLinearMovAvgModel) Alpha(alpha float64) *HoltLinearMovAvgModel {
+ m.alpha = &alpha
+ return m
+}
+
+// Beta is equivalent to Alpha but controls the smoothing of the trend
+// instead of the data.
+func (m *HoltLinearMovAvgModel) Beta(beta float64) *HoltLinearMovAvgModel {
+ m.beta = &beta
+ return m
+}
+
+// Name of the model.
+func (m *HoltLinearMovAvgModel) Name() string {
+ return "holt"
+}
+
+// Settings of the model.
+func (m *HoltLinearMovAvgModel) Settings() map[string]interface{} {
+ settings := make(map[string]interface{})
+ if m.alpha != nil {
+ settings["alpha"] = *m.alpha
+ }
+ if m.beta != nil {
+ settings["beta"] = *m.beta
+ }
+ return settings
+}
+
+// -- Holt Winters --
+
+// HoltWintersMovAvgModel calculates a triple exponential weighted moving average.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_holt_winters
+type HoltWintersMovAvgModel struct {
+ alpha *float64
+ beta *float64
+ gamma *float64
+ period *int
+ seasonalityType string
+ pad *bool
+}
+
+// NewHoltWintersMovAvgModel creates and initializes a new HoltWintersMovAvgModel.
+func NewHoltWintersMovAvgModel() *HoltWintersMovAvgModel {
+ return &HoltWintersMovAvgModel{}
+}
+
+// Alpha controls the smoothing of the data. Alpha = 1 retains no memory
+// of past values (e.g. a random walk), while alpha = 0 retains infinite
+// memory of past values (e.g. the series mean). Useful values are somewhere
+// in between. Defaults to 0.5.
+func (m *HoltWintersMovAvgModel) Alpha(alpha float64) *HoltWintersMovAvgModel {
+ m.alpha = &alpha
+ return m
+}
+
+// Beta is equivalent to Alpha but controls the smoothing of the trend
+// instead of the data.
+func (m *HoltWintersMovAvgModel) Beta(beta float64) *HoltWintersMovAvgModel {
+ m.beta = &beta
+ return m
+}
+
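+// Gamma is equivalent to Alpha but controls the smoothing of the seasonal
+// component instead of the data.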
+func (m *HoltWintersMovAvgModel) Gamma(gamma float64) *HoltWintersMovAvgModel {
+ m.gamma = &gamma
+ return m
+}
+
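+// Period sets the number of buckets that make up a full seasonal cycle,
+// e.g. 7 for daily buckets with a weekly pattern.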
+func (m *HoltWintersMovAvgModel) Period(period int) *HoltWintersMovAvgModel {
+ m.period = &period
+ return m
+}
+
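+// SeasonalityType sets how seasonality is modeled; Elasticsearch accepts
+// "add" (additive, the default) and "mult" (multiplicative).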
+func (m *HoltWintersMovAvgModel) SeasonalityType(typ string) *HoltWintersMovAvgModel {
+ m.seasonalityType = typ
+ return m
+}
+
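+// Pad adds a small offset to all values to avoid zeros, which cause
+// problems with multiplicative ("mult") seasonality.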
+func (m *HoltWintersMovAvgModel) Pad(pad bool) *HoltWintersMovAvgModel {
+ m.pad = &pad
+ return m
+}
+
+// Name of the model.
+func (m *HoltWintersMovAvgModel) Name() string {
+ return "holt_winters"
+}
+
+// Settings of the model.
+func (m *HoltWintersMovAvgModel) Settings() map[string]interface{} {
+ settings := make(map[string]interface{})
+ if m.alpha != nil {
+ settings["alpha"] = *m.alpha
+ }
+ if m.beta != nil {
+ settings["beta"] = *m.beta
+ }
+ if m.gamma != nil {
+ settings["gamma"] = *m.gamma
+ }
+ if m.period != nil {
+ settings["period"] = *m.period
+ }
+ if m.pad != nil {
+ settings["pad"] = *m.pad
+ }
+ if m.seasonalityType != "" {
+ settings["type"] = m.seasonalityType
+ }
+ return settings
+}
+
+// -- Linear --
+
+// LinearMovAvgModel calculates a linearly weighted moving average, such
+// that older values are linearly less important. "Time" is determined
+// by position in the collection.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_linear
+type LinearMovAvgModel struct {
+}
+
+// NewLinearMovAvgModel creates and initializes a new LinearMovAvgModel.
+func NewLinearMovAvgModel() *LinearMovAvgModel {
+ return &LinearMovAvgModel{}
+}
+
+// Name of the model.
+func (m *LinearMovAvgModel) Name() string {
+ return "linear"
+}
+
+// Settings of the model.
+func (m *LinearMovAvgModel) Settings() map[string]interface{} {
+ return nil
+}
+
+// -- Simple --
+
+// SimpleMovAvgModel calculates a simple unweighted (arithmetic) moving average.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_simple
+type SimpleMovAvgModel struct {
+}
+
+// NewSimpleMovAvgModel creates and initializes a new SimpleMovAvgModel.
+func NewSimpleMovAvgModel() *SimpleMovAvgModel {
+ return &SimpleMovAvgModel{}
+}
+
+// Name of the model.
+func (m *SimpleMovAvgModel) Name() string {
+ return "simple"
+}
+
+// Settings of the model.
+func (m *SimpleMovAvgModel) Settings() map[string]interface{} {
+ return nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go
new file mode 100644
index 000000000..af2fc7c27
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_mov_avg_test.go
@@ -0,0 +1,132 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMovAvgAggregation(t *testing.T) {
+ agg := NewMovAvgAggregation().BucketsPath("the_sum")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"moving_avg":{"buckets_path":"the_sum"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMovAvgAggregationWithSimpleModel(t *testing.T) {
+ agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewSimpleMovAvgModel())
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"moving_avg":{"buckets_path":"the_sum","model":"simple","window":30}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMovAvgAggregationWithLinearModel(t *testing.T) {
+ agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewLinearMovAvgModel())
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"moving_avg":{"buckets_path":"the_sum","model":"linear","window":30}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMovAvgAggregationWithEWMAModel(t *testing.T) {
+ agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Model(NewEWMAMovAvgModel().Alpha(0.5))
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"moving_avg":{"buckets_path":"the_sum","model":"ewma","settings":{"alpha":0.5},"window":30}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMovAvgAggregationWithHoltLinearModel(t *testing.T) {
+ agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).
+ Model(NewHoltLinearMovAvgModel().Alpha(0.5).Beta(0.4))
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"moving_avg":{"buckets_path":"the_sum","model":"holt","settings":{"alpha":0.5,"beta":0.4},"window":30}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMovAvgAggregationWithHoltWintersModel(t *testing.T) {
+ agg := NewMovAvgAggregation().BucketsPath("the_sum").Window(30).Predict(10).Minimize(true).
+ Model(NewHoltWintersMovAvgModel().Alpha(0.5).Beta(0.4).Gamma(0.3).Period(7).Pad(true))
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"moving_avg":{"buckets_path":"the_sum","minimize":true,"model":"holt_winters","predict":10,"settings":{"alpha":0.5,"beta":0.4,"gamma":0.3,"pad":true,"period":7},"window":30}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMovAvgAggregationWithSubAggs(t *testing.T) {
+ agg := NewMovAvgAggregation().BucketsPath("the_sum")
+ agg = agg.SubAggregation("avg_sum", NewAvgAggregation().Field("height"))
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"aggregations":{"avg_sum":{"avg":{"field":"height"}}},"moving_avg":{"buckets_path":"the_sum"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket.go
new file mode 100644
index 000000000..9a3556269
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket.go
@@ -0,0 +1,125 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PercentilesBucketAggregation is a sibling pipeline aggregation which calculates
+// percentiles across all buckets of a specified metric in a sibling aggregation.
+// The specified metric must be numeric and the sibling aggregation must
+// be a multi-bucket aggregation.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-percentiles-bucket-aggregation.html
+type PercentilesBucketAggregation struct {
+ format string
+ gapPolicy string
+ percents []float64
+ bucketsPaths []string
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+}
+
+// NewPercentilesBucketAggregation creates and initializes a new PercentilesBucketAggregation.
+func NewPercentilesBucketAggregation() *PercentilesBucketAggregation {
+ return &PercentilesBucketAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+// Format to apply to the output value of this aggregation.
+func (p *PercentilesBucketAggregation) Format(format string) *PercentilesBucketAggregation {
+ p.format = format
+ return p
+}
+
+// Percents sets the percentiles to calculate for this aggregation.
+// If not set, Elasticsearch defaults to [1, 5, 25, 50, 75, 95, 99].
+func (p *PercentilesBucketAggregation) Percents(percents ...float64) *PercentilesBucketAggregation {
+ p.percents = percents
+ return p
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "skip".
+func (p *PercentilesBucketAggregation) GapPolicy(gapPolicy string) *PercentilesBucketAggregation {
+ p.gapPolicy = gapPolicy
+ return p
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (p *PercentilesBucketAggregation) GapInsertZeros() *PercentilesBucketAggregation {
+ p.gapPolicy = "insert_zeros"
+ return p
+}
+
+// GapSkip skips gaps in the series.
+func (p *PercentilesBucketAggregation) GapSkip() *PercentilesBucketAggregation {
+ p.gapPolicy = "skip"
+ return p
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (p *PercentilesBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentilesBucketAggregation {
+ p.subAggregations[name] = subAggregation
+ return p
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (p *PercentilesBucketAggregation) Meta(metaData map[string]interface{}) *PercentilesBucketAggregation {
+ p.meta = metaData
+ return p
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (p *PercentilesBucketAggregation) BucketsPath(bucketsPaths ...string) *PercentilesBucketAggregation {
+ p.bucketsPaths = append(p.bucketsPaths, bucketsPaths...)
+ return p
+}
+
+func (p *PercentilesBucketAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["percentiles_bucket"] = params
+
+ if p.format != "" {
+ params["format"] = p.format
+ }
+ if p.gapPolicy != "" {
+ params["gap_policy"] = p.gapPolicy
+ }
+
+ // Add buckets paths
+ switch len(p.bucketsPaths) {
+ case 0:
+ case 1:
+ params["buckets_path"] = p.bucketsPaths[0]
+ default:
+ params["buckets_path"] = p.bucketsPaths
+ }
+
+ // Add percents
+ if len(p.percents) > 0 {
+ params["percents"] = p.percents
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(p.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range p.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(p.meta) > 0 {
+ source["meta"] = p.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket_test.go
new file mode 100644
index 000000000..5fa2639de
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_percentiles_bucket_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestPercentilesBucketAggregation(t *testing.T) {
+ agg := NewPercentilesBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentiles_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercentilesBucketAggregationWithPercents(t *testing.T) {
+ agg := NewPercentilesBucketAggregation().BucketsPath("the_sum").Percents(0.1, 1.0, 5.0, 25, 50)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percentiles_bucket":{"buckets_path":"the_sum","percents":[0.1,1,5,25,50]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go
new file mode 100644
index 000000000..e13b94ea9
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff.go
@@ -0,0 +1,124 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SerialDiffAggregation implements serial differencing.
+// Serial differencing is a technique in which values in a time series are
+// subtracted from the same series at different time lags or periods.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-serialdiff-aggregation.html
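+//
+// A sketch of a 30-bucket difference inside a date histogram, with
+// illustrative names following the Elasticsearch reference:
+//
+//    histo := NewDateHistogramAggregation().Field("timestamp").Interval("day").
+//        SubAggregation("the_sum", NewSumAggregation().Field("lemmings")).
+//        SubAggregation("thirtieth_difference",
+//            NewSerialDiffAggregation().BucketsPath("the_sum").Lag(30))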
+type SerialDiffAggregation struct {
+ format string
+ gapPolicy string
+ lag *int
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ bucketsPaths []string
+}
+
+// NewSerialDiffAggregation creates and initializes a new SerialDiffAggregation.
+func NewSerialDiffAggregation() *SerialDiffAggregation {
+ return &SerialDiffAggregation{
+ subAggregations: make(map[string]Aggregation),
+ bucketsPaths: make([]string, 0),
+ }
+}
+
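+// Format sets the format to apply to the output value of this aggregation.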
+func (a *SerialDiffAggregation) Format(format string) *SerialDiffAggregation {
+ a.format = format
+ return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "skip".
+func (a *SerialDiffAggregation) GapPolicy(gapPolicy string) *SerialDiffAggregation {
+ a.gapPolicy = gapPolicy
+ return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *SerialDiffAggregation) GapInsertZeros() *SerialDiffAggregation {
+ a.gapPolicy = "insert_zeros"
+ return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *SerialDiffAggregation) GapSkip() *SerialDiffAggregation {
+ a.gapPolicy = "skip"
+ return a
+}
+
+// Lag specifies the historical bucket to subtract from the current value.
+// E.g. a lag of 7 subtracts the value from 7 buckets earlier from the
+// current value. Lag must be a positive, non-zero integer.
+func (a *SerialDiffAggregation) Lag(lag int) *SerialDiffAggregation {
+ a.lag = &lag
+ return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *SerialDiffAggregation) SubAggregation(name string, subAggregation Aggregation) *SerialDiffAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *SerialDiffAggregation) Meta(metaData map[string]interface{}) *SerialDiffAggregation {
+ a.meta = metaData
+ return a
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (a *SerialDiffAggregation) BucketsPath(bucketsPaths ...string) *SerialDiffAggregation {
+ a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
+ return a
+}
+
+func (a *SerialDiffAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["serial_diff"] = params
+
+ if a.format != "" {
+ params["format"] = a.format
+ }
+ if a.gapPolicy != "" {
+ params["gap_policy"] = a.gapPolicy
+ }
+ if a.lag != nil {
+ params["lag"] = *a.lag
+ }
+
+ // Add buckets paths
+ switch len(a.bucketsPaths) {
+ case 0:
+ case 1:
+ params["buckets_path"] = a.bucketsPaths[0]
+ default:
+ params["buckets_path"] = a.bucketsPaths
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go
new file mode 100644
index 000000000..6d336a2ee
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_serial_diff_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSerialDiffAggregation(t *testing.T) {
+ agg := NewSerialDiffAggregation().BucketsPath("the_sum").Lag(7)
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"serial_diff":{"buckets_path":"the_sum","lag":7}}`
+ if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket.go
new file mode 100644
index 000000000..e68a420f2
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket.go
@@ -0,0 +1,113 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// StatsBucketAggregation is a sibling pipeline aggregation which calculates
+// a variety of stats across all bucket of a specified metric in a sibling aggregation.
+// The specified metric must be numeric and the sibling aggregation must
+// be a multi-bucket aggregation.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-stats-bucket-aggregation.html
+type StatsBucketAggregation struct {
+ format string
+ gapPolicy string
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ bucketsPaths []string
+}
+
+// NewStatsBucketAggregation creates and initializes a new StatsBucketAggregation.
+func NewStatsBucketAggregation() *StatsBucketAggregation {
+ return &StatsBucketAggregation{
+ subAggregations: make(map[string]Aggregation),
+ bucketsPaths: make([]string, 0),
+ }
+}
+
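+// A minimal usage sketch, mirroring TestStatsBucketAggregation below; the
+// buckets path "the_sum" is a hypothetical sibling metric:
+//
+//	agg := NewStatsBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
+//	src, _ := agg.Source()
+//	// src marshals to {"stats_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}
+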
+// Format to use on the output of this aggregation.
+func (s *StatsBucketAggregation) Format(format string) *StatsBucketAggregation {
+ s.format = format
+ return s
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
+func (s *StatsBucketAggregation) GapPolicy(gapPolicy string) *StatsBucketAggregation {
+ s.gapPolicy = gapPolicy
+ return s
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (s *StatsBucketAggregation) GapInsertZeros() *StatsBucketAggregation {
+ s.gapPolicy = "insert_zeros"
+ return s
+}
+
+// GapSkip skips gaps in the series.
+func (s *StatsBucketAggregation) GapSkip() *StatsBucketAggregation {
+ s.gapPolicy = "skip"
+ return s
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (s *StatsBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsBucketAggregation {
+ s.subAggregations[name] = subAggregation
+ return s
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (s *StatsBucketAggregation) Meta(metaData map[string]interface{}) *StatsBucketAggregation {
+ s.meta = metaData
+ return s
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (s *StatsBucketAggregation) BucketsPath(bucketsPaths ...string) *StatsBucketAggregation {
+ s.bucketsPaths = append(s.bucketsPaths, bucketsPaths...)
+ return s
+}
+
+// Source returns the JSON-serializable representation of this aggregation.
+func (s *StatsBucketAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["stats_bucket"] = params
+
+ if s.format != "" {
+ params["format"] = s.format
+ }
+ if s.gapPolicy != "" {
+ params["gap_policy"] = s.gapPolicy
+ }
+
+ // Add buckets paths
+ switch len(s.bucketsPaths) {
+ case 0:
+ case 1:
+ params["buckets_path"] = s.bucketsPaths[0]
+ default:
+ params["buckets_path"] = s.bucketsPaths
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(s.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range s.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(s.meta) > 0 {
+ source["meta"] = s.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket_test.go
new file mode 100644
index 000000000..117a73885
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_stats_bucket_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestStatsBucketAggregation(t *testing.T) {
+ agg := NewStatsBucketAggregation().BucketsPath("the_sum").GapPolicy("skip")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"stats_bucket":{"buckets_path":"the_sum","gap_policy":"skip"}}`
+ if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go
new file mode 100644
index 000000000..c22ae8f50
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket.go
@@ -0,0 +1,113 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SumBucketAggregation is a sibling pipeline aggregation which calculates
+// the sum across all buckets of a specified metric in a sibling aggregation.
+// The specified metric must be numeric and the sibling aggregation must
+// be a multi-bucket aggregation.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-sum-bucket-aggregation.html
+type SumBucketAggregation struct {
+ format string
+ gapPolicy string
+
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ bucketsPaths []string
+}
+
+// NewSumBucketAggregation creates and initializes a new SumBucketAggregation.
+func NewSumBucketAggregation() *SumBucketAggregation {
+ return &SumBucketAggregation{
+ subAggregations: make(map[string]Aggregation),
+ bucketsPaths: make([]string, 0),
+ }
+}
+
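+// A minimal usage sketch, mirroring TestSumBucketAggregation below; the
+// buckets path "the_sum" is a hypothetical sibling metric:
+//
+//	agg := NewSumBucketAggregation().BucketsPath("the_sum")
+//	src, _ := agg.Source()
+//	// src marshals to {"sum_bucket":{"buckets_path":"the_sum"}}
+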
+// Format to use on the output of this aggregation.
+func (a *SumBucketAggregation) Format(format string) *SumBucketAggregation {
+ a.format = format
+ return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
+func (a *SumBucketAggregation) GapPolicy(gapPolicy string) *SumBucketAggregation {
+ a.gapPolicy = gapPolicy
+ return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *SumBucketAggregation) GapInsertZeros() *SumBucketAggregation {
+ a.gapPolicy = "insert_zeros"
+ return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *SumBucketAggregation) GapSkip() *SumBucketAggregation {
+ a.gapPolicy = "skip"
+ return a
+}
+
+// SubAggregation adds a sub-aggregation to this aggregation.
+func (a *SumBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *SumBucketAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *SumBucketAggregation) Meta(metaData map[string]interface{}) *SumBucketAggregation {
+ a.meta = metaData
+ return a
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (a *SumBucketAggregation) BucketsPath(bucketsPaths ...string) *SumBucketAggregation {
+ a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
+ return a
+}
+
+// Source returns the JSON-serializable representation of this aggregation.
+func (a *SumBucketAggregation) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["sum_bucket"] = params
+
+ if a.format != "" {
+ params["format"] = a.format
+ }
+ if a.gapPolicy != "" {
+ params["gap_policy"] = a.gapPolicy
+ }
+
+ // Add buckets paths
+ switch len(a.bucketsPaths) {
+ case 0:
+ case 1:
+ params["buckets_path"] = a.bucketsPaths[0]
+ default:
+ params["buckets_path"] = a.bucketsPaths
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go
new file mode 100644
index 000000000..be8275c81
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_sum_bucket_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSumBucketAggregation(t *testing.T) {
+ agg := NewSumBucketAggregation().BucketsPath("the_sum")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"sum_bucket":{"buckets_path":"the_sum"}}`
+ if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_pipeline_test.go b/vendor/github.com/olivere/elastic/search_aggs_pipeline_test.go
new file mode 100644
index 000000000..24dd4eb0f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_pipeline_test.go
@@ -0,0 +1,903 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestAggsIntegrationAvgBucket(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ builder := client.Search().
+ Index(testOrderIndex).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Pretty(true)
+ h := NewDateHistogramAggregation().Field("time").Interval("month")
+ h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
+ builder = builder.Aggregation("sales_per_month", h)
+ builder = builder.Aggregation("avg_monthly_sales", NewAvgBucketAggregation().BucketsPath("sales_per_month>sales"))
+
+ res, err := builder.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+
+ aggs := res.Aggregations
+ if aggs == nil {
+ t.Fatal("expected aggregations != nil; got: nil")
+ }
+
+ agg, found := aggs.AvgBucket("avg_monthly_sales")
+ if !found {
+ t.Fatal("expected avg_monthly_sales aggregation")
+ }
+ if agg == nil {
+ t.Fatal("expected avg_monthly_sales aggregation")
+ }
+ if agg.Value == nil {
+ t.Fatal("expected avg_monthly_sales.value != nil")
+ }
+ if got, want := *agg.Value, float64(939.2); got != want {
+ t.Fatalf("expected avg_monthly_sales.value=%v; got: %v", want, got)
+ }
+}
+
+func TestAggsIntegrationDerivative(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ builder := client.Search().
+ Index(testOrderIndex).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Pretty(true)
+ h := NewDateHistogramAggregation().Field("time").Interval("month")
+ h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
+ h = h.SubAggregation("sales_deriv", NewDerivativeAggregation().BucketsPath("sales"))
+ builder = builder.Aggregation("sales_per_month", h)
+
+ res, err := builder.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+
+ aggs := res.Aggregations
+ if aggs == nil {
+ t.Fatal("expected aggregations != nil; got: nil")
+ }
+
+ agg, found := aggs.DateHistogram("sales_per_month")
+ if !found {
+ t.Fatal("expected sales_per_month aggregation")
+ }
+ if agg == nil {
+ t.Fatal("expected sales_per_month aggregation")
+ }
+ if got, want := len(agg.Buckets), 6; got != want {
+ t.Fatalf("expected %d buckets; got: %d", want, got)
+ }
+
+ if got, want := agg.Buckets[0].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[1].DocCount, int64(0); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[2].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[3].DocCount, int64(3); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[4].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[5].DocCount, int64(2); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+
+ d, found := agg.Buckets[0].Derivative("sales_deriv")
+ if found {
+ t.Fatal("expected no sales_deriv aggregation")
+ }
+ if d != nil {
+ t.Fatal("expected no sales_deriv aggregation")
+ }
+
+ d, found = agg.Buckets[1].Derivative("sales_deriv")
+ if !found {
+ t.Fatal("expected sales_deriv aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected sales_deriv aggregation")
+ }
+ if d.Value != nil {
+ t.Fatal("expected sales_deriv value == nil")
+ }
+
+ d, found = agg.Buckets[2].Derivative("sales_deriv")
+ if !found {
+ t.Fatal("expected sales_deriv aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected sales_deriv aggregation")
+ }
+ if d.Value != nil {
+ t.Fatal("expected sales_deriv value == nil")
+ }
+
+ d, found = agg.Buckets[3].Derivative("sales_deriv")
+ if !found {
+ t.Fatal("expected sales_deriv aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected sales_deriv aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected sales_deriv value != nil")
+ }
+ if got, want := *d.Value, float64(2348.0); got != want {
+ t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[4].Derivative("sales_deriv")
+ if !found {
+ t.Fatal("expected sales_deriv aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected sales_deriv aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected sales_deriv value != nil")
+ }
+ if got, want := *d.Value, float64(-1658.0); got != want {
+ t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[5].Derivative("sales_deriv")
+ if !found {
+ t.Fatal("expected sales_deriv aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected sales_deriv aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected sales_deriv value != nil")
+ }
+ if got, want := *d.Value, float64(-722.0); got != want {
+ t.Fatalf("expected sales_deriv.value=%v; got: %v", want, got)
+ }
+}
+
+func TestAggsIntegrationMaxBucket(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ builder := client.Search().
+ Index(testOrderIndex).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Pretty(true)
+ h := NewDateHistogramAggregation().Field("time").Interval("month")
+ h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
+ builder = builder.Aggregation("sales_per_month", h)
+ builder = builder.Aggregation("max_monthly_sales", NewMaxBucketAggregation().BucketsPath("sales_per_month>sales"))
+
+ res, err := builder.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+
+ aggs := res.Aggregations
+ if aggs == nil {
+ t.Fatal("expected aggregations != nil; got: nil")
+ }
+
+ agg, found := aggs.MaxBucket("max_monthly_sales")
+ if !found {
+ t.Fatal("expected max_monthly_sales aggregation")
+ }
+ if agg == nil {
+ t.Fatal("expected max_monthly_sales aggregation")
+ }
+ if got, want := len(agg.Keys), 1; got != want {
+ t.Fatalf("expected len(max_monthly_sales.keys)=%d; got: %d", want, got)
+ }
+ if got, want := agg.Keys[0], "2015-04-01"; got != want {
+ t.Fatalf("expected max_monthly_sales.keys[0]=%v; got: %v", want, got)
+ }
+ if agg.Value == nil {
+ t.Fatal("expected max_monthly_sales.value != nil")
+ }
+ if got, want := *agg.Value, float64(2448); got != want {
+ t.Fatalf("expected max_monthly_sales.value=%v; got: %v", want, got)
+ }
+}
+
+func TestAggsIntegrationMinBucket(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ builder := client.Search().
+ Index(testOrderIndex).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Pretty(true)
+ h := NewDateHistogramAggregation().Field("time").Interval("month")
+ h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
+ builder = builder.Aggregation("sales_per_month", h)
+ builder = builder.Aggregation("min_monthly_sales", NewMinBucketAggregation().BucketsPath("sales_per_month>sales"))
+
+ res, err := builder.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+
+ aggs := res.Aggregations
+ if aggs == nil {
+ t.Fatal("expected aggregations != nil; got: nil")
+ }
+
+ agg, found := aggs.MinBucket("min_monthly_sales")
+ if !found {
+ t.Fatal("expected min_monthly_sales aggregation")
+ }
+ if agg == nil {
+ t.Fatal("expected min_monthly_sales aggregation")
+ }
+ if got, want := len(agg.Keys), 1; got != want {
+ t.Fatalf("expected len(min_monthly_sales.keys)=%d; got: %d", want, got)
+ }
+ if got, want := agg.Keys[0], "2015-06-01"; got != want {
+ t.Fatalf("expected min_monthly_sales.keys[0]=%v; got: %v", want, got)
+ }
+ if agg.Value == nil {
+ t.Fatal("expected min_monthly_sales.value != nil")
+ }
+ if got, want := *agg.Value, float64(68); got != want {
+ t.Fatalf("expected min_monthly_sales.value=%v; got: %v", want, got)
+ }
+}
+
+func TestAggsIntegrationSumBucket(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ builder := client.Search().
+ Index(testOrderIndex).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Pretty(true)
+ h := NewDateHistogramAggregation().Field("time").Interval("month")
+ h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
+ builder = builder.Aggregation("sales_per_month", h)
+ builder = builder.Aggregation("sum_monthly_sales", NewSumBucketAggregation().BucketsPath("sales_per_month>sales"))
+
+ res, err := builder.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+
+ aggs := res.Aggregations
+ if aggs == nil {
+ t.Fatal("expected aggregations != nil; got: nil")
+ }
+
+ agg, found := aggs.SumBucket("sum_monthly_sales")
+ if !found {
+ t.Fatal("expected sum_monthly_sales aggregation")
+ }
+ if agg == nil {
+ t.Fatal("expected sum_monthly_sales aggregation")
+ }
+ if agg.Value == nil {
+ t.Fatal("expected sum_monthly_sales.value != nil")
+ }
+ if got, want := *agg.Value, float64(4696.0); got != want {
+ t.Fatalf("expected sum_monthly_sales.value=%v; got: %v", want, got)
+ }
+}
+
+func TestAggsIntegrationMovAvg(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ builder := client.Search().
+ Index(testOrderIndex).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Pretty(true)
+ h := NewDateHistogramAggregation().Field("time").Interval("month")
+ h = h.SubAggregation("the_sum", NewSumAggregation().Field("price"))
+ h = h.SubAggregation("the_movavg", NewMovAvgAggregation().BucketsPath("the_sum"))
+ builder = builder.Aggregation("my_date_histo", h)
+
+ res, err := builder.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+
+ aggs := res.Aggregations
+ if aggs == nil {
+ t.Fatal("expected aggregations != nil; got: nil")
+ }
+
+ agg, found := aggs.DateHistogram("my_date_histo")
+ if !found {
+		t.Fatal("expected my_date_histo aggregation")
+ }
+ if agg == nil {
+		t.Fatal("expected my_date_histo aggregation")
+ }
+ if got, want := len(agg.Buckets), 6; got != want {
+ t.Fatalf("expected %d buckets; got: %d", want, got)
+ }
+
+ d, found := agg.Buckets[0].MovAvg("the_movavg")
+ if found {
+ t.Fatal("expected no the_movavg aggregation")
+ }
+ if d != nil {
+ t.Fatal("expected no the_movavg aggregation")
+ }
+
+ d, found = agg.Buckets[1].MovAvg("the_movavg")
+ if found {
+ t.Fatal("expected no the_movavg aggregation")
+ }
+ if d != nil {
+ t.Fatal("expected no the_movavg aggregation")
+ }
+
+ d, found = agg.Buckets[2].MovAvg("the_movavg")
+ if !found {
+ t.Fatal("expected the_movavg aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected the_movavg aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected the_movavg value")
+ }
+ if got, want := *d.Value, float64(1290.0); got != want {
+		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[3].MovAvg("the_movavg")
+ if !found {
+ t.Fatal("expected the_movavg aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected the_movavg aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected the_movavg value")
+ }
+ if got, want := *d.Value, float64(695.0); got != want {
+		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[4].MovAvg("the_movavg")
+ if !found {
+ t.Fatal("expected the_movavg aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected the_movavg aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected the_movavg value")
+ }
+ if got, want := *d.Value, float64(1279.3333333333333); got != want {
+		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[5].MovAvg("the_movavg")
+ if !found {
+ t.Fatal("expected the_movavg aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected the_movavg aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected the_movavg value")
+ }
+ if got, want := *d.Value, float64(1157.0); got != want {
+		t.Fatalf("expected the_movavg.value=%v; got: %v", want, got)
+ }
+}
+
+func TestAggsIntegrationCumulativeSum(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ builder := client.Search().
+ Index(testOrderIndex).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Pretty(true)
+ h := NewDateHistogramAggregation().Field("time").Interval("month")
+ h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
+ h = h.SubAggregation("cumulative_sales", NewCumulativeSumAggregation().BucketsPath("sales"))
+ builder = builder.Aggregation("sales_per_month", h)
+
+ res, err := builder.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+
+ aggs := res.Aggregations
+ if aggs == nil {
+ t.Fatal("expected aggregations != nil; got: nil")
+ }
+
+ agg, found := aggs.DateHistogram("sales_per_month")
+ if !found {
+ t.Fatal("expected sales_per_month aggregation")
+ }
+ if agg == nil {
+ t.Fatal("expected sales_per_month aggregation")
+ }
+ if got, want := len(agg.Buckets), 6; got != want {
+ t.Fatalf("expected %d buckets; got: %d", want, got)
+ }
+
+ if got, want := agg.Buckets[0].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[1].DocCount, int64(0); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[2].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[3].DocCount, int64(3); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[4].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[5].DocCount, int64(2); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+
+ d, found := agg.Buckets[0].CumulativeSum("cumulative_sales")
+ if !found {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected cumulative_sales value != nil")
+ }
+ if got, want := *d.Value, float64(1290.0); got != want {
+ t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[1].CumulativeSum("cumulative_sales")
+ if !found {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected cumulative_sales value != nil")
+ }
+ if got, want := *d.Value, float64(1290.0); got != want {
+ t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[2].CumulativeSum("cumulative_sales")
+ if !found {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected cumulative_sales value != nil")
+ }
+ if got, want := *d.Value, float64(1390.0); got != want {
+ t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[3].CumulativeSum("cumulative_sales")
+ if !found {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected cumulative_sales value != nil")
+ }
+ if got, want := *d.Value, float64(3838.0); got != want {
+ t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[4].CumulativeSum("cumulative_sales")
+ if !found {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected cumulative_sales value != nil")
+ }
+ if got, want := *d.Value, float64(4628.0); got != want {
+ t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[5].CumulativeSum("cumulative_sales")
+ if !found {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected cumulative_sales aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected cumulative_sales value != nil")
+ }
+ if got, want := *d.Value, float64(4696.0); got != want {
+ t.Fatalf("expected cumulative_sales.value=%v; got: %v", want, got)
+ }
+}
+
+func TestAggsIntegrationBucketScript(t *testing.T) {
+ // client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ builder := client.Search().
+ Index(testOrderIndex).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Pretty(true)
+ h := NewDateHistogramAggregation().Field("time").Interval("month")
+ h = h.SubAggregation("total_sales", NewSumAggregation().Field("price"))
+ appleFilter := NewFilterAggregation().Filter(NewTermQuery("manufacturer", "Apple"))
+ appleFilter = appleFilter.SubAggregation("sales", NewSumAggregation().Field("price"))
+ h = h.SubAggregation("apple_sales", appleFilter)
+ h = h.SubAggregation("apple_percentage",
+ NewBucketScriptAggregation().
+ GapPolicy("insert_zeros").
+ AddBucketsPath("appleSales", "apple_sales>sales").
+ AddBucketsPath("totalSales", "total_sales").
+ Script(NewScript("params.appleSales / params.totalSales * 100")))
+ builder = builder.Aggregation("sales_per_month", h)
+
+ res, err := builder.Pretty(true).Do(context.TODO())
+ if err != nil {
+ t.Fatalf("%v (maybe scripting is disabled?)", err)
+ }
+ if res.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+
+ aggs := res.Aggregations
+ if aggs == nil {
+ t.Fatal("expected aggregations != nil; got: nil")
+ }
+
+ agg, found := aggs.DateHistogram("sales_per_month")
+ if !found {
+ t.Fatal("expected sales_per_month aggregation")
+ }
+ if agg == nil {
+ t.Fatal("expected sales_per_month aggregation")
+ }
+ if got, want := len(agg.Buckets), 6; got != want {
+ t.Fatalf("expected %d buckets; got: %d", want, got)
+ }
+
+ if got, want := agg.Buckets[0].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[1].DocCount, int64(0); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[2].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[3].DocCount, int64(3); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[4].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[5].DocCount, int64(2); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+
+ d, found := agg.Buckets[0].BucketScript("apple_percentage")
+ if !found {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected apple_percentage value != nil")
+ }
+ if got, want := *d.Value, float64(100.0); got != want {
+ t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[1].BucketScript("apple_percentage")
+ if !found {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d.Value != nil {
+ t.Fatal("expected apple_percentage value == nil")
+ }
+
+ d, found = agg.Buckets[2].BucketScript("apple_percentage")
+ if !found {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected apple_percentage value != nil")
+ }
+ if got, want := *d.Value, float64(0.0); got != want {
+ t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[3].BucketScript("apple_percentage")
+ if !found {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected apple_percentage value != nil")
+ }
+ if got, want := *d.Value, float64(34.64052287581699); got != want {
+ t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[4].BucketScript("apple_percentage")
+ if !found {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected apple_percentage value != nil")
+ }
+ if got, want := *d.Value, float64(0.0); got != want {
+ t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[5].BucketScript("apple_percentage")
+ if !found {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected apple_percentage aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected apple_percentage value != nil")
+ }
+ if got, want := *d.Value, float64(0.0); got != want {
+ t.Fatalf("expected apple_percentage.value=%v; got: %v", want, got)
+ }
+}
+
+func TestAggsIntegrationBucketSelector(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ builder := client.Search().
+ Index(testOrderIndex).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Pretty(true)
+ h := NewDateHistogramAggregation().Field("time").Interval("month")
+ h = h.SubAggregation("total_sales", NewSumAggregation().Field("price"))
+ h = h.SubAggregation("sales_bucket_filter",
+ NewBucketSelectorAggregation().
+ AddBucketsPath("totalSales", "total_sales").
+ Script(NewScript("params.totalSales <= 100")))
+ builder = builder.Aggregation("sales_per_month", h)
+
+ res, err := builder.Do(context.TODO())
+ if err != nil {
+ t.Fatalf("%v (maybe scripting is disabled?)", err)
+ }
+ if res.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+
+ aggs := res.Aggregations
+ if aggs == nil {
+ t.Fatal("expected aggregations != nil; got: nil")
+ }
+
+ agg, found := aggs.DateHistogram("sales_per_month")
+ if !found {
+ t.Fatal("expected sales_per_month aggregation")
+ }
+ if agg == nil {
+ t.Fatal("expected sales_per_month aggregation")
+ }
+ if got, want := len(agg.Buckets), 2; got != want {
+ t.Fatalf("expected %d buckets; got: %d", want, got)
+ }
+
+ if got, want := agg.Buckets[0].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[1].DocCount, int64(2); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+}
+
+func TestAggsIntegrationSerialDiff(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ builder := client.Search().
+ Index(testOrderIndex).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Pretty(true)
+ h := NewDateHistogramAggregation().Field("time").Interval("month")
+ h = h.SubAggregation("sales", NewSumAggregation().Field("price"))
+ h = h.SubAggregation("the_diff", NewSerialDiffAggregation().BucketsPath("sales").Lag(1))
+ builder = builder.Aggregation("sales_per_month", h)
+
+ res, err := builder.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+
+ aggs := res.Aggregations
+ if aggs == nil {
+ t.Fatal("expected aggregations != nil; got: nil")
+ }
+
+ agg, found := aggs.DateHistogram("sales_per_month")
+ if !found {
+ t.Fatal("expected sales_per_month aggregation")
+ }
+ if agg == nil {
+ t.Fatal("expected sales_per_month aggregation")
+ }
+ if got, want := len(agg.Buckets), 6; got != want {
+ t.Fatalf("expected %d buckets; got: %d", want, got)
+ }
+
+ if got, want := agg.Buckets[0].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[1].DocCount, int64(0); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[2].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[3].DocCount, int64(3); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[4].DocCount, int64(1); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+ if got, want := agg.Buckets[5].DocCount, int64(2); got != want {
+ t.Fatalf("expected DocCount=%d; got: %d", want, got)
+ }
+
+ d, found := agg.Buckets[0].SerialDiff("the_diff")
+ if found {
+ t.Fatal("expected no the_diff aggregation")
+ }
+ if d != nil {
+ t.Fatal("expected no the_diff aggregation")
+ }
+
+ d, found = agg.Buckets[1].SerialDiff("the_diff")
+ if found {
+ t.Fatal("expected no the_diff aggregation")
+ }
+ if d != nil {
+ t.Fatal("expected no the_diff aggregation")
+ }
+
+ d, found = agg.Buckets[2].SerialDiff("the_diff")
+ if found {
+ t.Fatal("expected no the_diff aggregation")
+ }
+ if d != nil {
+ t.Fatal("expected no the_diff aggregation")
+ }
+
+ d, found = agg.Buckets[3].SerialDiff("the_diff")
+ if !found {
+ t.Fatal("expected the_diff aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected the_diff aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected the_diff value != nil")
+ }
+ if got, want := *d.Value, float64(2348.0); got != want {
+ t.Fatalf("expected the_diff.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[4].SerialDiff("the_diff")
+ if !found {
+ t.Fatal("expected the_diff aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected the_diff aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected the_diff value != nil")
+ }
+ if got, want := *d.Value, float64(-1658.0); got != want {
+ t.Fatalf("expected the_diff.value=%v; got: %v", want, got)
+ }
+
+ d, found = agg.Buckets[5].SerialDiff("the_diff")
+ if !found {
+ t.Fatal("expected the_diff aggregation")
+ }
+ if d == nil {
+ t.Fatal("expected the_diff aggregation")
+ }
+ if d.Value == nil {
+ t.Fatal("expected the_diff value != nil")
+ }
+ if got, want := *d.Value, float64(-722.0); got != want {
+ t.Fatalf("expected the_diff.value=%v; got: %v", want, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_aggs_test.go b/vendor/github.com/olivere/elastic/search_aggs_test.go
new file mode 100644
index 000000000..9d6fa8d27
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_aggs_test.go
@@ -0,0 +1,3233 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestAggs(t *testing.T) {
+ // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndex(t)
+
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
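+	// Some of the aggregations registered below (aggregation metadata,
+	// pipeline aggregations) only exist on newer Elasticsearch versions,
+	// hence the esversion checks before adding them to the request.
+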
+ tweet1 := tweet{
+ User: "olivere",
+ Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Image: "http://golang.org/doc/gopher/gophercolor.png",
+ Tags: []string{"golang", "elasticsearch"},
+ Location: "48.1333,11.5667", // lat,lon
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere",
+ Retweets: 0,
+ Message: "Another unrelated topic.",
+ Tags: []string{"golang"},
+ Location: "48.1189,11.4289", // lat,lon
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae",
+ Retweets: 12,
+ Message: "Cycling is fun.",
+ Tags: []string{"sports", "cycling"},
+ Location: "47.7167,11.7167", // lat,lon
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+
+ // Terms Aggregate by user name
+ globalAgg := NewGlobalAggregation()
+ usersAgg := NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
+ retweetsAgg := NewTermsAggregation().Field("retweets").Size(10)
+ avgRetweetsAgg := NewAvgAggregation().Field("retweets")
+ avgRetweetsWithMetaAgg := NewAvgAggregation().Field("retweetsMeta").Meta(map[string]interface{}{"meta": true})
+ minRetweetsAgg := NewMinAggregation().Field("retweets")
+ maxRetweetsAgg := NewMaxAggregation().Field("retweets")
+ sumRetweetsAgg := NewSumAggregation().Field("retweets")
+ statsRetweetsAgg := NewStatsAggregation().Field("retweets")
+ extstatsRetweetsAgg := NewExtendedStatsAggregation().Field("retweets")
+ valueCountRetweetsAgg := NewValueCountAggregation().Field("retweets")
+ percentilesRetweetsAgg := NewPercentilesAggregation().Field("retweets")
+ percentileRanksRetweetsAgg := NewPercentileRanksAggregation().Field("retweets").Values(25, 50, 75)
+ cardinalityAgg := NewCardinalityAggregation().Field("user")
+ significantTermsAgg := NewSignificantTermsAggregation().Field("message")
+ samplerAgg := NewSamplerAggregation().SubAggregation("tagged_with", NewTermsAggregation().Field("tags"))
+ retweetsRangeAgg := NewRangeAggregation().Field("retweets").Lt(10).Between(10, 100).Gt(100)
+ retweetsKeyedRangeAgg := NewRangeAggregation().Field("retweets").Keyed(true).Lt(10).Between(10, 100).Gt(100)
+ dateRangeAgg := NewDateRangeAggregation().Field("created").Lt("2012-01-01").Between("2012-01-01", "2013-01-01").Gt("2013-01-01")
+ missingTagsAgg := NewMissingAggregation().Field("tags")
+ retweetsHistoAgg := NewHistogramAggregation().Field("retweets").Interval(100)
+ dateHistoAgg := NewDateHistogramAggregation().Field("created").Interval("year")
+ retweetsFilterAgg := NewFilterAggregation().Filter(
+ NewRangeQuery("created").Gte("2012-01-01").Lte("2012-12-31")).
+ SubAggregation("avgRetweetsSub", NewAvgAggregation().Field("retweets"))
+ queryFilterAgg := NewFilterAggregation().Filter(NewTermQuery("tags", "golang"))
+ topTagsHitsAgg := NewTopHitsAggregation().Sort("created", false).Size(5).FetchSource(true)
+ topTagsAgg := NewTermsAggregation().Field("tags").Size(3).SubAggregation("top_tag_hits", topTagsHitsAgg)
+ geoBoundsAgg := NewGeoBoundsAggregation().Field("location")
+ geoHashAgg := NewGeoHashGridAggregation().Field("location").Precision(5)
+
+ // Run query
+ builder := client.Search().Index(testIndexName).Query(all).Pretty(true)
+ builder = builder.Aggregation("global", globalAgg)
+ builder = builder.Aggregation("users", usersAgg)
+ builder = builder.Aggregation("retweets", retweetsAgg)
+ builder = builder.Aggregation("avgRetweets", avgRetweetsAgg)
+ if esversion >= "2.0" {
+ builder = builder.Aggregation("avgRetweetsWithMeta", avgRetweetsWithMetaAgg)
+ }
+ builder = builder.Aggregation("minRetweets", minRetweetsAgg)
+ builder = builder.Aggregation("maxRetweets", maxRetweetsAgg)
+ builder = builder.Aggregation("sumRetweets", sumRetweetsAgg)
+ builder = builder.Aggregation("statsRetweets", statsRetweetsAgg)
+ builder = builder.Aggregation("extstatsRetweets", extstatsRetweetsAgg)
+ builder = builder.Aggregation("valueCountRetweets", valueCountRetweetsAgg)
+ builder = builder.Aggregation("percentilesRetweets", percentilesRetweetsAgg)
+ builder = builder.Aggregation("percentileRanksRetweets", percentileRanksRetweetsAgg)
+ builder = builder.Aggregation("usersCardinality", cardinalityAgg)
+ builder = builder.Aggregation("significantTerms", significantTermsAgg)
+ builder = builder.Aggregation("sample", samplerAgg)
+ builder = builder.Aggregation("retweetsRange", retweetsRangeAgg)
+ builder = builder.Aggregation("retweetsKeyedRange", retweetsKeyedRangeAgg)
+ builder = builder.Aggregation("dateRange", dateRangeAgg)
+ builder = builder.Aggregation("missingTags", missingTagsAgg)
+ builder = builder.Aggregation("retweetsHisto", retweetsHistoAgg)
+ builder = builder.Aggregation("dateHisto", dateHistoAgg)
+ builder = builder.Aggregation("retweetsFilter", retweetsFilterAgg)
+ builder = builder.Aggregation("queryFilter", queryFilterAgg)
+ builder = builder.Aggregation("top-tags", topTagsAgg)
+ builder = builder.Aggregation("viewport", geoBoundsAgg)
+ builder = builder.Aggregation("geohashed", geoHashAgg)
+ if esversion >= "1.4" {
+ // Unnamed filters
+ countByUserAgg := NewFiltersAggregation().
+ Filters(NewTermQuery("user", "olivere"), NewTermQuery("user", "sandrae"))
+ builder = builder.Aggregation("countByUser", countByUserAgg)
+ // Named filters
+ countByUserAgg2 := NewFiltersAggregation().
+ FilterWithName("olivere", NewTermQuery("user", "olivere")).
+ FilterWithName("sandrae", NewTermQuery("user", "sandrae"))
+ builder = builder.Aggregation("countByUser2", countByUserAgg2)
+ }
+ if esversion >= "2.0" {
+ // AvgBucket
+ dateHisto := NewDateHistogramAggregation().Field("created").Interval("year")
+ dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
+ builder = builder.Aggregation("avgBucketDateHisto", dateHisto)
+ builder = builder.Aggregation("avgSumOfRetweets", NewAvgBucketAggregation().BucketsPath("avgBucketDateHisto>sumOfRetweets"))
+ // MinBucket
+ dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
+ dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
+ builder = builder.Aggregation("minBucketDateHisto", dateHisto)
+ builder = builder.Aggregation("minBucketSumOfRetweets", NewMinBucketAggregation().BucketsPath("minBucketDateHisto>sumOfRetweets"))
+ // MaxBucket
+ dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
+ dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
+ builder = builder.Aggregation("maxBucketDateHisto", dateHisto)
+ builder = builder.Aggregation("maxBucketSumOfRetweets", NewMaxBucketAggregation().BucketsPath("maxBucketDateHisto>sumOfRetweets"))
+ // SumBucket
+ dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
+ dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
+ builder = builder.Aggregation("sumBucketDateHisto", dateHisto)
+ builder = builder.Aggregation("sumBucketSumOfRetweets", NewSumBucketAggregation().BucketsPath("sumBucketDateHisto>sumOfRetweets"))
+ // MovAvg
+ dateHisto = NewDateHistogramAggregation().Field("created").Interval("year")
+ dateHisto = dateHisto.SubAggregation("sumOfRetweets", NewSumAggregation().Field("retweets"))
+ dateHisto = dateHisto.SubAggregation("movingAvg", NewMovAvgAggregation().BucketsPath("sumOfRetweets"))
+ builder = builder.Aggregation("movingAvgDateHisto", dateHisto)
+ }
+ searchResult, err := builder.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected Hits != nil; got: nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected Hits.TotalHits = %d; got: %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 3 {
+ t.Errorf("expected len(Hits.Hits) = %d; got: %d", 3, len(searchResult.Hits.Hits))
+ }
+ agg := searchResult.Aggregations
+ if agg == nil {
+ t.Fatalf("expected Aggregations != nil; got: nil")
+ }
+
+ // Search for non-existent aggregate should return (nil, false)
+ unknownAgg, found := agg.Terms("no-such-aggregate")
+ if found {
+ t.Errorf("expected unknown aggregation to not be found; got: %v", found)
+ }
+ if unknownAgg != nil {
+ t.Errorf("expected unknown aggregation to return %v; got %v", nil, unknownAgg)
+ }
+
+ // Global
+ globalAggRes, found := agg.Global("global")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if globalAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if globalAggRes.DocCount != 3 {
+ t.Errorf("expected DocCount = %d; got: %d", 3, globalAggRes.DocCount)
+ }
+
+ // Search for existent aggregate (by name) should return (aggregate, true)
+ termsAggRes, found := agg.Terms("users")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if termsAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(termsAggRes.Buckets) != 2 {
+ t.Fatalf("expected %d; got: %d", 2, len(termsAggRes.Buckets))
+ }
+ if termsAggRes.Buckets[0].Key != "olivere" {
+ t.Errorf("expected %q; got: %q", "olivere", termsAggRes.Buckets[0].Key)
+ }
+ if termsAggRes.Buckets[0].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, termsAggRes.Buckets[0].DocCount)
+ }
+ if termsAggRes.Buckets[1].Key != "sandrae" {
+ t.Errorf("expected %q; got: %q", "sandrae", termsAggRes.Buckets[1].Key)
+ }
+ if termsAggRes.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, termsAggRes.Buckets[1].DocCount)
+ }
+
+ // A terms aggregate with keys that are not strings
+ retweetsAggRes, found := agg.Terms("retweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if retweetsAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(retweetsAggRes.Buckets) != 3 {
+ t.Fatalf("expected %d; got: %d", 3, len(retweetsAggRes.Buckets))
+ }
+
+ if retweetsAggRes.Buckets[0].Key != float64(0) {
+ t.Errorf("expected %v; got: %v", float64(0), retweetsAggRes.Buckets[0].Key)
+ }
+ if got, err := retweetsAggRes.Buckets[0].KeyNumber.Int64(); err != nil {
+		t.Errorf("expected %d; got: %v", 0, retweetsAggRes.Buckets[0].KeyNumber)
+ } else if got != 0 {
+ t.Errorf("expected %d; got: %d", 0, got)
+ }
+ if retweetsAggRes.Buckets[0].KeyNumber != "0" {
+ t.Errorf("expected %q; got: %q", "0", retweetsAggRes.Buckets[0].KeyNumber)
+ }
+ if retweetsAggRes.Buckets[0].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[0].DocCount)
+ }
+
+ if retweetsAggRes.Buckets[1].Key != float64(12) {
+ t.Errorf("expected %v; got: %v", float64(12), retweetsAggRes.Buckets[1].Key)
+ }
+ if got, err := retweetsAggRes.Buckets[1].KeyNumber.Int64(); err != nil {
+		t.Errorf("expected %d; got: %v", 12, retweetsAggRes.Buckets[1].KeyNumber)
+ } else if got != 12 {
+ t.Errorf("expected %d; got: %d", 12, got)
+ }
+ if retweetsAggRes.Buckets[1].KeyNumber != "12" {
+ t.Errorf("expected %q; got: %q", "12", retweetsAggRes.Buckets[1].KeyNumber)
+ }
+ if retweetsAggRes.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[1].DocCount)
+ }
+
+ if retweetsAggRes.Buckets[2].Key != float64(108) {
+ t.Errorf("expected %v; got: %v", float64(108), retweetsAggRes.Buckets[2].Key)
+ }
+ if got, err := retweetsAggRes.Buckets[2].KeyNumber.Int64(); err != nil {
+ t.Errorf("expected %d; got: %v", 108, retweetsAggRes.Buckets[2].KeyNumber)
+ } else if got != 108 {
+ t.Errorf("expected %d; got: %d", 108, got)
+ }
+ if retweetsAggRes.Buckets[2].KeyNumber != "108" {
+ t.Errorf("expected %q; got: %q", "108", retweetsAggRes.Buckets[2].KeyNumber)
+ }
+ if retweetsAggRes.Buckets[2].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, retweetsAggRes.Buckets[2].DocCount)
+ }
+
+ // avgRetweets
+ avgAggRes, found := agg.Avg("avgRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if avgAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if avgAggRes.Value == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *avgAggRes.Value != 40.0 {
+ t.Errorf("expected %v; got: %v", 40.0, *avgAggRes.Value)
+ }
+
+ // avgRetweetsWithMeta
+ if esversion >= "2.0" {
+ avgMetaAggRes, found := agg.Avg("avgRetweetsWithMeta")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if avgMetaAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if avgMetaAggRes.Meta == nil {
+ t.Fatalf("expected != nil; got: %v", avgMetaAggRes.Meta)
+ }
+ metaDataValue, found := avgMetaAggRes.Meta["meta"]
+ if !found {
+ t.Fatalf("expected to return meta data key %q; got: %v", "meta", found)
+ }
+ if flag, ok := metaDataValue.(bool); !ok {
+ t.Fatalf("expected to return meta data key type %T; got: %T", true, metaDataValue)
+ } else if flag != true {
+ t.Fatalf("expected to return meta data key value %v; got: %v", true, flag)
+ }
+ }
+
+ // minRetweets
+ minAggRes, found := agg.Min("minRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if minAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if minAggRes.Value == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *minAggRes.Value != 0.0 {
+ t.Errorf("expected %v; got: %v", 0.0, *minAggRes.Value)
+ }
+
+ // maxRetweets
+ maxAggRes, found := agg.Max("maxRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if maxAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if maxAggRes.Value == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *maxAggRes.Value != 108.0 {
+ t.Errorf("expected %v; got: %v", 108.0, *maxAggRes.Value)
+ }
+
+ // sumRetweets
+ sumAggRes, found := agg.Sum("sumRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if sumAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if sumAggRes.Value == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *sumAggRes.Value != 120.0 {
+ t.Errorf("expected %v; got: %v", 120.0, *sumAggRes.Value)
+ }
+
+ // statsRetweets
+ statsAggRes, found := agg.Stats("statsRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if statsAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if statsAggRes.Count != 3 {
+ t.Errorf("expected %d; got: %d", 3, statsAggRes.Count)
+ }
+ if statsAggRes.Min == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *statsAggRes.Min != 0.0 {
+ t.Errorf("expected %v; got: %v", 0.0, *statsAggRes.Min)
+ }
+ if statsAggRes.Max == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *statsAggRes.Max != 108.0 {
+ t.Errorf("expected %v; got: %v", 108.0, *statsAggRes.Max)
+ }
+ if statsAggRes.Avg == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *statsAggRes.Avg != 40.0 {
+ t.Errorf("expected %v; got: %v", 40.0, *statsAggRes.Avg)
+ }
+ if statsAggRes.Sum == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *statsAggRes.Sum != 120.0 {
+ t.Errorf("expected %v; got: %v", 120.0, *statsAggRes.Sum)
+ }
+
+ // extstatsRetweets
+ extStatsAggRes, found := agg.ExtendedStats("extstatsRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if extStatsAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if extStatsAggRes.Count != 3 {
+ t.Errorf("expected %d; got: %d", 3, extStatsAggRes.Count)
+ }
+ if extStatsAggRes.Min == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *extStatsAggRes.Min != 0.0 {
+ t.Errorf("expected %v; got: %v", 0.0, *extStatsAggRes.Min)
+ }
+ if extStatsAggRes.Max == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *extStatsAggRes.Max != 108.0 {
+ t.Errorf("expected %v; got: %v", 108.0, *extStatsAggRes.Max)
+ }
+ if extStatsAggRes.Avg == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *extStatsAggRes.Avg != 40.0 {
+ t.Errorf("expected %v; got: %v", 40.0, *extStatsAggRes.Avg)
+ }
+ if extStatsAggRes.Sum == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *extStatsAggRes.Sum != 120.0 {
+ t.Errorf("expected %v; got: %v", 120.0, *extStatsAggRes.Sum)
+ }
+ if extStatsAggRes.SumOfSquares == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *extStatsAggRes.SumOfSquares != 11808.0 {
+ t.Errorf("expected %v; got: %v", 11808.0, *extStatsAggRes.SumOfSquares)
+ }
+ if extStatsAggRes.Variance == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *extStatsAggRes.Variance != 2336.0 {
+ t.Errorf("expected %v; got: %v", 2336.0, *extStatsAggRes.Variance)
+ }
+ if extStatsAggRes.StdDeviation == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *extStatsAggRes.StdDeviation != 48.33218389437829 {
+ t.Errorf("expected %v; got: %v", 48.33218389437829, *extStatsAggRes.StdDeviation)
+ }
+
+ // valueCountRetweets
+ valueCountAggRes, found := agg.ValueCount("valueCountRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if valueCountAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if valueCountAggRes.Value == nil {
+		t.Fatal("expected != nil; got: nil")
+ }
+ if *valueCountAggRes.Value != 3.0 {
+ t.Errorf("expected %v; got: %v", 3.0, *valueCountAggRes.Value)
+ }
+
+ // percentilesRetweets
+ percentilesAggRes, found := agg.Percentiles("percentilesRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if percentilesAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ // ES 1.4.x returns 7: {"1.0":...}
+ // ES 1.5.0 returns 14: {"1.0":..., "1.0_as_string":...}
+ // So we're relaxing the test here.
+ if len(percentilesAggRes.Values) == 0 {
+ t.Errorf("expected at least %d value; got: %d\nValues are: %#v", 1, len(percentilesAggRes.Values), percentilesAggRes.Values)
+ }
+ if _, found := percentilesAggRes.Values["0.0"]; found {
+ t.Errorf("expected %v; got: %v", false, found)
+ }
+ if percentilesAggRes.Values["1.0"] != 0.24 {
+ t.Errorf("expected %v; got: %v", 0.24, percentilesAggRes.Values["1.0"])
+ }
+ if percentilesAggRes.Values["25.0"] != 6.0 {
+ t.Errorf("expected %v; got: %v", 6.0, percentilesAggRes.Values["25.0"])
+ }
+ if percentilesAggRes.Values["99.0"] != 106.08 {
+ t.Errorf("expected %v; got: %v", 106.08, percentilesAggRes.Values["99.0"])
+ }
+
+ // percentileRanksRetweets
+ percentileRanksAggRes, found := agg.PercentileRanks("percentileRanksRetweets")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if percentileRanksAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(percentileRanksAggRes.Values) == 0 {
+ t.Errorf("expected at least %d value; got %d\nValues are: %#v", 1, len(percentileRanksAggRes.Values), percentileRanksAggRes.Values)
+ }
+ if _, found := percentileRanksAggRes.Values["0.0"]; found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if percentileRanksAggRes.Values["25.0"] != 21.180555555555557 {
+ t.Errorf("expected %v; got: %v", 21.180555555555557, percentileRanksAggRes.Values["25.0"])
+ }
+ if percentileRanksAggRes.Values["50.0"] != 29.86111111111111 {
+ t.Errorf("expected %v; got: %v", 29.86111111111111, percentileRanksAggRes.Values["50.0"])
+ }
+ if percentileRanksAggRes.Values["75.0"] != 38.54166666666667 {
+ t.Errorf("expected %v; got: %v", 38.54166666666667, percentileRanksAggRes.Values["75.0"])
+ }
+
+ // usersCardinality
+ cardAggRes, found := agg.Cardinality("usersCardinality")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if cardAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if cardAggRes.Value == nil {
+ t.Fatalf("expected != nil; got: %v", *cardAggRes.Value)
+ }
+ if *cardAggRes.Value != 2 {
+ t.Errorf("expected %v; got: %v", 2, *cardAggRes.Value)
+ }
+
+ // retweetsFilter
+ filterAggRes, found := agg.Filter("retweetsFilter")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if filterAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if filterAggRes.DocCount != 2 {
+ t.Fatalf("expected %v; got: %v", 2, filterAggRes.DocCount)
+ }
+
+ // Retrieve sub-aggregation
+ avgRetweetsAggRes, found := filterAggRes.Avg("avgRetweetsSub")
+ if !found {
+ t.Error("expected sub-aggregation \"avgRetweets\" to be found; got false")
+ }
+ if avgRetweetsAggRes == nil {
+ t.Fatal("expected sub-aggregation \"avgRetweets\"; got nil")
+ }
+ if avgRetweetsAggRes.Value == nil {
+ t.Fatalf("expected != nil; got: %v", avgRetweetsAggRes.Value)
+ }
+ if *avgRetweetsAggRes.Value != 54.0 {
+ t.Errorf("expected %v; got: %v", 54.0, *avgRetweetsAggRes.Value)
+ }
+
+ // queryFilter
+ queryFilterAggRes, found := agg.Filter("queryFilter")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if queryFilterAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if queryFilterAggRes.DocCount != 2 {
+ t.Fatalf("expected %v; got: %v", 2, queryFilterAggRes.DocCount)
+ }
+
+ // significantTerms
+ stAggRes, found := agg.SignificantTerms("significantTerms")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if stAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if stAggRes.DocCount != 3 {
+ t.Errorf("expected %v; got: %v", 3, stAggRes.DocCount)
+ }
+ if len(stAggRes.Buckets) != 0 {
+ t.Errorf("expected %v; got: %v", 0, len(stAggRes.Buckets))
+ }
+
+ // sampler
+ samplerAggRes, found := agg.Sampler("sample")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if samplerAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if samplerAggRes.DocCount != 3 {
+ t.Errorf("expected %v; got: %v", 3, samplerAggRes.DocCount)
+ }
+ sub, found := samplerAggRes.Aggregations["tagged_with"]
+ if !found {
+ t.Fatalf("expected sub aggregation %q", "tagged_with")
+ }
+ if sub == nil {
+ t.Fatalf("expected sub aggregation %q; got: %v", "tagged_with", sub)
+ }
+
+ // retweetsRange
+ rangeAggRes, found := agg.Range("retweetsRange")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if rangeAggRes == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if len(rangeAggRes.Buckets) != 3 {
+ t.Fatalf("expected %d; got: %d", 3, len(rangeAggRes.Buckets))
+ }
+ if rangeAggRes.Buckets[0].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[0].DocCount)
+ }
+ if rangeAggRes.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[1].DocCount)
+ }
+ if rangeAggRes.Buckets[2].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, rangeAggRes.Buckets[2].DocCount)
+ }
+
+ // retweetsKeyedRange
+ keyedRangeAggRes, found := agg.KeyedRange("retweetsKeyedRange")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if keyedRangeAggRes == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if len(keyedRangeAggRes.Buckets) != 3 {
+ t.Fatalf("expected %d; got: %d", 3, len(keyedRangeAggRes.Buckets))
+ }
+ _, found = keyedRangeAggRes.Buckets["no-such-key"]
+ if found {
+ t.Fatalf("expected bucket to not be found; got: %v", found)
+ }
+ bucket, found := keyedRangeAggRes.Buckets["*-10.0"]
+ if !found {
+ t.Fatalf("expected bucket to be found; got: %v", found)
+ }
+ if bucket.DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, bucket.DocCount)
+ }
+ bucket, found = keyedRangeAggRes.Buckets["10.0-100.0"]
+ if !found {
+ t.Fatalf("expected bucket to be found; got: %v", found)
+ }
+ if bucket.DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, bucket.DocCount)
+ }
+ bucket, found = keyedRangeAggRes.Buckets["100.0-*"]
+ if !found {
+ t.Fatalf("expected bucket to be found; got: %v", found)
+ }
+ if bucket.DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, bucket.DocCount)
+ }
+
+ // dateRange
+ dateRangeRes, found := agg.DateRange("dateRange")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if dateRangeRes == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if dateRangeRes.Buckets[0].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, dateRangeRes.Buckets[0].DocCount)
+ }
+ if dateRangeRes.Buckets[0].From != nil {
+ t.Fatal("expected From to be nil")
+ }
+ if dateRangeRes.Buckets[0].To == nil {
+ t.Fatal("expected To to be != nil")
+ }
+ if *dateRangeRes.Buckets[0].To != 1.325376e+12 {
+ t.Errorf("expected %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[0].To)
+ }
+ if dateRangeRes.Buckets[0].ToAsString != "2012-01-01T00:00:00.000Z" {
+ t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[0].ToAsString)
+ }
+ if dateRangeRes.Buckets[1].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, dateRangeRes.Buckets[1].DocCount)
+ }
+ if dateRangeRes.Buckets[1].From == nil {
+ t.Fatal("expected From to be != nil")
+ }
+ if *dateRangeRes.Buckets[1].From != 1.325376e+12 {
+ t.Errorf("expected From = %v; got: %v", 1.325376e+12, *dateRangeRes.Buckets[1].From)
+ }
+ if dateRangeRes.Buckets[1].FromAsString != "2012-01-01T00:00:00.000Z" {
+ t.Errorf("expected FromAsString = %q; got: %q", "2012-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].FromAsString)
+ }
+ if dateRangeRes.Buckets[1].To == nil {
+ t.Fatal("expected To to be != nil")
+ }
+ if *dateRangeRes.Buckets[1].To != 1.3569984e+12 {
+ t.Errorf("expected To = %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[1].To)
+ }
+ if dateRangeRes.Buckets[1].ToAsString != "2013-01-01T00:00:00.000Z" {
+ t.Errorf("expected ToAsString = %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[1].ToAsString)
+ }
+ if dateRangeRes.Buckets[2].DocCount != 0 {
+ t.Errorf("expected %d; got: %d", 0, dateRangeRes.Buckets[2].DocCount)
+ }
+ if dateRangeRes.Buckets[2].To != nil {
+ t.Fatal("expected To to be nil")
+ }
+ if dateRangeRes.Buckets[2].From == nil {
+ t.Fatal("expected From to be != nil")
+ }
+ if *dateRangeRes.Buckets[2].From != 1.3569984e+12 {
+ t.Errorf("expected %v; got: %v", 1.3569984e+12, *dateRangeRes.Buckets[2].From)
+ }
+ if dateRangeRes.Buckets[2].FromAsString != "2013-01-01T00:00:00.000Z" {
+ t.Errorf("expected %q; got: %q", "2013-01-01T00:00:00.000Z", dateRangeRes.Buckets[2].FromAsString)
+ }
+
+ // missingTags
+ missingRes, found := agg.Missing("missingTags")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if missingRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if missingRes.DocCount != 0 {
+ t.Errorf("expected searchResult.Aggregations[\"missingTags\"].DocCount = %v; got %v", 0, missingRes.DocCount)
+ }
+
+ // retweetsHisto
+ histoRes, found := agg.Histogram("retweetsHisto")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if histoRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(histoRes.Buckets) != 2 {
+ t.Fatalf("expected %d; got: %d", 2, len(histoRes.Buckets))
+ }
+ if histoRes.Buckets[0].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, histoRes.Buckets[0].DocCount)
+ }
+ if histoRes.Buckets[0].Key != 0.0 {
+ t.Errorf("expected %v; got: %v", 0.0, histoRes.Buckets[0].Key)
+ }
+ if histoRes.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, histoRes.Buckets[1].DocCount)
+ }
+ if histoRes.Buckets[1].Key != 100.0 {
+ t.Errorf("expected %v; got: %+v", 100.0, histoRes.Buckets[1].Key)
+ }
+
+ // dateHisto
+ dateHistoRes, found := agg.DateHistogram("dateHisto")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if dateHistoRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(dateHistoRes.Buckets) != 2 {
+ t.Fatalf("expected %d; got: %d", 2, len(dateHistoRes.Buckets))
+ }
+ if dateHistoRes.Buckets[0].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, dateHistoRes.Buckets[0].DocCount)
+ }
+ if dateHistoRes.Buckets[0].Key != 1.29384e+12 {
+ t.Errorf("expected %v; got: %v", 1.29384e+12, dateHistoRes.Buckets[0].Key)
+ }
+ if dateHistoRes.Buckets[0].KeyAsString == nil {
+ t.Fatalf("expected != nil; got: %q", dateHistoRes.Buckets[0].KeyAsString)
+ }
+ if *dateHistoRes.Buckets[0].KeyAsString != "2011-01-01T00:00:00.000Z" {
+ t.Errorf("expected %q; got: %q", "2011-01-01T00:00:00.000Z", *dateHistoRes.Buckets[0].KeyAsString)
+ }
+ if dateHistoRes.Buckets[1].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, dateHistoRes.Buckets[1].DocCount)
+ }
+ if dateHistoRes.Buckets[1].Key != 1.325376e+12 {
+ t.Errorf("expected %v; got: %v", 1.325376e+12, dateHistoRes.Buckets[1].Key)
+ }
+ if dateHistoRes.Buckets[1].KeyAsString == nil {
+ t.Fatalf("expected != nil; got: %q", dateHistoRes.Buckets[1].KeyAsString)
+ }
+ if *dateHistoRes.Buckets[1].KeyAsString != "2012-01-01T00:00:00.000Z" {
+ t.Errorf("expected %q; got: %q", "2012-01-01T00:00:00.000Z", *dateHistoRes.Buckets[1].KeyAsString)
+ }
+
+ // topHits
+ topTags, found := agg.Terms("top-tags")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if topTags == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if esversion >= "1.4.0" {
+ if topTags.DocCountErrorUpperBound != 0 {
+ t.Errorf("expected %v; got: %v", 0, topTags.DocCountErrorUpperBound)
+ }
+ if topTags.SumOfOtherDocCount != 1 {
+ t.Errorf("expected %v; got: %v", 1, topTags.SumOfOtherDocCount)
+ }
+ }
+ if len(topTags.Buckets) != 3 {
+ t.Fatalf("expected %d; got: %d", 3, len(topTags.Buckets))
+ }
+ if topTags.Buckets[0].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, topTags.Buckets[0].DocCount)
+ }
+ if topTags.Buckets[0].Key != "golang" {
+ t.Errorf("expected %v; got: %v", "golang", topTags.Buckets[0].Key)
+ }
+ topHits, found := topTags.Buckets[0].TopHits("top_tag_hits")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if topHits == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if topHits.Hits == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if topHits.Hits.TotalHits != 2 {
+ t.Errorf("expected %d; got: %d", 2, topHits.Hits.TotalHits)
+ }
+ if topHits.Hits.Hits == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(topHits.Hits.Hits) != 2 {
+ t.Fatalf("expected %d; got: %d", 2, len(topHits.Hits.Hits))
+ }
+ hit := topHits.Hits.Hits[0]
+ if hit == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ var tw tweet
+ if err := json.Unmarshal(*hit.Source, &tw); err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if tw.Message != "Welcome to Golang and Elasticsearch." {
+ t.Errorf("expected %q; got: %q", "Welcome to Golang and Elasticsearch.", tw.Message)
+ }
+ if topTags.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, topTags.Buckets[1].DocCount)
+ }
+ if topTags.Buckets[1].Key != "cycling" {
+ t.Errorf("expected %v; got: %v", "cycling", topTags.Buckets[1].Key)
+ }
+ topHits, found = topTags.Buckets[1].TopHits("top_tag_hits")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if topHits == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if topHits.Hits == nil {
+ t.Fatal("expected != nil; got nil")
+ }
+ if topHits.Hits.TotalHits != 1 {
+ t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits)
+ }
+ if topTags.Buckets[2].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, topTags.Buckets[2].DocCount)
+ }
+ if topTags.Buckets[2].Key != "elasticsearch" {
+ t.Errorf("expected %v; got: %v", "elasticsearch", topTags.Buckets[2].Key)
+ }
+ topHits, found = topTags.Buckets[2].TopHits("top_tag_hits")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if topHits == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if topHits.Hits == nil {
+ t.Fatal("expected != nil; got: nil")
+ }
+ if topHits.Hits.TotalHits != 1 {
+ t.Errorf("expected %d; got: %d", 1, topHits.Hits.TotalHits)
+ }
+
+ // viewport via geo_bounds (1.3.0 has an error in that it doesn't output the aggregation name)
+ geoBoundsRes, found := agg.GeoBounds("viewport")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if geoBoundsRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+
+ // geohashed via geohash
+ geoHashRes, found := agg.GeoHash("geohashed")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if geoHashRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+
+ if esversion >= "1.4" {
+ // Filters agg "countByUser" (unnamed)
+ countByUserAggRes, found := agg.Filters("countByUser")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if countByUserAggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(countByUserAggRes.Buckets) != 2 {
+ t.Fatalf("expected %d; got: %d", 2, len(countByUserAggRes.Buckets))
+ }
+ if len(countByUserAggRes.NamedBuckets) != 0 {
+ t.Fatalf("expected %d; got: %d", 0, len(countByUserAggRes.NamedBuckets))
+ }
+ if countByUserAggRes.Buckets[0].DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, countByUserAggRes.Buckets[0].DocCount)
+ }
+ if countByUserAggRes.Buckets[1].DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, countByUserAggRes.Buckets[1].DocCount)
+ }
+
+ // Filters agg "countByUser2" (named)
+ countByUser2AggRes, found := agg.Filters("countByUser2")
+ if !found {
+ t.Errorf("expected %v; got: %v", true, found)
+ }
+ if countByUser2AggRes == nil {
+ t.Fatalf("expected != nil; got: nil")
+ }
+ if len(countByUser2AggRes.Buckets) != 0 {
+ t.Fatalf("expected %d; got: %d", 0, len(countByUser2AggRes.Buckets))
+ }
+ if len(countByUser2AggRes.NamedBuckets) != 2 {
+ t.Fatalf("expected %d; got: %d", 2, len(countByUser2AggRes.NamedBuckets))
+ }
+ b, found := countByUser2AggRes.NamedBuckets["olivere"]
+ if !found {
+ t.Fatalf("expected bucket %q; got: %v", "olivere", found)
+ }
+ if b == nil {
+ t.Fatalf("expected bucket %q; got: %v", "olivere", b)
+ }
+ if b.DocCount != 2 {
+ t.Errorf("expected %d; got: %d", 2, b.DocCount)
+ }
+ b, found = countByUser2AggRes.NamedBuckets["sandrae"]
+ if !found {
+ t.Fatalf("expected bucket %q; got: %v", "sandrae", found)
+ }
+ if b == nil {
+ t.Fatalf("expected bucket %q; got: %v", "sandrae", b)
+ }
+ if b.DocCount != 1 {
+ t.Errorf("expected %d; got: %d", 1, b.DocCount)
+ }
+ }
+}
+
+// TestAggsMarshal ensures that marshaling aggregations back into a string
+// does not yield base64 encoded data. See https://github.com/olivere/elastic/issues/51
+// and https://groups.google.com/forum/#!topic/Golang-Nuts/38ShOlhxAYY for details.
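+//
+// As a rough sketch of the failure mode (assumed encoding/json behavior,
+// Go 1.8+; not exercised directly by this test): a plain []byte marshals
+// to a base64 string, while json.RawMessage marshals verbatim:
+//
+//	json.Marshal([]byte(`{"a":1}`))          // -> "eyJhIjoxfQ=="
+//	json.Marshal(json.RawMessage(`{"a":1}`)) // -> {"a":1}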
+func TestAggsMarshal(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere",
+ Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Image: "http://golang.org/doc/gopher/gophercolor.png",
+ Tags: []string{"golang", "elasticsearch"},
+ Location: "48.1333,11.5667", // lat,lon
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ dhagg := NewDateHistogramAggregation().Field("created").Interval("year")
+
+ // Run query
+ builder := client.Search().Index(testIndexName).Query(all)
+ builder = builder.Aggregation("dhagg", dhagg)
+ searchResult, err := builder.Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.TotalHits() != 1 {
+ t.Errorf("expected Hits.TotalHits = %d; got: %d", 1, searchResult.TotalHits())
+ }
+ if _, found := searchResult.Aggregations["dhagg"]; !found {
+ t.Fatalf("expected aggregation %q", "dhagg")
+ }
+ buf, err := json.Marshal(searchResult)
+ if err != nil {
+ t.Fatal(err)
+ }
+ s := string(buf)
+ if i := strings.Index(s, `{"dhagg":{"buckets":[{"key_as_string":"2012-01-01`); i < 0 {
+ t.Errorf("expected to serialize aggregation into string; got: %v", s)
+ }
+}
+
+func TestAggsMetricsMin(t *testing.T) {
+ s := `{
+ "min_price": {
+ "value": 10
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Min("min_price")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(10) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value)
+ }
+}
+
+func TestAggsMetricsMax(t *testing.T) {
+ s := `{
+ "max_price": {
+ "value": 35
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Max("max_price")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(35) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(35), *agg.Value)
+ }
+}
+
+func TestAggsMetricsSum(t *testing.T) {
+ s := `{
+ "intraday_return": {
+ "value": 2.18
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Sum("intraday_return")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(2.18) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(2.18), *agg.Value)
+ }
+}
+
+func TestAggsMetricsAvg(t *testing.T) {
+ s := `{
+ "avg_grade": {
+ "value": 75
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Avg("avg_grade")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(75) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(75), *agg.Value)
+ }
+}
+
+func TestAggsMetricsValueCount(t *testing.T) {
+ s := `{
+ "grades_count": {
+ "value": 10
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.ValueCount("grades_count")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(10) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(10), *agg.Value)
+ }
+}
+
+func TestAggsMetricsCardinality(t *testing.T) {
+ s := `{
+ "author_count": {
+ "value": 12
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Cardinality("author_count")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(12) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(12), *agg.Value)
+ }
+}
+
+func TestAggsMetricsStats(t *testing.T) {
+ s := `{
+ "grades_stats": {
+ "count": 6,
+ "min": 60,
+ "max": 98,
+ "avg": 78.5,
+ "sum": 471
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Stats("grades_stats")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Count != int64(6) {
+ t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count)
+ }
+ if agg.Min == nil {
+ t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min)
+ }
+ if *agg.Min != float64(60) {
+ t.Fatalf("expected aggregation Min = %v; got: %v", float64(60), *agg.Min)
+ }
+ if agg.Max == nil {
+ t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max)
+ }
+ if *agg.Max != float64(98) {
+ t.Fatalf("expected aggregation Max = %v; got: %v", float64(98), *agg.Max)
+ }
+ if agg.Avg == nil {
+ t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg)
+ }
+ if *agg.Avg != float64(78.5) {
+ t.Fatalf("expected aggregation Avg = %v; got: %v", float64(78.5), *agg.Avg)
+ }
+ if agg.Sum == nil {
+ t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum)
+ }
+ if *agg.Sum != float64(471) {
+ t.Fatalf("expected aggregation Sum = %v; got: %v", float64(471), *agg.Sum)
+ }
+}
+
+func TestAggsMetricsExtendedStats(t *testing.T) {
+ s := `{
+ "grades_stats": {
+ "count": 6,
+ "min": 72,
+ "max": 117.6,
+ "avg": 94.2,
+ "sum": 565.2,
+ "sum_of_squares": 54551.51999999999,
+ "variance": 218.2799999999976,
+ "std_deviation": 14.774302013969987
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.ExtendedStats("grades_stats")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Count != int64(6) {
+ t.Fatalf("expected aggregation Count = %v; got: %v", int64(6), agg.Count)
+ }
+ if agg.Min == nil {
+ t.Fatalf("expected aggregation Min != nil; got: %v", agg.Min)
+ }
+ if *agg.Min != float64(72) {
+ t.Fatalf("expected aggregation Min = %v; got: %v", float64(72), *agg.Min)
+ }
+ if agg.Max == nil {
+ t.Fatalf("expected aggregation Max != nil; got: %v", agg.Max)
+ }
+ if *agg.Max != float64(117.6) {
+ t.Fatalf("expected aggregation Max = %v; got: %v", float64(117.6), *agg.Max)
+ }
+ if agg.Avg == nil {
+ t.Fatalf("expected aggregation Avg != nil; got: %v", agg.Avg)
+ }
+ if *agg.Avg != float64(94.2) {
+ t.Fatalf("expected aggregation Avg = %v; got: %v", float64(94.2), *agg.Avg)
+ }
+ if agg.Sum == nil {
+ t.Fatalf("expected aggregation Sum != nil; got: %v", agg.Sum)
+ }
+ if *agg.Sum != float64(565.2) {
+ t.Fatalf("expected aggregation Sum = %v; got: %v", float64(565.2), *agg.Sum)
+ }
+ if agg.SumOfSquares == nil {
+ t.Fatalf("expected aggregation sum_of_squares != nil; got: %v", agg.SumOfSquares)
+ }
+ if *agg.SumOfSquares != float64(54551.51999999999) {
+ t.Fatalf("expected aggregation sum_of_squares = %v; got: %v", float64(54551.51999999999), *agg.SumOfSquares)
+ }
+ if agg.Variance == nil {
+ t.Fatalf("expected aggregation Variance != nil; got: %v", agg.Variance)
+ }
+ if *agg.Variance != float64(218.2799999999976) {
+ t.Fatalf("expected aggregation Variance = %v; got: %v", float64(218.2799999999976), *agg.Variance)
+ }
+ if agg.StdDeviation == nil {
+ t.Fatalf("expected aggregation StdDeviation != nil; got: %v", agg.StdDeviation)
+ }
+ if *agg.StdDeviation != float64(14.774302013969987) {
+ t.Fatalf("expected aggregation StdDeviation = %v; got: %v", float64(14.774302013969987), *agg.StdDeviation)
+ }
+}
+
+func TestAggsMatrixStats(t *testing.T) {
+ s := `{
+ "matrixstats": {
+ "fields": [{
+ "name": "income",
+ "count": 50,
+ "mean": 51985.1,
+ "variance": 7.383377037755103E7,
+ "skewness": 0.5595114003506483,
+ "kurtosis": 2.5692365287787124,
+ "covariance": {
+ "income": 7.383377037755103E7,
+ "poverty": -21093.65836734694
+ },
+ "correlation": {
+ "income": 1.0,
+ "poverty": -0.8352655256272504
+ }
+ }, {
+ "name": "poverty",
+ "count": 51,
+ "mean": 12.732000000000001,
+ "variance": 8.637730612244896,
+ "skewness": 0.4516049811903419,
+ "kurtosis": 2.8615929677997767,
+ "covariance": {
+ "income": -21093.65836734694,
+ "poverty": 8.637730612244896
+ },
+ "correlation": {
+ "income": -0.8352655256272504,
+ "poverty": 1.0
+ }
+ }]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.MatrixStats("matrixstats")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if want, got := 2, len(agg.Fields); want != got {
+ t.Fatalf("expected aggregaton len(Fields) = %v; got: %v", want, got)
+ }
+ field := agg.Fields[0]
+ if want, got := "income", field.Name; want != got {
+ t.Fatalf("expected aggregation field name == %q; got: %q", want, got)
+ }
+ if want, got := int64(50), field.Count; want != got {
+ t.Fatalf("expected aggregation field count == %v; got: %v", want, got)
+ }
+ if want, got := 51985.1, field.Mean; want != got {
+ t.Fatalf("expected aggregation field mean == %v; got: %v", want, got)
+ }
+ if want, got := 7.383377037755103e7, field.Variance; want != got {
+ t.Fatalf("expected aggregation field variance == %v; got: %v", want, got)
+ }
+ if want, got := 0.5595114003506483, field.Skewness; want != got {
+ t.Fatalf("expected aggregation field skewness == %v; got: %v", want, got)
+ }
+ if want, got := 2.5692365287787124, field.Kurtosis; want != got {
+ t.Fatalf("expected aggregation field kurtosis == %v; got: %v", want, got)
+ }
+ if field.Covariance == nil {
+ t.Fatalf("expected aggregation field covariance != nil; got: %v", nil)
+ }
+ if want, got := 7.383377037755103e7, field.Covariance["income"]; want != got {
+ t.Fatalf("expected aggregation field covariance == %v; got: %v", want, got)
+ }
+ if want, got := -21093.65836734694, field.Covariance["poverty"]; want != got {
+ t.Fatalf("expected aggregation field covariance == %v; got: %v", want, got)
+ }
+ if field.Correlation == nil {
+ t.Fatalf("expected aggregation field correlation != nil; got: %v", nil)
+ }
+ if want, got := 1.0, field.Correlation["income"]; want != got {
+ t.Fatalf("expected aggregation field correlation == %v; got: %v", want, got)
+ }
+ if want, got := -0.8352655256272504, field.Correlation["poverty"]; want != got {
+ t.Fatalf("expected aggregation field correlation == %v; got: %v", want, got)
+ }
+ field = agg.Fields[1]
+ if want, got := "poverty", field.Name; want != got {
+ t.Fatalf("expected aggregation field name == %q; got: %q", want, got)
+ }
+ if want, got := int64(51), field.Count; want != got {
+ t.Fatalf("expected aggregation field count == %v; got: %v", want, got)
+ }
+}
+
+func TestAggsMetricsPercentiles(t *testing.T) {
+ s := `{
+ "load_time_outlier": {
+ "values" : {
+ "1.0": 15,
+ "5.0": 20,
+ "25.0": 23,
+ "50.0": 25,
+ "75.0": 29,
+ "95.0": 60,
+ "99.0": 150
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Percentiles("load_time_outlier")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Values == nil {
+ t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)
+ }
+ if len(agg.Values) != 7 {
+ t.Fatalf("expected %d aggregation Values; got: %d", 7, len(agg.Values))
+ }
+ if agg.Values["1.0"] != float64(15) {
+ t.Errorf("expected aggregation value for \"1.0\" = %v; got: %v", float64(15), agg.Values["1.0"])
+ }
+ if agg.Values["5.0"] != float64(20) {
+ t.Errorf("expected aggregation value for \"5.0\" = %v; got: %v", float64(20), agg.Values["5.0"])
+ }
+ if agg.Values["25.0"] != float64(23) {
+ t.Errorf("expected aggregation value for \"25.0\" = %v; got: %v", float64(23), agg.Values["25.0"])
+ }
+ if agg.Values["50.0"] != float64(25) {
+ t.Errorf("expected aggregation value for \"50.0\" = %v; got: %v", float64(25), agg.Values["50.0"])
+ }
+ if agg.Values["75.0"] != float64(29) {
+ t.Errorf("expected aggregation value for \"75.0\" = %v; got: %v", float64(29), agg.Values["75.0"])
+ }
+ if agg.Values["95.0"] != float64(60) {
+ t.Errorf("expected aggregation value for \"95.0\" = %v; got: %v", float64(60), agg.Values["95.0"])
+ }
+ if agg.Values["99.0"] != float64(150) {
+ t.Errorf("expected aggregation value for \"99.0\" = %v; got: %v", float64(150), agg.Values["99.0"])
+ }
+}
+
+func TestAggsMetricsPercentileRanks(t *testing.T) {
+ s := `{
+ "load_time_outlier": {
+ "values" : {
+ "15": 92,
+ "30": 100
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.PercentileRanks("load_time_outlier")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Values == nil {
+ t.Fatalf("expected aggregation Values != nil; got: %v", agg.Values)
+ }
+ if len(agg.Values) != 2 {
+ t.Fatalf("expected %d aggregation Values; got: %d", 7, len(agg.Values))
+ }
+ if agg.Values["15"] != float64(92) {
+ t.Errorf("expected aggregation value for \"15\" = %v; got: %v", float64(92), agg.Values["15"])
+ }
+ if agg.Values["30"] != float64(100) {
+ t.Errorf("expected aggregation value for \"30\" = %v; got: %v", float64(100), agg.Values["30"])
+ }
+}
+
+func TestAggsMetricsTopHits(t *testing.T) {
+ s := `{
+ "top-tags": {
+ "buckets": [
+ {
+ "key": "windows-7",
+ "doc_count": 25365,
+ "top_tags_hits": {
+ "hits": {
+ "total": 25365,
+ "max_score": 1,
+ "hits": [
+ {
+ "_index": "stack",
+ "_type": "question",
+ "_id": "602679",
+ "_score": 1,
+ "_source": {
+ "title": "Windows port opening"
+ },
+ "sort": [
+ 1370143231177
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "key": "linux",
+ "doc_count": 18342,
+ "top_tags_hits": {
+ "hits": {
+ "total": 18342,
+ "max_score": 1,
+ "hits": [
+ {
+ "_index": "stack",
+ "_type": "question",
+ "_id": "602672",
+ "_score": 1,
+ "_source": {
+ "title": "Ubuntu RFID Screensaver lock-unlock"
+ },
+ "sort": [
+ 1370143379747
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "key": "windows",
+ "doc_count": 18119,
+ "top_tags_hits": {
+ "hits": {
+ "total": 18119,
+ "max_score": 1,
+ "hits": [
+ {
+ "_index": "stack",
+ "_type": "question",
+ "_id": "602678",
+ "_score": 1,
+ "_source": {
+ "title": "If I change my computers date / time, what could be affected?"
+ },
+ "sort": [
+ 1370142868283
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Terms("top-tags")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 3 {
+ t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != "windows-7" {
+ t.Errorf("expected bucket key = %q; got: %q", "windows-7", agg.Buckets[0].Key)
+ }
+ if agg.Buckets[1].Key != "linux" {
+ t.Errorf("expected bucket key = %q; got: %q", "linux", agg.Buckets[1].Key)
+ }
+ if agg.Buckets[2].Key != "windows" {
+ t.Errorf("expected bucket key = %q; got: %q", "windows", agg.Buckets[2].Key)
+ }
+
+ // Sub-aggregation of top-hits
+ subAgg, found := agg.Buckets[0].TopHits("top_tags_hits")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Hits == nil {
+ t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
+ }
+ if subAgg.Hits.TotalHits != 25365 {
+ t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 25365, subAgg.Hits.TotalHits)
+ }
+ if subAgg.Hits.MaxScore == nil {
+ t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore)
+ }
+ if *subAgg.Hits.MaxScore != float64(1.0) {
+ t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
+ }
+
+ subAgg, found = agg.Buckets[1].TopHits("top_tags_hits")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Hits == nil {
+ t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
+ }
+ if subAgg.Hits.TotalHits != 18342 {
+ t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18342, subAgg.Hits.TotalHits)
+ }
+ if subAgg.Hits.MaxScore == nil {
+ t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore)
+ }
+ if *subAgg.Hits.MaxScore != float64(1.0) {
+ t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
+ }
+
+ subAgg, found = agg.Buckets[2].TopHits("top_tags_hits")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Hits == nil {
+ t.Fatalf("expected sub aggregation Hits != nil; got: %v", subAgg.Hits)
+ }
+ if subAgg.Hits.TotalHits != 18119 {
+ t.Fatalf("expected sub aggregation Hits.TotalHits = %d; got: %d", 18119, subAgg.Hits.TotalHits)
+ }
+ if subAgg.Hits.MaxScore == nil {
+ t.Fatalf("expected sub aggregation Hits.MaxScore != %v; got: %v", nil, *subAgg.Hits.MaxScore)
+ }
+ if *subAgg.Hits.MaxScore != float64(1.0) {
+ t.Fatalf("expected sub aggregation Hits.MaxScore = %v; got: %v", float64(1.0), *subAgg.Hits.MaxScore)
+ }
+}
+
+func TestAggsBucketGlobal(t *testing.T) {
+ s := `{
+ "all_products" : {
+ "doc_count" : 100,
+ "avg_price" : {
+ "value" : 56.3
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Global("all_products")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 100 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount)
+ }
+
+ // Sub-aggregation
+ subAgg, found := agg.Avg("avg_price")
+ if !found {
+ t.Fatalf("expected sub-aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Value == nil {
+ t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)
+ }
+ if *subAgg.Value != float64(56.3) {
+ t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value)
+ }
+}
+
+func TestAggsBucketFilter(t *testing.T) {
+ s := `{
+ "in_stock_products" : {
+ "doc_count" : 100,
+ "avg_price" : { "value" : 56.3 }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Filter("in_stock_products")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 100 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 100, agg.DocCount)
+ }
+
+ // Sub-aggregation
+ subAgg, found := agg.Avg("avg_price")
+ if !found {
+ t.Fatalf("expected sub-aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Value == nil {
+ t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)
+ }
+ if *subAgg.Value != float64(56.3) {
+ t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(56.3), *subAgg.Value)
+ }
+}
+
+func TestAggsBucketFiltersWithBuckets(t *testing.T) {
+ s := `{
+ "messages" : {
+ "buckets" : [
+ {
+ "doc_count" : 34,
+ "monthly" : {
+ "buckets" : []
+ }
+ },
+ {
+ "doc_count" : 439,
+ "monthly" : {
+ "buckets" : []
+ }
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Filters("messages")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Fatalf("expected %d buckets; got: %d", 2, len(agg.Buckets))
+ }
+
+ if agg.Buckets[0].DocCount != 34 {
+ t.Fatalf("expected DocCount = %d; got: %d", 34, agg.Buckets[0].DocCount)
+ }
+ subAgg, found := agg.Buckets[0].Histogram("monthly")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
+ }
+
+ if agg.Buckets[1].DocCount != 439 {
+ t.Fatalf("expected DocCount = %d; got: %d", 439, agg.Buckets[1].DocCount)
+ }
+ subAgg, found = agg.Buckets[1].Histogram("monthly")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
+ }
+}
+
+func TestAggsBucketFiltersWithNamedBuckets(t *testing.T) {
+ s := `{
+ "messages" : {
+ "buckets" : {
+ "errors" : {
+ "doc_count" : 34,
+ "monthly" : {
+ "buckets" : []
+ }
+ },
+ "warnings" : {
+ "doc_count" : 439,
+ "monthly" : {
+ "buckets" : []
+ }
+ }
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Filters("messages")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.NamedBuckets == nil {
+ t.Fatalf("expected aggregation buckets != %v; got: %v", nil, agg.NamedBuckets)
+ }
+ if len(agg.NamedBuckets) != 2 {
+ t.Fatalf("expected %d buckets; got: %d", 2, len(agg.NamedBuckets))
+ }
+
+ if agg.NamedBuckets["errors"].DocCount != 34 {
+ t.Fatalf("expected DocCount = %d; got: %d", 34, agg.NamedBuckets["errors"].DocCount)
+ }
+ subAgg, found := agg.NamedBuckets["errors"].Histogram("monthly")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
+ }
+
+ if agg.NamedBuckets["warnings"].DocCount != 439 {
+ t.Fatalf("expected DocCount = %d; got: %d", 439, agg.NamedBuckets["warnings"].DocCount)
+ }
+ subAgg, found = agg.NamedBuckets["warnings"].Histogram("monthly")
+ if !found {
+ t.Fatalf("expected sub aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub aggregation != %v; got: %v", nil, subAgg)
+ }
+}
+
+func TestAggsBucketMissing(t *testing.T) {
+ s := `{
+ "products_without_a_price" : {
+ "doc_count" : 10
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Missing("products_without_a_price")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 10 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)
+ }
+}
+
+func TestAggsBucketNested(t *testing.T) {
+ s := `{
+ "resellers": {
+ "min_price": {
+ "value" : 350
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Nested("resellers")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 0 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 0, agg.DocCount)
+ }
+
+ // Sub-aggregation
+ subAgg, found := agg.Avg("min_price")
+ if !found {
+ t.Fatalf("expected sub-aggregation to be found; got: %v", found)
+ }
+ if subAgg == nil {
+ t.Fatalf("expected sub-aggregation != nil; got: %v", subAgg)
+ }
+ if subAgg.Value == nil {
+ t.Fatalf("expected sub-aggregation value != nil; got: %v", subAgg.Value)
+ }
+ if *subAgg.Value != float64(350) {
+ t.Fatalf("expected sub-aggregation value = %v; got: %v", float64(350), *subAgg.Value)
+ }
+}
+
+func TestAggsBucketReverseNested(t *testing.T) {
+ s := `{
+ "comment_to_issue": {
+ "doc_count" : 10
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.ReverseNested("comment_to_issue")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 10 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)
+ }
+}
+
+func TestAggsBucketChildren(t *testing.T) {
+ s := `{
+ "to-answers": {
+ "doc_count" : 10
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Children("to-answers")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 10 {
+ t.Fatalf("expected aggregation DocCount = %d; got: %d", 10, agg.DocCount)
+ }
+}
+
+func TestAggsBucketTerms(t *testing.T) {
+ s := `{
+ "users" : {
+ "doc_count_error_upper_bound" : 1,
+ "sum_other_doc_count" : 2,
+ "buckets" : [ {
+ "key" : "olivere",
+ "doc_count" : 2
+ }, {
+ "key" : "sandrae",
+ "doc_count" : 1
+ } ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Terms("users")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != "olivere" {
+ t.Errorf("expected key %q; got: %q", "olivere", agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].DocCount != 2 {
+ t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != "sandrae" {
+ t.Errorf("expected key %q; got: %q", "sandrae", agg.Buckets[1].Key)
+ }
+ if agg.Buckets[1].DocCount != 1 {
+ t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsBucketTermsWithNumericKeys(t *testing.T) {
+ s := `{
+ "users" : {
+ "doc_count_error_upper_bound" : 1,
+ "sum_other_doc_count" : 2,
+ "buckets" : [ {
+ "key" : 17,
+ "doc_count" : 2
+ }, {
+ "key" : 21,
+ "doc_count" : 1
+ } ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Terms("users")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
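+ // With interface{} targets, encoding/json decodes JSON numbers as
+ // float64, so Key compares against float64; KeyNumber keeps the raw
+ // literal as a json.Number for lossless integer conversion.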
+ if agg.Buckets[0].Key != float64(17) {
+ t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key)
+ }
+ if got, err := agg.Buckets[0].KeyNumber.Int64(); err != nil {
+ t.Errorf("expected to convert key to int64; got: %v", err)
+ } else if got != 17 {
+ t.Errorf("expected key %v; got: %v", 17, agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].DocCount != 2 {
+ t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != float64(21) {
+ t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key)
+ }
+ if got, err := agg.Buckets[1].KeyNumber.Int64(); err != nil {
+ t.Errorf("expected to convert key to int64; got: %v", err)
+ } else if got != 21 {
+ t.Errorf("expected key %v; got: %v", 21, agg.Buckets[1].Key)
+ }
+ if agg.Buckets[1].DocCount != 1 {
+ t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsBucketTermsWithBoolKeys(t *testing.T) {
+ s := `{
+ "users" : {
+ "doc_count_error_upper_bound" : 1,
+ "sum_other_doc_count" : 2,
+ "buckets" : [ {
+ "key" : true,
+ "doc_count" : 2
+ }, {
+ "key" : false,
+ "doc_count" : 1
+ } ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Terms("users")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != true {
+ t.Errorf("expected key %v; got: %v", true, agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].DocCount != 2 {
+ t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != false {
+ t.Errorf("expected key %v; got: %v", false, agg.Buckets[1].Key)
+ }
+ if agg.Buckets[1].DocCount != 1 {
+ t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsBucketSignificantTerms(t *testing.T) {
+ s := `{
+ "significantCrimeTypes" : {
+ "doc_count": 47347,
+ "buckets" : [
+ {
+ "key": "Bicycle theft",
+ "doc_count": 3640,
+ "score": 0.371235374214817,
+ "bg_count": 66799
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.SignificantTerms("significantCrimeTypes")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 47347 {
+ t.Fatalf("expected aggregation DocCount != %d; got: %d", 47347, agg.DocCount)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 1 {
+ t.Errorf("expected %d bucket entries; got: %d", 1, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != "Bicycle theft" {
+ t.Errorf("expected key = %q; got: %q", "Bicycle theft", agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].DocCount != 3640 {
+ t.Errorf("expected doc count = %d; got: %d", 3640, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[0].Score != float64(0.371235374214817) {
+ t.Errorf("expected score = %v; got: %v", float64(0.371235374214817), agg.Buckets[0].Score)
+ }
+ if agg.Buckets[0].BgCount != 66799 {
+ t.Errorf("expected BgCount = %d; got: %d", 66799, agg.Buckets[0].BgCount)
+ }
+}
+
+func TestAggsBucketSampler(t *testing.T) {
+ s := `{
+ "sample" : {
+ "doc_count": 1000,
+ "keywords": {
+ "doc_count": 1000,
+ "buckets" : [
+ {
+ "key": "bend",
+ "doc_count": 58,
+ "score": 37.982536582524276,
+ "bg_count": 103
+ }
+ ]
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Sampler("sample")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.DocCount != 1000 {
+ t.Fatalf("expected aggregation DocCount != %d; got: %d", 1000, agg.DocCount)
+ }
+ sub, found := agg.Aggregations["keywords"]
+ if !found {
+ t.Fatalf("expected sub aggregation %q", "keywords")
+ }
+ if sub == nil {
+ t.Fatalf("expected sub aggregation %q; got: %v", "keywords", sub)
+ }
+}
+
+func TestAggsBucketRange(t *testing.T) {
+ s := `{
+ "price_ranges" : {
+ "buckets": [
+ {
+ "to": 50,
+ "doc_count": 2
+ },
+ {
+ "from": 50,
+ "to": 100,
+ "doc_count": 4
+ },
+ {
+ "from": 100,
+ "doc_count": 4
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Range("price_ranges")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 3 {
+ t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
+ }
+ if agg.Buckets[0].From != nil {
+ t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+ }
+ if agg.Buckets[0].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+ }
+ if *agg.Buckets[0].To != float64(50) {
+ t.Errorf("expected To = %v; got: %v", float64(50), *agg.Buckets[0].To)
+ }
+ if agg.Buckets[0].DocCount != 2 {
+ t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
+ }
+ if *agg.Buckets[1].From != float64(50) {
+ t.Errorf("expected From = %v; got: %v", float64(50), *agg.Buckets[1].From)
+ }
+ if agg.Buckets[1].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
+ }
+ if *agg.Buckets[1].To != float64(100) {
+ t.Errorf("expected To = %v; got: %v", float64(100), *agg.Buckets[1].To)
+ }
+ if agg.Buckets[1].DocCount != 4 {
+ t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[1].DocCount)
+ }
+ if agg.Buckets[2].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
+ }
+ if *agg.Buckets[2].From != float64(100) {
+ t.Errorf("expected From = %v; got: %v", float64(100), *agg.Buckets[2].From)
+ }
+ if agg.Buckets[2].To != nil {
+ t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
+ }
+ if agg.Buckets[2].DocCount != 4 {
+ t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[2].DocCount)
+ }
+}
+
+func TestAggsBucketDateRange(t *testing.T) {
+ s := `{
+ "range": {
+ "buckets": [
+ {
+ "to": 1.3437792E+12,
+ "to_as_string": "08-2012",
+ "doc_count": 7
+ },
+ {
+ "from": 1.3437792E+12,
+ "from_as_string": "08-2012",
+ "doc_count": 2
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.DateRange("range")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].From != nil {
+ t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+ }
+ if agg.Buckets[0].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+ }
+ if *agg.Buckets[0].To != float64(1.3437792E+12) {
+ t.Errorf("expected To = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[0].To)
+ }
+ if agg.Buckets[0].ToAsString != "08-2012" {
+ t.Errorf("expected ToAsString = %q; got: %q", "08-2012", agg.Buckets[0].ToAsString)
+ }
+ if agg.Buckets[0].DocCount != 7 {
+ t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
+ }
+ if *agg.Buckets[1].From != float64(1.3437792E+12) {
+ t.Errorf("expected From = %v; got: %v", float64(1.3437792E+12), *agg.Buckets[1].From)
+ }
+ if agg.Buckets[1].FromAsString != "08-2012" {
+ t.Errorf("expected FromAsString = %q; got: %q", "08-2012", agg.Buckets[1].FromAsString)
+ }
+ if agg.Buckets[1].To != nil {
+ t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To)
+ }
+ if agg.Buckets[1].DocCount != 2 {
+ t.Errorf("expected DocCount = %d; got: %d", 2, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsBucketIPRange(t *testing.T) {
+ s := `{
+ "ip_ranges": {
+ "buckets" : [
+ {
+ "to": 167772165,
+ "to_as_string": "10.0.0.5",
+ "doc_count": 4
+ },
+ {
+ "from": 167772165,
+ "from_as_string": "10.0.0.5",
+ "doc_count": 6
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.IPRange("ip_ranges")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].From != nil {
+ t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+ }
+ if agg.Buckets[0].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+ }
+ if *agg.Buckets[0].To != float64(167772165) {
+ t.Errorf("expected To = %v; got: %v", float64(167772165), *agg.Buckets[0].To)
+ }
+ if agg.Buckets[0].ToAsString != "10.0.0.5" {
+ t.Errorf("expected ToAsString = %q; got: %q", "10.0.0.5", agg.Buckets[0].ToAsString)
+ }
+ if agg.Buckets[0].DocCount != 4 {
+ t.Errorf("expected DocCount = %d; got: %d", 4, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
+ }
+ if *agg.Buckets[1].From != float64(167772165) {
+ t.Errorf("expected From = %v; got: %v", float64(167772165), *agg.Buckets[1].From)
+ }
+ if agg.Buckets[1].FromAsString != "10.0.0.5" {
+ t.Errorf("expected FromAsString = %q; got: %q", "10.0.0.5", agg.Buckets[1].FromAsString)
+ }
+ if agg.Buckets[1].To != nil {
+ t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[1].To)
+ }
+ if agg.Buckets[1].DocCount != 6 {
+ t.Errorf("expected DocCount = %d; got: %d", 6, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsBucketHistogram(t *testing.T) {
+ s := `{
+ "prices" : {
+ "buckets": [
+ {
+ "key": 0,
+ "doc_count": 2
+ },
+ {
+ "key": 50,
+ "doc_count": 4
+ },
+ {
+ "key": 150,
+ "doc_count": 3
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Histogram("prices")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 3 {
+ t.Errorf("expected %d buckets; got: %d", 3, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != 0 {
+ t.Errorf("expected key = %v; got: %v", 0, agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].KeyAsString != nil {
+ t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[0].KeyAsString)
+ }
+ if agg.Buckets[0].DocCount != 2 {
+ t.Errorf("expected doc count = %d; got: %d", 2, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != 50 {
+ t.Errorf("expected key = %v; got: %v", 50, agg.Buckets[1].Key)
+ }
+ if agg.Buckets[1].KeyAsString != nil {
+ t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[1].KeyAsString)
+ }
+ if agg.Buckets[1].DocCount != 4 {
+ t.Errorf("expected doc count = %d; got: %d", 4, agg.Buckets[1].DocCount)
+ }
+ if agg.Buckets[2].Key != 150 {
+ t.Errorf("expected key = %v; got: %v", 150, agg.Buckets[2].Key)
+ }
+ if agg.Buckets[2].KeyAsString != nil {
+ t.Fatalf("expected key_as_string = %v; got: %q", nil, *agg.Buckets[2].KeyAsString)
+ }
+ if agg.Buckets[2].DocCount != 3 {
+ t.Errorf("expected doc count = %d; got: %d", 3, agg.Buckets[2].DocCount)
+ }
+}
+
+func TestAggsBucketDateHistogram(t *testing.T) {
+ s := `{
+ "articles_over_time": {
+ "buckets": [
+ {
+ "key_as_string": "2013-02-02",
+ "key": 1328140800000,
+ "doc_count": 1
+ },
+ {
+ "key_as_string": "2013-03-02",
+ "key": 1330646400000,
+ "doc_count": 2
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.DateHistogram("articles_over_time")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != 1328140800000 {
+ t.Errorf("expected key %v; got: %v", 1328140800000, agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].KeyAsString == nil {
+ t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[0].KeyAsString)
+ }
+ if *agg.Buckets[0].KeyAsString != "2013-02-02" {
+ t.Errorf("expected key_as_string %q; got: %q", "2013-02-02", *agg.Buckets[0].KeyAsString)
+ }
+ if agg.Buckets[0].DocCount != 1 {
+ t.Errorf("expected doc count %d; got: %d", 1, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != 1330646400000 {
+ t.Errorf("expected key %v; got: %v", 1330646400000, agg.Buckets[1].Key)
+ }
+ if agg.Buckets[1].KeyAsString == nil {
+ t.Fatalf("expected key_as_string != nil; got: %v", agg.Buckets[1].KeyAsString)
+ }
+ if *agg.Buckets[1].KeyAsString != "2013-03-02" {
+ t.Errorf("expected key_as_string %q; got: %q", "2013-03-02", *agg.Buckets[1].KeyAsString)
+ }
+ if agg.Buckets[1].DocCount != 2 {
+ t.Errorf("expected doc count %d; got: %d", 2, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsMetricsGeoBounds(t *testing.T) {
+ s := `{
+ "viewport": {
+ "bounds": {
+ "top_left": {
+ "lat": 80.45,
+ "lon": -160.22
+ },
+ "bottom_right": {
+ "lat": 40.65,
+ "lon": 42.57
+ }
+ }
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.GeoBounds("viewport")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Bounds.TopLeft.Latitude != float64(80.45) {
+		t.Fatalf("expected Bounds.TopLeft.Latitude = %v; got: %v", float64(80.45), agg.Bounds.TopLeft.Latitude)
+ }
+ if agg.Bounds.TopLeft.Longitude != float64(-160.22) {
+		t.Fatalf("expected Bounds.TopLeft.Longitude = %v; got: %v", float64(-160.22), agg.Bounds.TopLeft.Longitude)
+ }
+ if agg.Bounds.BottomRight.Latitude != float64(40.65) {
+		t.Fatalf("expected Bounds.BottomRight.Latitude = %v; got: %v", float64(40.65), agg.Bounds.BottomRight.Latitude)
+ }
+ if agg.Bounds.BottomRight.Longitude != float64(42.57) {
+		t.Fatalf("expected Bounds.BottomRight.Longitude = %v; got: %v", float64(42.57), agg.Bounds.BottomRight.Longitude)
+ }
+}
+
+func TestAggsBucketGeoHash(t *testing.T) {
+ s := `{
+ "myLarge-GrainGeoHashGrid": {
+ "buckets": [
+ {
+ "key": "svz",
+ "doc_count": 10964
+ },
+ {
+ "key": "sv8",
+ "doc_count": 3198
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.GeoHash("myLarge-GrainGeoHashGrid")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(agg.Buckets))
+ }
+ if agg.Buckets[0].Key != "svz" {
+ t.Errorf("expected key %q; got: %q", "svz", agg.Buckets[0].Key)
+ }
+ if agg.Buckets[0].DocCount != 10964 {
+ t.Errorf("expected doc count %d; got: %d", 10964, agg.Buckets[0].DocCount)
+ }
+ if agg.Buckets[1].Key != "sv8" {
+ t.Errorf("expected key %q; got: %q", "sv8", agg.Buckets[1].Key)
+ }
+ if agg.Buckets[1].DocCount != 3198 {
+ t.Errorf("expected doc count %d; got: %d", 3198, agg.Buckets[1].DocCount)
+ }
+}
+
+func TestAggsBucketGeoDistance(t *testing.T) {
+ s := `{
+ "rings" : {
+ "buckets": [
+ {
+ "unit": "km",
+ "to": 100.0,
+ "doc_count": 3
+ },
+ {
+ "unit": "km",
+ "from": 100.0,
+ "to": 300.0,
+ "doc_count": 1
+ },
+ {
+ "unit": "km",
+ "from": 300.0,
+ "doc_count": 7
+ }
+ ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.GeoDistance("rings")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Buckets == nil {
+ t.Fatalf("expected aggregation buckets != nil; got: %v", agg.Buckets)
+ }
+ if len(agg.Buckets) != 3 {
+ t.Errorf("expected %d bucket entries; got: %d", 3, len(agg.Buckets))
+ }
+ if agg.Buckets[0].From != nil {
+ t.Errorf("expected From = %v; got: %v", nil, agg.Buckets[0].From)
+ }
+ if agg.Buckets[0].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[0].To)
+ }
+ if *agg.Buckets[0].To != float64(100.0) {
+ t.Errorf("expected To = %v; got: %v", float64(100.0), *agg.Buckets[0].To)
+ }
+ if agg.Buckets[0].DocCount != 3 {
+		t.Errorf("expected DocCount = %d; got: %d", 3, agg.Buckets[0].DocCount)
+ }
+
+ if agg.Buckets[1].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[1].From)
+ }
+ if *agg.Buckets[1].From != float64(100.0) {
+ t.Errorf("expected From = %v; got: %v", float64(100.0), *agg.Buckets[1].From)
+ }
+ if agg.Buckets[1].To == nil {
+ t.Errorf("expected To != %v; got: %v", nil, agg.Buckets[1].To)
+ }
+ if *agg.Buckets[1].To != float64(300.0) {
+		t.Errorf("expected To = %v; got: %v", float64(300.0), *agg.Buckets[1].To)
+ }
+ if agg.Buckets[1].DocCount != 1 {
+ t.Errorf("expected DocCount = %d; got: %d", 1, agg.Buckets[1].DocCount)
+ }
+
+ if agg.Buckets[2].From == nil {
+ t.Errorf("expected From != %v; got: %v", nil, agg.Buckets[2].From)
+ }
+ if *agg.Buckets[2].From != float64(300.0) {
+ t.Errorf("expected From = %v; got: %v", float64(300.0), *agg.Buckets[2].From)
+ }
+ if agg.Buckets[2].To != nil {
+ t.Errorf("expected To = %v; got: %v", nil, agg.Buckets[2].To)
+ }
+ if agg.Buckets[2].DocCount != 7 {
+ t.Errorf("expected DocCount = %d; got: %d", 7, agg.Buckets[2].DocCount)
+ }
+}
+
+func TestAggsSubAggregates(t *testing.T) {
+ rs := `{
+ "users" : {
+ "doc_count_error_upper_bound" : 1,
+ "sum_other_doc_count" : 2,
+ "buckets" : [ {
+ "key" : "olivere",
+ "doc_count" : 2,
+ "ts" : {
+ "buckets" : [ {
+ "key_as_string" : "2012-01-01T00:00:00.000Z",
+ "key" : 1325376000000,
+ "doc_count" : 2
+ } ]
+ }
+ }, {
+ "key" : "sandrae",
+ "doc_count" : 1,
+ "ts" : {
+ "buckets" : [ {
+ "key_as_string" : "2011-01-01T00:00:00.000Z",
+ "key" : 1293840000000,
+ "doc_count" : 1
+ } ]
+ }
+ } ]
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(rs), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ // Access top-level aggregation
+ users, found := aggs.Terms("users")
+ if !found {
+ t.Fatalf("expected users aggregation to be found; got: %v", found)
+ }
+ if users == nil {
+ t.Fatalf("expected users aggregation; got: %v", users)
+ }
+ if users.Buckets == nil {
+ t.Fatalf("expected users buckets; got: %v", users.Buckets)
+ }
+ if len(users.Buckets) != 2 {
+ t.Errorf("expected %d bucket entries; got: %d", 2, len(users.Buckets))
+ }
+ if users.Buckets[0].Key != "olivere" {
+ t.Errorf("expected key %q; got: %q", "olivere", users.Buckets[0].Key)
+ }
+ if users.Buckets[0].DocCount != 2 {
+ t.Errorf("expected doc count %d; got: %d", 2, users.Buckets[0].DocCount)
+ }
+ if users.Buckets[1].Key != "sandrae" {
+ t.Errorf("expected key %q; got: %q", "sandrae", users.Buckets[1].Key)
+ }
+ if users.Buckets[1].DocCount != 1 {
+ t.Errorf("expected doc count %d; got: %d", 1, users.Buckets[1].DocCount)
+ }
+
+ // Access sub-aggregation
+ ts, found := users.Buckets[0].DateHistogram("ts")
+ if !found {
+ t.Fatalf("expected ts aggregation to be found; got: %v", found)
+ }
+ if ts == nil {
+ t.Fatalf("expected ts aggregation; got: %v", ts)
+ }
+ if ts.Buckets == nil {
+ t.Fatalf("expected ts buckets; got: %v", ts.Buckets)
+ }
+ if len(ts.Buckets) != 1 {
+ t.Errorf("expected %d bucket entries; got: %d", 1, len(ts.Buckets))
+ }
+ if ts.Buckets[0].Key != 1325376000000 {
+ t.Errorf("expected key %v; got: %v", 1325376000000, ts.Buckets[0].Key)
+ }
+ if ts.Buckets[0].KeyAsString == nil {
+ t.Fatalf("expected key_as_string != %v; got: %v", nil, ts.Buckets[0].KeyAsString)
+ }
+ if *ts.Buckets[0].KeyAsString != "2012-01-01T00:00:00.000Z" {
+ t.Errorf("expected key_as_string %q; got: %q", "2012-01-01T00:00:00.000Z", *ts.Buckets[0].KeyAsString)
+ }
+}
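
The drill-down above works because every decoded bucket embeds the same Aggregations map used at the top level, so the accessor methods compose all the way down. A minimal standalone sketch of that pattern, assuming only the vendored github.com/olivere/elastic package from this commit:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	raw := []byte(`{
		"users": {"buckets": [{
			"key": "olivere", "doc_count": 2,
			"ts": {"buckets": [{"key_as_string": "2012-01-01T00:00:00.000Z", "key": 1325376000000, "doc_count": 2}]}
		}]}
	}`)

	var aggs elastic.Aggregations
	if err := json.Unmarshal(raw, &aggs); err != nil {
		panic(err)
	}
	// Top-level access, then per-bucket sub-aggregation access.
	if users, ok := aggs.Terms("users"); ok {
		for _, b := range users.Buckets {
			if ts, ok := b.DateHistogram("ts"); ok {
				fmt.Printf("%v has %d date histogram bucket(s)\n", b.Key, len(ts.Buckets))
			}
		}
	}
}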
+
+func TestAggsPipelineAvgBucket(t *testing.T) {
+ s := `{
+ "avg_monthly_sales" : {
+ "value" : 328.33333333333333
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.AvgBucket("avg_monthly_sales")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(328.33333333333333) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(328.33333333333333), *agg.Value)
+ }
+}
+
+func TestAggsPipelineSumBucket(t *testing.T) {
+ s := `{
+ "sum_monthly_sales" : {
+ "value" : 985
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.SumBucket("sum_monthly_sales")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(985) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(985), *agg.Value)
+ }
+}
+
+func TestAggsPipelineMaxBucket(t *testing.T) {
+ s := `{
+ "max_monthly_sales" : {
+ "keys": ["2015/01/01 00:00:00"],
+ "value" : 550
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.MaxBucket("max_monthly_sales")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if len(agg.Keys) != 1 {
+ t.Fatalf("expected 1 key; got: %d", len(agg.Keys))
+ }
+ if got, want := agg.Keys[0], "2015/01/01 00:00:00"; got != want {
+ t.Fatalf("expected key %q; got: %v (%T)", want, got, got)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(550) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value)
+ }
+}
+
+func TestAggsPipelineMinBucket(t *testing.T) {
+ s := `{
+ "min_monthly_sales" : {
+ "keys": ["2015/02/01 00:00:00"],
+ "value" : 60
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.MinBucket("min_monthly_sales")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if len(agg.Keys) != 1 {
+ t.Fatalf("expected 1 key; got: %d", len(agg.Keys))
+ }
+ if got, want := agg.Keys[0], "2015/02/01 00:00:00"; got != want {
+ t.Fatalf("expected key %q; got: %v (%T)", want, got, got)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(60) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(60), *agg.Value)
+ }
+}
+
+func TestAggsPipelineMovAvg(t *testing.T) {
+ s := `{
+ "the_movavg" : {
+ "value" : 12.0
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.MovAvg("the_movavg")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(12.0) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(12.0), *agg.Value)
+ }
+}
+
+func TestAggsPipelineDerivative(t *testing.T) {
+ s := `{
+ "sales_deriv" : {
+ "value" : 315
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.Derivative("sales_deriv")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(315) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(315), *agg.Value)
+ }
+}
+
+func TestAggsPipelinePercentilesBucket(t *testing.T) {
+ s := `{
+ "sales_percentiles": {
+ "values": {
+ "25.0": 100,
+ "50.0": 200,
+ "75.0": 300
+ }
+ }
+}`
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.PercentilesBucket("sales_percentiles")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if len(agg.Values) != 3 {
+ t.Fatalf("expected aggregation map with three entries; got: %v", agg.Values)
+ }
+}
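
Note that the decoded Values field is a map keyed by the percentile rendered as a string ("25.0", "50.0", "75.0" above). A fragment continuing the decode in this test, assuming fmt is imported; printing the value directly avoids assuming its concrete type:

if agg, ok := aggs.PercentilesBucket("sales_percentiles"); ok {
	fmt.Println("median monthly sales:", agg.Values["50.0"]) // 200
}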
+
+func TestAggsPipelineStatsBucket(t *testing.T) {
+ s := `{
+ "stats_monthly_sales": {
+ "count": 3,
+ "min": 60.0,
+ "max": 550.0,
+ "avg": 328.3333333333333,
+ "sum": 985.0
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.StatsBucket("stats_monthly_sales")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Count != 3 {
+ t.Fatalf("expected aggregation count = %v; got: %v", 3, agg.Count)
+ }
+ if agg.Min == nil {
+ t.Fatalf("expected aggregation min != nil; got: %v", agg.Min)
+ }
+ if *agg.Min != float64(60.0) {
+ t.Fatalf("expected aggregation min = %v; got: %v", float64(60.0), *agg.Min)
+ }
+ if agg.Max == nil {
+ t.Fatalf("expected aggregation max != nil; got: %v", agg.Max)
+ }
+ if *agg.Max != float64(550.0) {
+ t.Fatalf("expected aggregation max = %v; got: %v", float64(550.0), *agg.Max)
+ }
+ if agg.Avg == nil {
+ t.Fatalf("expected aggregation avg != nil; got: %v", agg.Avg)
+ }
+ if *agg.Avg != float64(328.3333333333333) {
+ t.Fatalf("expected aggregation average = %v; got: %v", float64(328.3333333333333), *agg.Avg)
+ }
+ if agg.Sum == nil {
+ t.Fatalf("expected aggregation sum != nil; got: %v", agg.Sum)
+ }
+ if *agg.Sum != float64(985.0) {
+ t.Fatalf("expected aggregation sum = %v; got: %v", float64(985.0), *agg.Sum)
+ }
+}
+
+func TestAggsPipelineCumulativeSum(t *testing.T) {
+ s := `{
+ "cumulative_sales" : {
+ "value" : 550
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.CumulativeSum("cumulative_sales")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(550) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(550), *agg.Value)
+ }
+}
+
+func TestAggsPipelineBucketScript(t *testing.T) {
+ s := `{
+ "t-shirt-percentage" : {
+ "value" : 20
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.BucketScript("t-shirt-percentage")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(20) {
+ t.Fatalf("expected aggregation value = %v; got: %v", float64(20), *agg.Value)
+ }
+}
+
+func TestAggsPipelineSerialDiff(t *testing.T) {
+ s := `{
+ "the_diff" : {
+ "value" : -722.0
+ }
+}`
+
+ aggs := new(Aggregations)
+ err := json.Unmarshal([]byte(s), &aggs)
+ if err != nil {
+ t.Fatalf("expected no error decoding; got: %v", err)
+ }
+
+ agg, found := aggs.SerialDiff("the_diff")
+ if !found {
+ t.Fatalf("expected aggregation to be found; got: %v", found)
+ }
+ if agg == nil {
+ t.Fatalf("expected aggregation != nil; got: %v", agg)
+ }
+ if agg.Value == nil {
+ t.Fatalf("expected aggregation value != nil; got: %v", agg.Value)
+ }
+ if *agg.Value != float64(-722.0) {
+		t.Fatalf("expected aggregation value = %v; got: %v", float64(-722.0), *agg.Value)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_collapse_builder.go b/vendor/github.com/olivere/elastic/search_collapse_builder.go
new file mode 100644
index 000000000..b3c628ba3
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_collapse_builder.go
@@ -0,0 +1,68 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// CollapseBuilder enables field collapsing on a search request.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-collapse.html
+// for details.
+type CollapseBuilder struct {
+ field string
+ innerHit *InnerHit
+ maxConcurrentGroupRequests *int
+}
+
+// NewCollapseBuilder creates a new CollapseBuilder.
+func NewCollapseBuilder(field string) *CollapseBuilder {
+ return &CollapseBuilder{field: field}
+}
+
+// Field sets the field to collapse on.
+func (b *CollapseBuilder) Field(field string) *CollapseBuilder {
+ b.field = field
+ return b
+}
+
+// InnerHit sets the inner hits option used to expand the collapsed results.
+func (b *CollapseBuilder) InnerHit(innerHit *InnerHit) *CollapseBuilder {
+ b.innerHit = innerHit
+ return b
+}
+
+// MaxConcurrentGroupRequests is the maximum number of group requests that are
+// allowed to run concurrently in the inner_hits phase.
+func (b *CollapseBuilder) MaxConcurrentGroupRequests(max int) *CollapseBuilder {
+ b.maxConcurrentGroupRequests = &max
+ return b
+}
+
+// Source generates the JSON serializable fragment for the CollapseBuilder.
+func (b *CollapseBuilder) Source() (interface{}, error) {
+ // {
+ // "field": "user",
+ // "inner_hits": {
+ // "name": "last_tweets",
+ // "size": 5,
+ // "sort": [{ "date": "asc" }]
+ // },
+ // "max_concurrent_group_searches": 4
+ // }
+ src := map[string]interface{}{
+ "field": b.field,
+ }
+
+ if b.innerHit != nil {
+ hits, err := b.innerHit.Source()
+ if err != nil {
+ return nil, err
+ }
+ src["inner_hits"] = hits
+ }
+
+ if b.maxConcurrentGroupRequests != nil {
+ src["max_concurrent_group_searches"] = *b.maxConcurrentGroupRequests
+ }
+
+ return src, nil
+}
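
For orientation, here is a hedged sketch of wiring the builder into a search request. The Collapse(...) setter on the search service is an assumption here; it is not part of this file:

package main

import (
	"context"

	elastic "github.com/olivere/elastic"
)

// collapseByUser collapses a match-all search on the "user" field and
// expands each group with up to five inner hits.
func collapseByUser(ctx context.Context, client *elastic.Client) error {
	collapse := elastic.NewCollapseBuilder("user").
		InnerHit(elastic.NewInnerHit().Name("last_tweets").Size(5)).
		MaxConcurrentGroupRequests(4)
	_, err := client.Search().
		Index("tweets").
		Query(elastic.NewMatchAllQuery()).
		Collapse(collapse). // assumed setter on the search service
		Do(ctx)
	return err
}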
diff --git a/vendor/github.com/olivere/elastic/search_collapse_builder_test.go b/vendor/github.com/olivere/elastic/search_collapse_builder_test.go
new file mode 100644
index 000000000..0b74fadab
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_collapse_builder_test.go
@@ -0,0 +1,29 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestCollapseBuilderSource(t *testing.T) {
+ b := NewCollapseBuilder("user").
+ InnerHit(NewInnerHit().Name("last_tweets").Size(5).Sort("date", true)).
+ MaxConcurrentGroupRequests(4)
+ src, err := b.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"field":"user","inner_hits":{"name":"last_tweets","size":5,"sort":[{"date":{"order":"asc"}}]},"max_concurrent_group_searches":4}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_bool.go b/vendor/github.com/olivere/elastic/search_queries_bool.go
new file mode 100644
index 000000000..a1ff17596
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_bool.go
@@ -0,0 +1,203 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "fmt"
+
+// A bool query matches documents matching boolean
+// combinations of other queries.
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-bool-query.html
+type BoolQuery struct {
+ Query
+ mustClauses []Query
+ mustNotClauses []Query
+ filterClauses []Query
+ shouldClauses []Query
+ boost *float64
+ minimumShouldMatch string
+ adjustPureNegative *bool
+ queryName string
+}
+
+// NewBoolQuery creates a new bool query.
+func NewBoolQuery() *BoolQuery {
+ return &BoolQuery{
+ mustClauses: make([]Query, 0),
+ mustNotClauses: make([]Query, 0),
+ filterClauses: make([]Query, 0),
+ shouldClauses: make([]Query, 0),
+ }
+}
+
+func (q *BoolQuery) Must(queries ...Query) *BoolQuery {
+ q.mustClauses = append(q.mustClauses, queries...)
+ return q
+}
+
+func (q *BoolQuery) MustNot(queries ...Query) *BoolQuery {
+ q.mustNotClauses = append(q.mustNotClauses, queries...)
+ return q
+}
+
+func (q *BoolQuery) Filter(filters ...Query) *BoolQuery {
+ q.filterClauses = append(q.filterClauses, filters...)
+ return q
+}
+
+func (q *BoolQuery) Should(queries ...Query) *BoolQuery {
+ q.shouldClauses = append(q.shouldClauses, queries...)
+ return q
+}
+
+func (q *BoolQuery) Boost(boost float64) *BoolQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q *BoolQuery) MinimumShouldMatch(minimumShouldMatch string) *BoolQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q *BoolQuery) MinimumNumberShouldMatch(minimumNumberShouldMatch int) *BoolQuery {
+ q.minimumShouldMatch = fmt.Sprintf("%d", minimumNumberShouldMatch)
+ return q
+}
+
+func (q *BoolQuery) AdjustPureNegative(adjustPureNegative bool) *BoolQuery {
+ q.adjustPureNegative = &adjustPureNegative
+ return q
+}
+
+func (q *BoolQuery) QueryName(queryName string) *BoolQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source creates the query source for the bool query.
+func (q *BoolQuery) Source() (interface{}, error) {
+ // {
+ // "bool" : {
+ // "must" : {
+ // "term" : { "user" : "kimchy" }
+ // },
+ // "must_not" : {
+ // "range" : {
+ // "age" : { "from" : 10, "to" : 20 }
+ // }
+ // },
+ // "filter" : [
+ // ...
+	//      ],
+ // "should" : [
+ // {
+ // "term" : { "tag" : "wow" }
+ // },
+ // {
+ // "term" : { "tag" : "elasticsearch" }
+ // }
+ // ],
+ // "minimum_should_match" : 1,
+ // "boost" : 1.0
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ boolClause := make(map[string]interface{})
+ query["bool"] = boolClause
+
+ // must
+ if len(q.mustClauses) == 1 {
+ src, err := q.mustClauses[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ boolClause["must"] = src
+ } else if len(q.mustClauses) > 1 {
+ var clauses []interface{}
+ for _, subQuery := range q.mustClauses {
+ src, err := subQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ clauses = append(clauses, src)
+ }
+ boolClause["must"] = clauses
+ }
+
+ // must_not
+ if len(q.mustNotClauses) == 1 {
+ src, err := q.mustNotClauses[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ boolClause["must_not"] = src
+ } else if len(q.mustNotClauses) > 1 {
+ var clauses []interface{}
+ for _, subQuery := range q.mustNotClauses {
+ src, err := subQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ clauses = append(clauses, src)
+ }
+ boolClause["must_not"] = clauses
+ }
+
+ // filter
+ if len(q.filterClauses) == 1 {
+ src, err := q.filterClauses[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ boolClause["filter"] = src
+ } else if len(q.filterClauses) > 1 {
+ var clauses []interface{}
+ for _, subQuery := range q.filterClauses {
+ src, err := subQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ clauses = append(clauses, src)
+ }
+ boolClause["filter"] = clauses
+ }
+
+ // should
+ if len(q.shouldClauses) == 1 {
+ src, err := q.shouldClauses[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ boolClause["should"] = src
+ } else if len(q.shouldClauses) > 1 {
+ var clauses []interface{}
+ for _, subQuery := range q.shouldClauses {
+ src, err := subQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ clauses = append(clauses, src)
+ }
+ boolClause["should"] = clauses
+ }
+
+ if q.boost != nil {
+ boolClause["boost"] = *q.boost
+ }
+ if q.minimumShouldMatch != "" {
+ boolClause["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.adjustPureNegative != nil {
+ boolClause["adjust_pure_negative"] = *q.adjustPureNegative
+ }
+ if q.queryName != "" {
+ boolClause["_name"] = q.queryName
+ }
+
+ return query, nil
+}
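
One detail worth noting in Source above: a single clause is emitted as a bare object, while two or more clauses become an array. A short sketch that prints both shapes, using only constructors from this vendored package:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	one := elastic.NewBoolQuery().Must(elastic.NewTermQuery("tag", "wow"))
	two := elastic.NewBoolQuery().Must(
		elastic.NewTermQuery("tag", "wow"),
		elastic.NewTermQuery("tag", "golang"),
	)
	for _, q := range []*elastic.BoolQuery{one, two} {
		src, err := q.Source()
		if err != nil {
			panic(err)
		}
		data, _ := json.Marshal(src)
		fmt.Println(string(data))
	}
	// Output:
	// {"bool":{"must":{"term":{"tag":"wow"}}}}
	// {"bool":{"must":[{"term":{"tag":"wow"}},{"term":{"tag":"golang"}}]}}
}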
diff --git a/vendor/github.com/olivere/elastic/search_queries_bool_test.go b/vendor/github.com/olivere/elastic/search_queries_bool_test.go
new file mode 100644
index 000000000..cdcc38de1
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_bool_test.go
@@ -0,0 +1,33 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestBoolQuery(t *testing.T) {
+ q := NewBoolQuery()
+ q = q.Must(NewTermQuery("tag", "wow"))
+ q = q.MustNot(NewRangeQuery("age").From(10).To(20))
+ q = q.Filter(NewTermQuery("account", "1"))
+ q = q.Should(NewTermQuery("tag", "sometag"), NewTermQuery("tag", "sometagtag"))
+ q = q.Boost(10)
+ q = q.QueryName("Test")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"bool":{"_name":"Test","boost":10,"filter":{"term":{"account":"1"}},"must":{"term":{"tag":"wow"}},"must_not":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"should":[{"term":{"tag":"sometag"}},{"term":{"tag":"sometagtag"}}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_boosting.go b/vendor/github.com/olivere/elastic/search_queries_boosting.go
new file mode 100644
index 000000000..0060a30a8
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_boosting.go
@@ -0,0 +1,97 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A boosting query can be used to effectively
+// demote results that match a given query.
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-boosting-query.html
+type BoostingQuery struct {
+ Query
+ positiveClause Query
+ negativeClause Query
+ negativeBoost *float64
+ boost *float64
+}
+
+// NewBoostingQuery creates a new boosting query.
+func NewBoostingQuery() *BoostingQuery {
+ return &BoostingQuery{}
+}
+
+func (q *BoostingQuery) Positive(positive Query) *BoostingQuery {
+ q.positiveClause = positive
+ return q
+}
+
+func (q *BoostingQuery) Negative(negative Query) *BoostingQuery {
+ q.negativeClause = negative
+ return q
+}
+
+func (q *BoostingQuery) NegativeBoost(negativeBoost float64) *BoostingQuery {
+ q.negativeBoost = &negativeBoost
+ return q
+}
+
+func (q *BoostingQuery) Boost(boost float64) *BoostingQuery {
+ q.boost = &boost
+ return q
+}
+
+// Source creates the query source for the boosting query.
+func (q *BoostingQuery) Source() (interface{}, error) {
+ // {
+ // "boosting" : {
+ // "positive" : {
+ // "term" : {
+ // "field1" : "value1"
+ // }
+ // },
+ // "negative" : {
+ // "term" : {
+ // "field2" : "value2"
+ // }
+ // },
+ // "negative_boost" : 0.2
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ boostingClause := make(map[string]interface{})
+ query["boosting"] = boostingClause
+
+ // Negative and positive clause as well as negative boost
+ // are mandatory in the Java client.
+
+ // positive
+ if q.positiveClause != nil {
+ src, err := q.positiveClause.Source()
+ if err != nil {
+ return nil, err
+ }
+ boostingClause["positive"] = src
+ }
+
+ // negative
+ if q.negativeClause != nil {
+ src, err := q.negativeClause.Source()
+ if err != nil {
+ return nil, err
+ }
+ boostingClause["negative"] = src
+ }
+
+ if q.negativeBoost != nil {
+ boostingClause["negative_boost"] = *q.negativeBoost
+ }
+
+ if q.boost != nil {
+ boostingClause["boost"] = *q.boost
+ }
+
+ return query, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_boosting_test.go b/vendor/github.com/olivere/elastic/search_queries_boosting_test.go
new file mode 100644
index 000000000..6c7f263f4
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_boosting_test.go
@@ -0,0 +1,30 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestBoostingQuery(t *testing.T) {
+ q := NewBoostingQuery()
+ q = q.Positive(NewTermQuery("tag", "wow"))
+ q = q.Negative(NewRangeQuery("age").From(10).To(20))
+ q = q.NegativeBoost(0.2)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"boosting":{"negative":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"negative_boost":0.2,"positive":{"term":{"tag":"wow"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_common_terms.go b/vendor/github.com/olivere/elastic/search_queries_common_terms.go
new file mode 100644
index 000000000..93a03de54
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_common_terms.go
@@ -0,0 +1,137 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// CommonTermsQuery is a modern alternative to stopwords
+// which improves the precision and recall of search results
+// (by taking stopwords into account), without sacrificing performance.
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-common-terms-query.html
+type CommonTermsQuery struct {
+ Query
+ name string
+ text interface{}
+ cutoffFreq *float64
+ highFreq *float64
+ highFreqOp string
+ highFreqMinimumShouldMatch string
+ lowFreq *float64
+ lowFreqOp string
+ lowFreqMinimumShouldMatch string
+ analyzer string
+ boost *float64
+ queryName string
+}
+
+// NewCommonTermsQuery creates and initializes a new common terms query.
+func NewCommonTermsQuery(name string, text interface{}) *CommonTermsQuery {
+ return &CommonTermsQuery{name: name, text: text}
+}
+
+func (q *CommonTermsQuery) CutoffFrequency(f float64) *CommonTermsQuery {
+ q.cutoffFreq = &f
+ return q
+}
+
+func (q *CommonTermsQuery) HighFreq(f float64) *CommonTermsQuery {
+ q.highFreq = &f
+ return q
+}
+
+func (q *CommonTermsQuery) HighFreqOperator(op string) *CommonTermsQuery {
+ q.highFreqOp = op
+ return q
+}
+
+func (q *CommonTermsQuery) HighFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery {
+ q.highFreqMinimumShouldMatch = minShouldMatch
+ return q
+}
+
+func (q *CommonTermsQuery) LowFreq(f float64) *CommonTermsQuery {
+ q.lowFreq = &f
+ return q
+}
+
+func (q *CommonTermsQuery) LowFreqOperator(op string) *CommonTermsQuery {
+ q.lowFreqOp = op
+ return q
+}
+
+func (q *CommonTermsQuery) LowFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery {
+ q.lowFreqMinimumShouldMatch = minShouldMatch
+ return q
+}
+
+func (q *CommonTermsQuery) Analyzer(analyzer string) *CommonTermsQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q *CommonTermsQuery) Boost(boost float64) *CommonTermsQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q *CommonTermsQuery) QueryName(queryName string) *CommonTermsQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source creates the query source for the common terms query.
+func (q *CommonTermsQuery) Source() (interface{}, error) {
+ // {
+ // "common": {
+ // "body": {
+ // "query": "this is bonsai cool",
+ // "cutoff_frequency": 0.001
+ // }
+ // }
+ // }
+ source := make(map[string]interface{})
+ body := make(map[string]interface{})
+ query := make(map[string]interface{})
+
+ source["common"] = body
+ body[q.name] = query
+ query["query"] = q.text
+
+ if q.cutoffFreq != nil {
+ query["cutoff_frequency"] = *q.cutoffFreq
+ }
+ if q.highFreq != nil {
+ query["high_freq"] = *q.highFreq
+ }
+ if q.highFreqOp != "" {
+ query["high_freq_operator"] = q.highFreqOp
+ }
+ if q.lowFreq != nil {
+ query["low_freq"] = *q.lowFreq
+ }
+ if q.lowFreqOp != "" {
+ query["low_freq_operator"] = q.lowFreqOp
+ }
+ if q.lowFreqMinimumShouldMatch != "" || q.highFreqMinimumShouldMatch != "" {
+ mm := make(map[string]interface{})
+ if q.lowFreqMinimumShouldMatch != "" {
+ mm["low_freq"] = q.lowFreqMinimumShouldMatch
+ }
+ if q.highFreqMinimumShouldMatch != "" {
+ mm["high_freq"] = q.highFreqMinimumShouldMatch
+ }
+ query["minimum_should_match"] = mm
+ }
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+
+ return source, nil
+}
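
As the branch above shows, the two per-frequency minimum_should_match values are nested under a single key rather than serialized flat. A sketch of the resulting shape, using only code from this commit:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	q := elastic.NewCommonTermsQuery("body", "nelly the elephant not as a cartoon").
		CutoffFrequency(0.001).
		LowFreqMinimumShouldMatch("2").
		HighFreqMinimumShouldMatch("3")
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
	// {"common":{"body":{"cutoff_frequency":0.001,
	//   "minimum_should_match":{"high_freq":"3","low_freq":"2"},
	//   "query":"nelly the elephant not as a cartoon"}}}
}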
diff --git a/vendor/github.com/olivere/elastic/search_queries_common_terms_test.go b/vendor/github.com/olivere/elastic/search_queries_common_terms_test.go
new file mode 100644
index 000000000..e841e7731
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_common_terms_test.go
@@ -0,0 +1,85 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ _ "net/http"
+ "testing"
+)
+
+func TestCommonTermsQuery(t *testing.T) {
+ q := NewCommonTermsQuery("message", "Golang").CutoffFrequency(0.001)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"common":{"message":{"cutoff_frequency":0.001,"query":"Golang"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchQueriesCommonTermsQuery(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Common terms query
+ q := NewCommonTermsQuery("message", "Golang")
+ searchResult, err := client.Search().Index(testIndexName).Query(q).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 1 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 1 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_constant_score.go b/vendor/github.com/olivere/elastic/search_queries_constant_score.go
new file mode 100644
index 000000000..285d91817
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_constant_score.go
@@ -0,0 +1,59 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ConstantScoreQuery is a query that wraps a filter and simply returns
+// a constant score equal to the query boost for every document in the filter.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-constant-score-query.html
+type ConstantScoreQuery struct {
+ filter Query
+ boost *float64
+}
+
+// NewConstantScoreQuery creates and initializes a new constant score query.
+func NewConstantScoreQuery(filter Query) *ConstantScoreQuery {
+ return &ConstantScoreQuery{
+ filter: filter,
+ }
+}
+
+// Boost sets the boost for this query. Documents matching this query
+// will (in addition to the normal weightings) have their score multiplied
+// by the boost provided.
+func (q *ConstantScoreQuery) Boost(boost float64) *ConstantScoreQuery {
+ q.boost = &boost
+ return q
+}
+
+// Source returns the query source.
+func (q *ConstantScoreQuery) Source() (interface{}, error) {
+ // "constant_score" : {
+ // "filter" : {
+ // ....
+ // },
+ // "boost" : 1.5
+ // }
+
+ query := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ query["constant_score"] = params
+
+ // filter
+ src, err := q.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ params["filter"] = src
+
+ // boost
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+
+ return query, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_constant_score_test.go b/vendor/github.com/olivere/elastic/search_queries_constant_score_test.go
new file mode 100644
index 000000000..6508a91fb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_constant_score_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestConstantScoreQuery(t *testing.T) {
+ q := NewConstantScoreQuery(NewTermQuery("user", "kimchy")).Boost(1.2)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"constant_score":{"boost":1.2,"filter":{"term":{"user":"kimchy"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_dis_max.go b/vendor/github.com/olivere/elastic/search_queries_dis_max.go
new file mode 100644
index 000000000..7a4f53a97
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_dis_max.go
@@ -0,0 +1,104 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// DisMaxQuery is a query that generates the union of documents produced by
+// its subqueries, and that scores each document with the maximum score
+// for that document as produced by any subquery, plus a tie breaking
+// increment for any additional matching subqueries.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-dis-max-query.html
+type DisMaxQuery struct {
+ queries []Query
+ boost *float64
+ tieBreaker *float64
+ queryName string
+}
+
+// NewDisMaxQuery creates and initializes a new dis max query.
+func NewDisMaxQuery() *DisMaxQuery {
+ return &DisMaxQuery{
+ queries: make([]Query, 0),
+ }
+}
+
+// Query adds one or more queries to the dis max query.
+func (q *DisMaxQuery) Query(queries ...Query) *DisMaxQuery {
+ q.queries = append(q.queries, queries...)
+ return q
+}
+
+// Boost sets the boost for this query. Documents matching this query will
+// (in addition to the normal weightings) have their score multiplied by
+// the boost provided.
+func (q *DisMaxQuery) Boost(boost float64) *DisMaxQuery {
+ q.boost = &boost
+ return q
+}
+
+// TieBreaker is the factor with which the score of each non-maximum disjunct
+// for a document is multiplied before being added into the final score.
+//
+// If non-zero, the value should be small, on the order of 0.1, which says
+// that 10 occurrences of a word in a lower-scored field that is also in a
+// higher-scored field count just as much as a unique word in the lower-scored
+// field (i.e., one that is not in any higher-scored field).
+func (q *DisMaxQuery) TieBreaker(tieBreaker float64) *DisMaxQuery {
+ q.tieBreaker = &tieBreaker
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched filters per hit.
+func (q *DisMaxQuery) QueryName(queryName string) *DisMaxQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns the JSON serializable content for this query.
+func (q *DisMaxQuery) Source() (interface{}, error) {
+ // {
+ // "dis_max" : {
+ // "tie_breaker" : 0.7,
+ // "boost" : 1.2,
+	//    "queries" : [
+ // {
+ // "term" : { "age" : 34 }
+ // },
+ // {
+ // "term" : { "age" : 35 }
+ // }
+ // ]
+ // }
+ // }
+
+ query := make(map[string]interface{})
+ params := make(map[string]interface{})
+ query["dis_max"] = params
+
+ if q.tieBreaker != nil {
+ params["tie_breaker"] = *q.tieBreaker
+ }
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+
+ // queries
+ var clauses []interface{}
+ for _, subQuery := range q.queries {
+ src, err := subQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ clauses = append(clauses, src)
+ }
+ params["queries"] = clauses
+
+ return query, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_dis_max_test.go b/vendor/github.com/olivere/elastic/search_queries_dis_max_test.go
new file mode 100644
index 000000000..76ddfb079
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_dis_max_test.go
@@ -0,0 +1,28 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestDisMaxQuery(t *testing.T) {
+ q := NewDisMaxQuery()
+ q = q.Query(NewTermQuery("age", 34), NewTermQuery("age", 35)).Boost(1.2).TieBreaker(0.7)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"dis_max":{"boost":1.2,"queries":[{"term":{"age":34}},{"term":{"age":35}}],"tie_breaker":0.7}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_exists.go b/vendor/github.com/olivere/elastic/search_queries_exists.go
new file mode 100644
index 000000000..ac7378bad
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_exists.go
@@ -0,0 +1,49 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExistsQuery is a query that matches only documents in which the
+// specified field has a value.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-exists-query.html
+type ExistsQuery struct {
+ name string
+ queryName string
+}
+
+// NewExistsQuery creates and initializes a new exists query.
+func NewExistsQuery(name string) *ExistsQuery {
+ return &ExistsQuery{
+ name: name,
+ }
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched queries per hit.
+func (q *ExistsQuery) QueryName(queryName string) *ExistsQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns the JSON serializable content for this query.
+func (q *ExistsQuery) Source() (interface{}, error) {
+ // {
+ // "exists" : {
+ // "field" : "user"
+ // }
+ // }
+
+ query := make(map[string]interface{})
+ params := make(map[string]interface{})
+ query["exists"] = params
+
+ params["field"] = q.name
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+
+ return query, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_exists_test.go b/vendor/github.com/olivere/elastic/search_queries_exists_test.go
new file mode 100644
index 000000000..f2d047087
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_exists_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestExistsQuery(t *testing.T) {
+ q := NewExistsQuery("user")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"exists":{"field":"user"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_fsq.go b/vendor/github.com/olivere/elastic/search_queries_fsq.go
new file mode 100644
index 000000000..4cabd9bd9
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_fsq.go
@@ -0,0 +1,171 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FunctionScoreQuery allows you to modify the score of documents that
+// are retrieved by a query. This can be useful if, for example,
+// a score function is computationally expensive and it is sufficient
+// to compute the score on a filtered set of documents.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
+type FunctionScoreQuery struct {
+ query Query
+ filter Query
+ boost *float64
+ maxBoost *float64
+ scoreMode string
+ boostMode string
+ filters []Query
+ scoreFuncs []ScoreFunction
+ minScore *float64
+ weight *float64
+}
+
+// NewFunctionScoreQuery creates and initializes a new function score query.
+func NewFunctionScoreQuery() *FunctionScoreQuery {
+ return &FunctionScoreQuery{
+ filters: make([]Query, 0),
+ scoreFuncs: make([]ScoreFunction, 0),
+ }
+}
+
+// Query sets the query for the function score query.
+func (q *FunctionScoreQuery) Query(query Query) *FunctionScoreQuery {
+ q.query = query
+ return q
+}
+
+// Filter sets the filter for the function score query.
+func (q *FunctionScoreQuery) Filter(filter Query) *FunctionScoreQuery {
+ q.filter = filter
+ return q
+}
+
+// Add adds a score function that will execute on all the documents
+// matching the filter.
+func (q *FunctionScoreQuery) Add(filter Query, scoreFunc ScoreFunction) *FunctionScoreQuery {
+ q.filters = append(q.filters, filter)
+ q.scoreFuncs = append(q.scoreFuncs, scoreFunc)
+ return q
+}
+
+// AddScoreFunc adds a score function that will be executed on all documents.
+func (q *FunctionScoreQuery) AddScoreFunc(scoreFunc ScoreFunction) *FunctionScoreQuery {
+ q.filters = append(q.filters, nil)
+ q.scoreFuncs = append(q.scoreFuncs, scoreFunc)
+ return q
+}
+
+// ScoreMode defines how results of individual score functions will be aggregated.
+// Can be first, avg, max, sum, min, or multiply.
+func (q *FunctionScoreQuery) ScoreMode(scoreMode string) *FunctionScoreQuery {
+ q.scoreMode = scoreMode
+ return q
+}
+
+// BoostMode defines how the combined result of score functions will
+// influence the final score together with the sub query score.
+func (q *FunctionScoreQuery) BoostMode(boostMode string) *FunctionScoreQuery {
+ q.boostMode = boostMode
+ return q
+}
+
+// MaxBoost is the maximum boost that will be applied by function score.
+func (q *FunctionScoreQuery) MaxBoost(maxBoost float64) *FunctionScoreQuery {
+ q.maxBoost = &maxBoost
+ return q
+}
+
+// Boost sets the boost for this query. Documents matching this query will
+// (in addition to the normal weightings) have their score multiplied by the
+// boost provided.
+func (q *FunctionScoreQuery) Boost(boost float64) *FunctionScoreQuery {
+ q.boost = &boost
+ return q
+}
+
+// MinScore sets the minimum score.
+func (q *FunctionScoreQuery) MinScore(minScore float64) *FunctionScoreQuery {
+ q.minScore = &minScore
+ return q
+}
+
+// Source returns JSON for the function score query.
+func (q *FunctionScoreQuery) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["function_score"] = query
+
+ if q.query != nil {
+ src, err := q.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ query["query"] = src
+ }
+ if q.filter != nil {
+ src, err := q.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ query["filter"] = src
+ }
+
+ if len(q.filters) == 1 && q.filters[0] == nil {
+ // Weight needs to be serialized on this level.
+ if weight := q.scoreFuncs[0].GetWeight(); weight != nil {
+ query["weight"] = weight
+ }
+ // Serialize the score function
+ src, err := q.scoreFuncs[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ query[q.scoreFuncs[0].Name()] = src
+ } else {
+ funcs := make([]interface{}, len(q.filters))
+ for i, filter := range q.filters {
+ hsh := make(map[string]interface{})
+ if filter != nil {
+ src, err := filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ hsh["filter"] = src
+ }
+ // Weight needs to be serialized on this level.
+ if weight := q.scoreFuncs[i].GetWeight(); weight != nil {
+ hsh["weight"] = weight
+ }
+ // Serialize the score function
+ src, err := q.scoreFuncs[i].Source()
+ if err != nil {
+ return nil, err
+ }
+ hsh[q.scoreFuncs[i].Name()] = src
+ funcs[i] = hsh
+ }
+ query["functions"] = funcs
+ }
+
+ if q.scoreMode != "" {
+ query["score_mode"] = q.scoreMode
+ }
+ if q.boostMode != "" {
+ query["boost_mode"] = q.boostMode
+ }
+ if q.maxBoost != nil {
+ query["max_boost"] = *q.maxBoost
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.minScore != nil {
+ query["min_score"] = *q.minScore
+ }
+
+ return source, nil
+}
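
The sketch below exercises the "functions" array path in Source above: each entry carries its own filter, weight, and function fragment. Dropping the Add(...) call and keeping a single AddScoreFunc would instead inline the function (and its weight) at the top level. It uses only the decay function defined later in this same commit:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	recency := elastic.NewExponentialDecayFunction().
		FieldName("created").Origin("2018-01-01").Scale("7d").Weight(2)
	popularity := elastic.NewExponentialDecayFunction().
		FieldName("popularity").Origin(0).Scale(20)

	q := elastic.NewFunctionScoreQuery().
		Query(elastic.NewMatchAllQuery()).
		Add(elastic.NewTermQuery("tag", "wow"), recency). // filter + function
		AddScoreFunc(popularity).                         // function on all documents
		ScoreMode("sum").
		BoostMode("multiply").
		MaxBoost(10)

	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	// Two entries land in "functions": the first with "filter" and "weight",
	// the second with just its "exp" fragment.
	fmt.Println(string(data))
}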
diff --git a/vendor/github.com/olivere/elastic/search_queries_fsq_score_funcs.go b/vendor/github.com/olivere/elastic/search_queries_fsq_score_funcs.go
new file mode 100644
index 000000000..84cc52de9
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_fsq_score_funcs.go
@@ -0,0 +1,567 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "strings"
+)
+
+// ScoreFunction is used in combination with the Function Score Query.
+type ScoreFunction interface {
+ Name() string
+ GetWeight() *float64 // returns the weight which must be serialized at the level of FunctionScoreQuery
+ Source() (interface{}, error)
+}
+
+// -- Exponential Decay --
+
+// ExponentialDecayFunction builds an exponential decay score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
+// for details.
+type ExponentialDecayFunction struct {
+ fieldName string
+ origin interface{}
+ scale interface{}
+ decay *float64
+ offset interface{}
+ multiValueMode string
+ weight *float64
+}
+
+// NewExponentialDecayFunction creates a new ExponentialDecayFunction.
+func NewExponentialDecayFunction() *ExponentialDecayFunction {
+ return &ExponentialDecayFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn *ExponentialDecayFunction) Name() string {
+ return "exp"
+}
+
+// FieldName specifies the name of the field to which this decay function is applied.
+func (fn *ExponentialDecayFunction) FieldName(fieldName string) *ExponentialDecayFunction {
+ fn.fieldName = fieldName
+ return fn
+}
+
+// Origin defines the "central point" by which the decay function calculates
+// "distance".
+func (fn *ExponentialDecayFunction) Origin(origin interface{}) *ExponentialDecayFunction {
+ fn.origin = origin
+ return fn
+}
+
+// Scale defines the scale to be used with Decay.
+func (fn *ExponentialDecayFunction) Scale(scale interface{}) *ExponentialDecayFunction {
+ fn.scale = scale
+ return fn
+}
+
+// Decay defines how documents are scored at the distance given a Scale.
+// If no decay is defined, documents at the distance Scale will be scored 0.5.
+func (fn *ExponentialDecayFunction) Decay(decay float64) *ExponentialDecayFunction {
+ fn.decay = &decay
+ return fn
+}
+
+// Offset, if defined, computes the decay function only for a distance
+// greater than the defined offset.
+func (fn *ExponentialDecayFunction) Offset(offset interface{}) *ExponentialDecayFunction {
+ fn.offset = offset
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn *ExponentialDecayFunction) Weight(weight float64) *ExponentialDecayFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the weight to adjust the score with. It is part of the
+// ScoreFunction interface. Returns nil if weight is not specified.
+func (fn *ExponentialDecayFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// MultiValueMode specifies how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn *ExponentialDecayFunction) MultiValueMode(mode string) *ExponentialDecayFunction {
+ fn.multiValueMode = mode
+ return fn
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn *ExponentialDecayFunction) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source[fn.fieldName] = params
+ if fn.origin != nil {
+ params["origin"] = fn.origin
+ }
+ params["scale"] = fn.scale
+ if fn.decay != nil && *fn.decay > 0 {
+ params["decay"] = *fn.decay
+ }
+ if fn.offset != nil {
+ params["offset"] = fn.offset
+ }
+ if fn.multiValueMode != "" {
+ source["multi_value_mode"] = fn.multiValueMode
+ }
+ return source, nil
+}
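The decay builders also serialize on their own. A short sketch for the exponential variant, which applies equally to the gauss and linear variants below (same import-path assumption as above, illustrative field and date values):

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	fn := elastic.NewExponentialDecayFunction().
		FieldName("created_at").
		Origin("2018-01-29").
		Scale("10d").
		Offset("2d").
		Decay(0.5)
	src, err := fn.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src) // marshal error ignored in this sketch
	fmt.Println(string(data))
	// {"created_at":{"decay":0.5,"offset":"2d","origin":"2018-01-29","scale":"10d"}}
	// FunctionScoreQuery nests this output under the function's Name(), i.e. "exp".
}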
+
+// -- Gauss Decay --
+
+// GaussDecayFunction builds a gauss decay score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
+// for details.
+type GaussDecayFunction struct {
+ fieldName string
+ origin interface{}
+ scale interface{}
+ decay *float64
+ offset interface{}
+ multiValueMode string
+ weight *float64
+}
+
+// NewGaussDecayFunction returns a new GaussDecayFunction.
+func NewGaussDecayFunction() *GaussDecayFunction {
+ return &GaussDecayFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn *GaussDecayFunction) Name() string {
+ return "gauss"
+}
+
+// FieldName specifies the name of the field to which this decay function is applied.
+func (fn *GaussDecayFunction) FieldName(fieldName string) *GaussDecayFunction {
+ fn.fieldName = fieldName
+ return fn
+}
+
+// Origin defines the "central point" by which the decay function calculates
+// "distance".
+func (fn *GaussDecayFunction) Origin(origin interface{}) *GaussDecayFunction {
+ fn.origin = origin
+ return fn
+}
+
+// Scale defines the scale to be used with Decay.
+func (fn *GaussDecayFunction) Scale(scale interface{}) *GaussDecayFunction {
+ fn.scale = scale
+ return fn
+}
+
+// Decay defines how documents are scored at the distance given a Scale.
+// If no decay is defined, documents at the distance Scale will be scored 0.5.
+func (fn *GaussDecayFunction) Decay(decay float64) *GaussDecayFunction {
+ fn.decay = &decay
+ return fn
+}
+
+// Offset, if defined, computes the decay function only for a distance
+// greater than the defined offset.
+func (fn *GaussDecayFunction) Offset(offset interface{}) *GaussDecayFunction {
+ fn.offset = offset
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn *GaussDecayFunction) Weight(weight float64) *GaussDecayFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the weight to adjust the score with. It is part of the
+// ScoreFunction interface. Returns nil if weight is not specified.
+func (fn *GaussDecayFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// MultiValueMode specifies how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn *GaussDecayFunction) MultiValueMode(mode string) *GaussDecayFunction {
+ fn.multiValueMode = mode
+ return fn
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn *GaussDecayFunction) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source[fn.fieldName] = params
+ if fn.origin != nil {
+ params["origin"] = fn.origin
+ }
+ params["scale"] = fn.scale
+ if fn.decay != nil && *fn.decay > 0 {
+ params["decay"] = *fn.decay
+ }
+ if fn.offset != nil {
+ params["offset"] = fn.offset
+ }
+ if fn.multiValueMode != "" {
+ source["multi_value_mode"] = fn.multiValueMode
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source, nil
+}
+
+// -- Linear Decay --
+
+// LinearDecayFunction builds a linear decay score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
+// for details.
+type LinearDecayFunction struct {
+ fieldName string
+ origin interface{}
+ scale interface{}
+ decay *float64
+ offset interface{}
+ multiValueMode string
+ weight *float64
+}
+
+// NewLinearDecayFunction initializes and returns a new LinearDecayFunction.
+func NewLinearDecayFunction() *LinearDecayFunction {
+ return &LinearDecayFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn *LinearDecayFunction) Name() string {
+ return "linear"
+}
+
+// FieldName specifies the name of the field to which this decay function is applied.
+func (fn *LinearDecayFunction) FieldName(fieldName string) *LinearDecayFunction {
+ fn.fieldName = fieldName
+ return fn
+}
+
+// Origin defines the "central point" by which the decay function calculates
+// "distance".
+func (fn *LinearDecayFunction) Origin(origin interface{}) *LinearDecayFunction {
+ fn.origin = origin
+ return fn
+}
+
+// Scale defines the scale to be used with Decay.
+func (fn *LinearDecayFunction) Scale(scale interface{}) *LinearDecayFunction {
+ fn.scale = scale
+ return fn
+}
+
+// Decay defines how documents are scored at the distance given a Scale.
+// If no decay is defined, documents at the distance Scale will be scored 0.5.
+func (fn *LinearDecayFunction) Decay(decay float64) *LinearDecayFunction {
+ fn.decay = &decay
+ return fn
+}
+
+// Offset, if defined, computes the decay function only for a distance
+// greater than the defined offset.
+func (fn *LinearDecayFunction) Offset(offset interface{}) *LinearDecayFunction {
+ fn.offset = offset
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn *LinearDecayFunction) Weight(weight float64) *LinearDecayFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the weight to adjust the score with. It is part of the
+// ScoreFunction interface. Returns nil if weight is not specified.
+func (fn *LinearDecayFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// MultiValueMode specifies how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn *LinearDecayFunction) MultiValueMode(mode string) *LinearDecayFunction {
+ fn.multiValueMode = mode
+ return fn
+}
+
+// GetMultiValueMode returns how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn *LinearDecayFunction) GetMultiValueMode() string {
+ return fn.multiValueMode
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn *LinearDecayFunction) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source[fn.fieldName] = params
+ if fn.origin != nil {
+ params["origin"] = fn.origin
+ }
+ params["scale"] = fn.scale
+ if fn.decay != nil && *fn.decay > 0 {
+ params["decay"] = *fn.decay
+ }
+ if fn.offset != nil {
+ params["offset"] = fn.offset
+ }
+ if fn.multiValueMode != "" {
+ source["multi_value_mode"] = fn.multiValueMode
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source, nil
+}
+
+// -- Script --
+
+// ScriptFunction builds a script score function. It uses a script to
+// compute or influence the score of documents that match with the inner
+// query or filter.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_script_score
+// for details.
+type ScriptFunction struct {
+ script *Script
+ weight *float64
+}
+
+// NewScriptFunction initializes and returns a new ScriptFunction.
+func NewScriptFunction(script *Script) *ScriptFunction {
+ return &ScriptFunction{
+ script: script,
+ }
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn *ScriptFunction) Name() string {
+ return "script_score"
+}
+
+// Script specifies the script to be executed.
+func (fn *ScriptFunction) Script(script *Script) *ScriptFunction {
+ fn.script = script
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn *ScriptFunction) Weight(weight float64) *ScriptFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the weight to adjust the score with. It is part of the
+// ScoreFunction interface. Returns nil if weight is not specified.
+func (fn *ScriptFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn *ScriptFunction) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if fn.script != nil {
+ src, err := fn.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["script"] = src
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source, nil
+}
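A short sketch of the script score function; the script body and weight are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	fn := elastic.NewScriptFunction(elastic.NewScript("_score * doc['likes'].value")).
		Weight(1.2)
	src, err := fn.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src) // marshal error ignored in this sketch
	fmt.Println(string(data))
	// {"script":{"source":"_score * doc['likes'].value"}}
	// The weight (1.2) is not part of this output; the enclosing
	// FunctionScoreQuery serializes it next to the function.
}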
+
+// -- Field value factor --
+
+// FieldValueFactorFunction is a function score function that allows you
+// to use a field from a document to influence the score.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_field_value_factor.
+type FieldValueFactorFunction struct {
+ field string
+ factor *float64
+ missing *float64
+ weight *float64
+ modifier string
+}
+
+// NewFieldValueFactorFunction initializes and returns a new FieldValueFactorFunction.
+func NewFieldValueFactorFunction() *FieldValueFactorFunction {
+ return &FieldValueFactorFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn *FieldValueFactorFunction) Name() string {
+ return "field_value_factor"
+}
+
+// Field is the field to be extracted from the document.
+func (fn *FieldValueFactorFunction) Field(field string) *FieldValueFactorFunction {
+ fn.field = field
+ return fn
+}
+
+// Factor is the (optional) factor to multiply the field with. If you do not
+// specify a factor, the default is 1.
+func (fn *FieldValueFactorFunction) Factor(factor float64) *FieldValueFactorFunction {
+ fn.factor = &factor
+ return fn
+}
+
+// Modifier to apply to the field value. It can be one of: none, log, log1p,
+// log2p, ln, ln1p, ln2p, square, sqrt, or reciprocal. Defaults to: none.
+func (fn *FieldValueFactorFunction) Modifier(modifier string) *FieldValueFactorFunction {
+ fn.modifier = modifier
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn *FieldValueFactorFunction) Weight(weight float64) *FieldValueFactorFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the weight to adjust the score with. It is part of the
+// ScoreFunction interface. Returns nil if weight is not specified.
+func (fn *FieldValueFactorFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// Missing is the value used if a document does not have that field.
+func (fn *FieldValueFactorFunction) Missing(missing float64) *FieldValueFactorFunction {
+ fn.missing = &missing
+ return fn
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn *FieldValueFactorFunction) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if fn.field != "" {
+ source["field"] = fn.field
+ }
+ if fn.factor != nil {
+ source["factor"] = *fn.factor
+ }
+ if fn.missing != nil {
+ source["missing"] = *fn.missing
+ }
+ if fn.modifier != "" {
+ source["modifier"] = strings.ToLower(fn.modifier)
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source, nil
+}
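A short sketch of the field value factor function, with illustrative values:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	fn := elastic.NewFieldValueFactorFunction().
		Field("likes").
		Factor(1.2).
		Modifier("sqrt").
		Missing(1)
	src, err := fn.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src) // marshal error ignored in this sketch
	fmt.Println(string(data))
	// {"factor":1.2,"field":"likes","missing":1,"modifier":"sqrt"}
}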
+
+// -- Weight Factor --
+
+// WeightFactorFunction builds a weight factor function that multiplies
+// the weight to the score.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_weight
+// for details.
+type WeightFactorFunction struct {
+ weight float64
+}
+
+// NewWeightFactorFunction initializes and returns a new WeightFactorFunction.
+func NewWeightFactorFunction(weight float64) *WeightFactorFunction {
+ return &WeightFactorFunction{weight: weight}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn *WeightFactorFunction) Name() string {
+ return "weight"
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn *WeightFactorFunction) Weight(weight float64) *WeightFactorFunction {
+ fn.weight = weight
+ return fn
+}
+
+// GetWeight returns the weight to adjust the score with. It is part of the
+// ScoreFunction interface. For a WeightFactorFunction it is never nil.
+func (fn *WeightFactorFunction) GetWeight() *float64 {
+ return &fn.weight
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn *WeightFactorFunction) Source() (interface{}, error) {
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return fn.weight, nil
+}
+
+// -- Random --
+
+// RandomFunction builds a random score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_random
+// for details.
+type RandomFunction struct {
+ seed interface{}
+ weight *float64
+}
+
+// NewRandomFunction initializes and returns a new RandomFunction.
+func NewRandomFunction() *RandomFunction {
+ return &RandomFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn *RandomFunction) Name() string {
+ return "random_score"
+}
+
+// Seed is documented in 1.6 as a numeric value. However, in the source code
+// of the Java client, it also accepts strings. So we accept both here, too.
+func (fn *RandomFunction) Seed(seed interface{}) *RandomFunction {
+ fn.seed = seed
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn *RandomFunction) Weight(weight float64) *RandomFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the weight to adjust the score with. It is part of the
+// ScoreFunction interface. Returns nil if weight is not specified.
+func (fn *RandomFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn *RandomFunction) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if fn.seed != nil {
+ source["seed"] = fn.seed
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source, nil
+}
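A short sketch of the random score function, with an illustrative seed (same import-path assumption as the earlier sketches):

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	fn := elastic.NewRandomFunction().Seed(42)
	src, err := fn.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src) // marshal error ignored in this sketch
	fmt.Println(string(data))
	// {"seed":42}
	// Inside a FunctionScoreQuery this appears under "random_score"; a
	// weight, if set, is serialized next to it by the enclosing query.
}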
diff --git a/vendor/github.com/olivere/elastic/search_queries_fsq_test.go b/vendor/github.com/olivere/elastic/search_queries_fsq_test.go
new file mode 100644
index 000000000..256752d18
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_fsq_test.go
@@ -0,0 +1,166 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFunctionScoreQuery(t *testing.T) {
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ Add(NewTermQuery("name.last", "banon"), NewWeightFactorFunction(1.5)).
+ AddScoreFunc(NewWeightFactorFunction(3)).
+ AddScoreFunc(NewRandomFunction()).
+ Boost(3).
+ MaxBoost(10).
+ ScoreMode("avg")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"boost":3,"functions":[{"filter":{"term":{"name.last":"banon"}},"weight":1.5},{"weight":3},{"random_score":{}}],"max_boost":10,"query":{"term":{"name.last":"banon"}},"score_mode":"avg"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFunctionScoreQueryWithNilFilter(t *testing.T) {
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("tag", "wow")).
+ AddScoreFunc(NewRandomFunction()).
+ Boost(2.0).
+ MaxBoost(12.0).
+ BoostMode("multiply").
+ ScoreMode("max")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"boost":2,"boost_mode":"multiply","max_boost":12,"query":{"term":{"tag":"wow"}},"random_score":{},"score_mode":"max"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldValueFactor(t *testing.T) {
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income")).
+ Boost(2.0).
+ MaxBoost(12.0).
+ BoostMode("multiply").
+ ScoreMode("max")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldValueFactorWithWeight(t *testing.T) {
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)).
+ Boost(2.0).
+ MaxBoost(12.0).
+ BoostMode("multiply").
+ ScoreMode("max")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"boost":2,"boost_mode":"multiply","field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max","weight":2.5}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldValueFactorWithMultipleScoreFuncsAndWeights(t *testing.T) {
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ AddScoreFunc(NewFieldValueFactorFunction().Modifier("sqrt").Factor(2).Field("income").Weight(2.5)).
+ AddScoreFunc(NewScriptFunction(NewScript("_score * doc['my_numeric_field'].value")).Weight(1.25)).
+ AddScoreFunc(NewWeightFactorFunction(0.5)).
+ Boost(2.0).
+ MaxBoost(12.0).
+ BoostMode("multiply").
+ ScoreMode("max")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"boost":2,"boost_mode":"multiply","functions":[{"field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"weight":2.5},{"script_score":{"script":{"source":"_score * doc['my_numeric_field'].value"}},"weight":1.25},{"weight":0.5}],"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFunctionScoreQueryWithGaussScoreFunc(t *testing.T) {
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"gauss":{"pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFunctionScoreQueryWithGaussScoreFuncAndMultiValueMode(t *testing.T) {
+ q := NewFunctionScoreQuery().
+ Query(NewTermQuery("name.last", "banon")).
+ AddScoreFunc(NewGaussDecayFunction().FieldName("pin.location").Origin("11, 12").Scale("2km").Offset("0km").Decay(0.33).MultiValueMode("avg"))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"function_score":{"gauss":{"multi_value_mode":"avg","pin.location":{"decay":0.33,"offset":"0km","origin":"11, 12","scale":"2km"}},"query":{"term":{"name.last":"banon"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_fuzzy.go b/vendor/github.com/olivere/elastic/search_queries_fuzzy.go
new file mode 100644
index 000000000..02b6c52c2
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_fuzzy.go
@@ -0,0 +1,120 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyQuery uses similarity based on Levenshtein edit distance for
+// string fields, and a +/- margin on numeric and date fields.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-fuzzy-query.html
+type FuzzyQuery struct {
+ name string
+ value interface{}
+ boost *float64
+ fuzziness interface{}
+ prefixLength *int
+ maxExpansions *int
+ transpositions *bool
+ rewrite string
+ queryName string
+}
+
+// NewFuzzyQuery creates a new fuzzy query.
+func NewFuzzyQuery(name string, value interface{}) *FuzzyQuery {
+ q := &FuzzyQuery{
+ name: name,
+ value: value,
+ }
+ return q
+}
+
+// Boost sets the boost for this query. Documents matching this query will
+// (in addition to the normal weightings) have their score multiplied by
+// the boost provided.
+func (q *FuzzyQuery) Boost(boost float64) *FuzzyQuery {
+ q.boost = &boost
+ return q
+}
+
+// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings
+// like "auto", "0..1", "1..4" or "0.0..1.0".
+func (q *FuzzyQuery) Fuzziness(fuzziness interface{}) *FuzzyQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q *FuzzyQuery) PrefixLength(prefixLength int) *FuzzyQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q *FuzzyQuery) MaxExpansions(maxExpansions int) *FuzzyQuery {
+ q.maxExpansions = &maxExpansions
+ return q
+}
+
+func (q *FuzzyQuery) Transpositions(transpositions bool) *FuzzyQuery {
+ q.transpositions = &transpositions
+ return q
+}
+
+func (q *FuzzyQuery) Rewrite(rewrite string) *FuzzyQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *FuzzyQuery) QueryName(queryName string) *FuzzyQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the fuzzy query.
+func (q *FuzzyQuery) Source() (interface{}, error) {
+ // {
+ // "fuzzy" : {
+ // "user" : {
+ // "value" : "ki",
+ // "boost" : 1.0,
+ // "fuzziness" : 2,
+ // "prefix_length" : 0,
+ // "max_expansions" : 100
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["fuzzy"] = query
+
+ fq := make(map[string]interface{})
+ query[q.name] = fq
+
+ fq["value"] = q.value
+
+ if q.boost != nil {
+ fq["boost"] = *q.boost
+ }
+ if q.transpositions != nil {
+ fq["transpositions"] = *q.transpositions
+ }
+ if q.fuzziness != nil {
+ fq["fuzziness"] = q.fuzziness
+ }
+ if q.prefixLength != nil {
+ fq["prefix_length"] = *q.prefixLength
+ }
+ if q.maxExpansions != nil {
+ fq["max_expansions"] = *q.maxExpansions
+ }
+ if q.rewrite != "" {
+ fq["rewrite"] = q.rewrite
+ }
+ if q.queryName != "" {
+ fq["_name"] = q.queryName
+ }
+
+ return source, nil
+}
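A minimal sketch of building a fuzzy query and inspecting its JSON; the values loosely mirror the test below:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	q := elastic.NewFuzzyQuery("user", "ki").
		Fuzziness(2).
		PrefixLength(0).
		MaxExpansions(100)
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src) // marshal error ignored in this sketch
	fmt.Println(string(data))
	// {"fuzzy":{"user":{"fuzziness":2,"max_expansions":100,"prefix_length":0,"value":"ki"}}}
}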
diff --git a/vendor/github.com/olivere/elastic/search_queries_fuzzy_test.go b/vendor/github.com/olivere/elastic/search_queries_fuzzy_test.go
new file mode 100644
index 000000000..89140ca23
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_fuzzy_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestFuzzyQuery(t *testing.T) {
+ q := NewFuzzyQuery("user", "ki").Boost(1.5).Fuzziness(2).PrefixLength(0).MaxExpansions(100)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fuzzy":{"user":{"boost":1.5,"fuzziness":2,"max_expansions":100,"prefix_length":0,"value":"ki"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box.go b/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box.go
new file mode 100644
index 000000000..0418620d8
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box.go
@@ -0,0 +1,121 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// GeoBoundingBoxQuery filters hits based on a point location, using
+// a bounding box.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-geo-bounding-box-query.html
+type GeoBoundingBoxQuery struct {
+ name string
+ top *float64
+ left *float64
+ bottom *float64
+ right *float64
+ typ string
+ queryName string
+}
+
+// NewGeoBoundingBoxQuery creates and initializes a new GeoBoundingBoxQuery.
+func NewGeoBoundingBoxQuery(name string) *GeoBoundingBoxQuery {
+ return &GeoBoundingBoxQuery{
+ name: name,
+ }
+}
+
+func (q *GeoBoundingBoxQuery) TopLeft(top, left float64) *GeoBoundingBoxQuery {
+ q.top = &top
+ q.left = &left
+ return q
+}
+
+func (q *GeoBoundingBoxQuery) TopLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
+ return q.TopLeft(point.Lat, point.Lon)
+}
+
+func (q *GeoBoundingBoxQuery) BottomRight(bottom, right float64) *GeoBoundingBoxQuery {
+ q.bottom = &bottom
+ q.right = &right
+ return q
+}
+
+func (q *GeoBoundingBoxQuery) BottomRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
+ return q.BottomRight(point.Lat, point.Lon)
+}
+
+func (q *GeoBoundingBoxQuery) BottomLeft(bottom, left float64) *GeoBoundingBoxQuery {
+ q.bottom = &bottom
+ q.left = &left
+ return q
+}
+
+func (q *GeoBoundingBoxQuery) BottomLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
+ return q.BottomLeft(point.Lat, point.Lon)
+}
+
+func (q *GeoBoundingBoxQuery) TopRight(top, right float64) *GeoBoundingBoxQuery {
+ q.top = &top
+ q.right = &right
+ return q
+}
+
+func (q *GeoBoundingBoxQuery) TopRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery {
+ return q.TopRight(point.Lat, point.Lon)
+}
+
+// Type sets how the geo bounding box should be executed. It can be either
+// memory or indexed. It defaults to memory.
+func (q *GeoBoundingBoxQuery) Type(typ string) *GeoBoundingBoxQuery {
+ q.typ = typ
+ return q
+}
+
+func (q *GeoBoundingBoxQuery) QueryName(queryName string) *GeoBoundingBoxQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the geo_bounding_box query.
+func (q *GeoBoundingBoxQuery) Source() (interface{}, error) {
+ // {
+ // "geo_bounding_box" : {
+ // ...
+ // }
+ // }
+
+ if q.top == nil {
+ return nil, errors.New("geo_bounding_box requires top latitude to be set")
+ }
+ if q.bottom == nil {
+ return nil, errors.New("geo_bounding_box requires bottom latitude to be set")
+ }
+ if q.right == nil {
+ return nil, errors.New("geo_bounding_box requires right longitude to be set")
+ }
+ if q.left == nil {
+ return nil, errors.New("geo_bounding_box requires left longitude to be set")
+ }
+
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["geo_bounding_box"] = params
+
+ box := make(map[string]interface{})
+ box["top_left"] = []float64{*q.left, *q.top}
+ box["bottom_right"] = []float64{*q.right, *q.bottom}
+ params[q.name] = box
+
+ if q.typ != "" {
+ params["type"] = q.typ
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+
+ return source, nil
+}
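A minimal sketch of a bounding-box query. Note that the corners serialize in [lon, lat] order and that Source returns an error unless all four edges are set:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	q := elastic.NewGeoBoundingBoxQuery("pin.location").
		TopLeft(40.73, -74.1).
		BottomRight(40.01, -71.12)
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src) // marshal error ignored in this sketch
	fmt.Println(string(data))
	// {"geo_bounding_box":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]}}}
}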
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go b/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go
new file mode 100644
index 000000000..f44a2364f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_geo_bounding_box_test.go
@@ -0,0 +1,63 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoBoundingBoxQueryIncomplete(t *testing.T) {
+ q := NewGeoBoundingBoxQuery("pin.location")
+ q = q.TopLeft(40.73, -74.1)
+ // no bottom and no right here
+ q = q.Type("memory")
+ src, err := q.Source()
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if src != nil {
+ t.Fatal("expected empty source")
+ }
+}
+
+func TestGeoBoundingBoxQuery(t *testing.T) {
+ q := NewGeoBoundingBoxQuery("pin.location")
+ q = q.TopLeft(40.73, -74.1)
+ q = q.BottomRight(40.01, -71.12)
+ q = q.Type("memory")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_bounding_box":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]},"type":"memory"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoBoundingBoxQueryWithGeoPoint(t *testing.T) {
+ q := NewGeoBoundingBoxQuery("pin.location")
+ q = q.TopLeftFromGeoPoint(GeoPointFromLatLon(40.73, -74.1))
+ q = q.BottomRightFromGeoPoint(GeoPointFromLatLon(40.01, -71.12))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_bounding_box":{"pin.location":{"bottom_right":[-71.12,40.01],"top_left":[-74.1,40.73]}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_distance.go b/vendor/github.com/olivere/elastic/search_queries_geo_distance.go
new file mode 100644
index 000000000..00e62725f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_geo_distance.go
@@ -0,0 +1,107 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoDistanceQuery filters documents to include only hits that exist
+// within a specific distance from a geo point.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-geo-distance-query.html
+type GeoDistanceQuery struct {
+ name string
+ distance string
+ lat float64
+ lon float64
+ geohash string
+ distanceType string
+ queryName string
+}
+
+// NewGeoDistanceQuery creates and initializes a new GeoDistanceQuery.
+func NewGeoDistanceQuery(name string) *GeoDistanceQuery {
+ return &GeoDistanceQuery{name: name}
+}
+
+func (q *GeoDistanceQuery) GeoPoint(point *GeoPoint) *GeoDistanceQuery {
+ q.lat = point.Lat
+ q.lon = point.Lon
+ return q
+}
+
+func (q *GeoDistanceQuery) Point(lat, lon float64) *GeoDistanceQuery {
+ q.lat = lat
+ q.lon = lon
+ return q
+}
+
+func (q *GeoDistanceQuery) Lat(lat float64) *GeoDistanceQuery {
+ q.lat = lat
+ return q
+}
+
+func (q *GeoDistanceQuery) Lon(lon float64) *GeoDistanceQuery {
+ q.lon = lon
+ return q
+}
+
+func (q *GeoDistanceQuery) GeoHash(geohash string) *GeoDistanceQuery {
+ q.geohash = geohash
+ return q
+}
+
+func (q *GeoDistanceQuery) Distance(distance string) *GeoDistanceQuery {
+ q.distance = distance
+ return q
+}
+
+func (q *GeoDistanceQuery) DistanceType(distanceType string) *GeoDistanceQuery {
+ q.distanceType = distanceType
+ return q
+}
+
+func (q *GeoDistanceQuery) QueryName(queryName string) *GeoDistanceQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the geo_distance query.
+func (q *GeoDistanceQuery) Source() (interface{}, error) {
+ // {
+ // "geo_distance" : {
+ // "distance" : "200km",
+ // "pin.location" : {
+ // "lat" : 40,
+ // "lon" : -70
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+
+ if q.geohash != "" {
+ params[q.name] = q.geohash
+ } else {
+ location := make(map[string]interface{})
+ location["lat"] = q.lat
+ location["lon"] = q.lon
+ params[q.name] = location
+ }
+
+ if q.distance != "" {
+ params["distance"] = q.distance
+ }
+ if q.distanceType != "" {
+ params["distance_type"] = q.distanceType
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+
+ source["geo_distance"] = params
+
+ return source, nil
+}
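A minimal sketch of a geo-distance query, with illustrative coordinates:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	q := elastic.NewGeoDistanceQuery("pin.location").
		Point(40, -70).
		Distance("200km")
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src) // marshal error ignored in this sketch
	fmt.Println(string(data))
	// {"geo_distance":{"distance":"200km","pin.location":{"lat":40,"lon":-70}}}
}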
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_distance_test.go b/vendor/github.com/olivere/elastic/search_queries_geo_distance_test.go
new file mode 100644
index 000000000..dd169575a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_geo_distance_test.go
@@ -0,0 +1,69 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoDistanceQuery(t *testing.T) {
+ q := NewGeoDistanceQuery("pin.location")
+ q = q.Lat(40)
+ q = q.Lon(-70)
+ q = q.Distance("200km")
+ q = q.DistanceType("plane")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"distance":"200km","distance_type":"plane","pin.location":{"lat":40,"lon":-70}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceQueryWithGeoPoint(t *testing.T) {
+ q := NewGeoDistanceQuery("pin.location")
+ q = q.GeoPoint(GeoPointFromLatLon(40, -70))
+ q = q.Distance("200km")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"distance":"200km","pin.location":{"lat":40,"lon":-70}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceQueryWithGeoHash(t *testing.T) {
+ q := NewGeoDistanceQuery("pin.location")
+ q = q.GeoHash("drm3btev3e86")
+ q = q.Distance("12km")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_distance":{"distance":"12km","pin.location":"drm3btev3e86"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_polygon.go b/vendor/github.com/olivere/elastic/search_queries_geo_polygon.go
new file mode 100644
index 000000000..7678c3f3b
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_geo_polygon.go
@@ -0,0 +1,72 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoPolygonQuery includes only hits that fall within a polygon of points.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-geo-polygon-query.html
+type GeoPolygonQuery struct {
+ name string
+ points []*GeoPoint
+ queryName string
+}
+
+// NewGeoPolygonQuery creates and initializes a new GeoPolygonQuery.
+func NewGeoPolygonQuery(name string) *GeoPolygonQuery {
+ return &GeoPolygonQuery{
+ name: name,
+ points: make([]*GeoPoint, 0),
+ }
+}
+
+// AddPoint adds a point from latitude and longitude.
+func (q *GeoPolygonQuery) AddPoint(lat, lon float64) *GeoPolygonQuery {
+ q.points = append(q.points, GeoPointFromLatLon(lat, lon))
+ return q
+}
+
+// AddGeoPoint adds a GeoPoint.
+func (q *GeoPolygonQuery) AddGeoPoint(point *GeoPoint) *GeoPolygonQuery {
+ q.points = append(q.points, point)
+ return q
+}
+
+func (q *GeoPolygonQuery) QueryName(queryName string) *GeoPolygonQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the geo_polygon query.
+func (q *GeoPolygonQuery) Source() (interface{}, error) {
+ // "geo_polygon" : {
+ // "person.location" : {
+ // "points" : [
+ // {"lat" : 40, "lon" : -70},
+ // {"lat" : 30, "lon" : -80},
+ // {"lat" : 20, "lon" : -90}
+ // ]
+ // }
+ // }
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["geo_polygon"] = params
+
+ polygon := make(map[string]interface{})
+ params[q.name] = polygon
+
+ var points []interface{}
+ for _, point := range q.points {
+ points = append(points, point.Source())
+ }
+ polygon["points"] = points
+
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+
+ return source, nil
+}
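A minimal sketch of a polygon query, with illustrative points:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	q := elastic.NewGeoPolygonQuery("person.location").
		AddPoint(40, -70).
		AddPoint(30, -80).
		AddPoint(20, -90)
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src) // marshal error ignored in this sketch
	fmt.Println(string(data))
	// {"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}
}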
diff --git a/vendor/github.com/olivere/elastic/search_queries_geo_polygon_test.go b/vendor/github.com/olivere/elastic/search_queries_geo_polygon_test.go
new file mode 100644
index 000000000..932c57d7b
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_geo_polygon_test.go
@@ -0,0 +1,58 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestGeoPolygonQuery(t *testing.T) {
+ q := NewGeoPolygonQuery("person.location")
+ q = q.AddPoint(40, -70)
+ q = q.AddPoint(30, -80)
+ point, err := GeoPointFromString("20,-90")
+ if err != nil {
+ t.Fatalf("GeoPointFromString failed: %v", err)
+ }
+ q = q.AddGeoPoint(point)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoPolygonQueryFromGeoPoints(t *testing.T) {
+ q := NewGeoPolygonQuery("person.location")
+ q = q.AddGeoPoint(&GeoPoint{Lat: 40, Lon: -70})
+ q = q.AddGeoPoint(GeoPointFromLatLon(30, -80))
+ point, err := GeoPointFromString("20,-90")
+ if err != nil {
+ t.Fatalf("GeoPointFromString failed: %v", err)
+ }
+ q = q.AddGeoPoint(point)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"geo_polygon":{"person.location":{"points":[{"lat":40,"lon":-70},{"lat":30,"lon":-80},{"lat":20,"lon":-90}]}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_has_child.go b/vendor/github.com/olivere/elastic/search_queries_has_child.go
new file mode 100644
index 000000000..41e7429c4
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_has_child.go
@@ -0,0 +1,131 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// HasChildQuery accepts a query and the child type to run against, and results
+// in parent documents that have child docs matching the query.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-has-child-query.html
+type HasChildQuery struct {
+ query Query
+ childType string
+ boost *float64
+ scoreMode string
+ minChildren *int
+ maxChildren *int
+ shortCircuitCutoff *int
+ queryName string
+ innerHit *InnerHit
+}
+
+// NewHasChildQuery creates and initializes a new has_child query.
+func NewHasChildQuery(childType string, query Query) *HasChildQuery {
+ return &HasChildQuery{
+ query: query,
+ childType: childType,
+ }
+}
+
+// Boost sets the boost for this query.
+func (q *HasChildQuery) Boost(boost float64) *HasChildQuery {
+ q.boost = &boost
+ return q
+}
+
+// ScoreMode defines how the scores from the matching child documents
+// are mapped into the parent document. Allowed values are: min, max,
+// avg, or none.
+func (q *HasChildQuery) ScoreMode(scoreMode string) *HasChildQuery {
+ q.scoreMode = scoreMode
+ return q
+}
+
+// MinChildren defines the minimum number of children that are required
+// to match for the parent to be considered a match.
+func (q *HasChildQuery) MinChildren(minChildren int) *HasChildQuery {
+ q.minChildren = &minChildren
+ return q
+}
+
+// MaxChildren defines the maximum number of children that are allowed
+// to match for the parent to be considered a match.
+func (q *HasChildQuery) MaxChildren(maxChildren int) *HasChildQuery {
+ q.maxChildren = &maxChildren
+ return q
+}
+
+// ShortCircuitCutoff configures the cut off point at which only parent
+// documents containing the matching parent id terms are evaluated,
+// instead of evaluating all parent docs.
+func (q *HasChildQuery) ShortCircuitCutoff(shortCircuitCutoff int) *HasChildQuery {
+ q.shortCircuitCutoff = &shortCircuitCutoff
+ return q
+}
+
+// QueryName specifies the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *HasChildQuery) QueryName(queryName string) *HasChildQuery {
+ q.queryName = queryName
+ return q
+}
+
+// InnerHit sets the inner hit definition in the scope of this query,
+// reusing the defined type and query.
+func (q *HasChildQuery) InnerHit(innerHit *InnerHit) *HasChildQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// Source returns JSON for the has_child query.
+func (q *HasChildQuery) Source() (interface{}, error) {
+ // {
+ // "has_child" : {
+ // "type" : "blog_tag",
+ // "score_mode" : "min",
+ // "query" : {
+ // "term" : {
+ // "tag" : "something"
+ // }
+ // }
+ // }
+ // }
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["has_child"] = query
+
+ src, err := q.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ query["query"] = src
+ query["type"] = q.childType
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.scoreMode != "" {
+ query["score_mode"] = q.scoreMode
+ }
+ if q.minChildren != nil {
+ query["min_children"] = *q.minChildren
+ }
+ if q.maxChildren != nil {
+ query["max_children"] = *q.maxChildren
+ }
+ if q.shortCircuitCutoff != nil {
+ query["short_circuit_cutoff"] = *q.shortCircuitCutoff
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+ if q.innerHit != nil {
+ src, err := q.innerHit.Source()
+ if err != nil {
+ return nil, err
+ }
+ query["inner_hits"] = src
+ }
+ return source, nil
+}
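A minimal sketch of a has_child query, with an illustrative child type and term:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	q := elastic.NewHasChildQuery("blog_tag", elastic.NewTermQuery("tag", "something")).
		ScoreMode("min").
		MinChildren(2)
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src) // marshal error ignored in this sketch
	fmt.Println(string(data))
	// {"has_child":{"min_children":2,"query":{"term":{"tag":"something"}},"score_mode":"min","type":"blog_tag"}}
}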
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices_test.go b/vendor/github.com/olivere/elastic/search_queries_has_child_test.go
index 0c04499d1..745c263f9 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices_test.go
+++ b/vendor/github.com/olivere/elastic/search_queries_has_child_test.go
@@ -9,9 +9,8 @@ import (
"testing"
)
-func TestIndicesQuery(t *testing.T) {
- q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2")
- q = q.NoMatchQuery(NewTermQuery("tag", "kow"))
+func TestHasChildQuery(t *testing.T) {
+ q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")).ScoreMode("min")
src, err := q.Source()
if err != nil {
t.Fatal(err)
@@ -21,15 +20,15 @@ func TestIndicesQuery(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"indices":{"indices":["index1","index2"],"no_match_query":{"term":{"tag":"kow"}},"query":{"term":{"tag":"wow"}}}}`
+ expected := `{"has_child":{"query":{"term":{"tag":"something"}},"score_mode":"min","type":"blog_tag"}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
}
-func TestIndicesQueryWithNoMatchQueryType(t *testing.T) {
- q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2")
- q = q.NoMatchQueryType("all")
+func TestHasChildQueryWithInnerHit(t *testing.T) {
+ q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something"))
+ q = q.InnerHit(NewInnerHit().Name("comments"))
src, err := q.Source()
if err != nil {
t.Fatal(err)
@@ -39,7 +38,7 @@ func TestIndicesQueryWithNoMatchQueryType(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"indices":{"indices":["index1","index2"],"no_match_query":"all","query":{"term":{"tag":"wow"}}}}`
+ expected := `{"has_child":{"inner_hits":{"name":"comments"},"query":{"term":{"tag":"something"}},"type":"blog_tag"}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
diff --git a/vendor/github.com/olivere/elastic/search_queries_has_parent.go b/vendor/github.com/olivere/elastic/search_queries_has_parent.go
new file mode 100644
index 000000000..5e1b650af
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_has_parent.go
@@ -0,0 +1,97 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// HasParentQuery accepts a query and a parent type. The query is executed
+// in the parent document space, which is specified by the parent type.
+// This query returns child documents whose associated parents have matched.
+// Otherwise, the has_parent query has the same options and works in the
+// same manner as the has_child query.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-has-parent-query.html
+type HasParentQuery struct {
+ query Query
+ parentType string
+ boost *float64
+ score *bool
+ queryName string
+ innerHit *InnerHit
+}
+
+// NewHasParentQuery creates and initializes a new has_parent query.
+func NewHasParentQuery(parentType string, query Query) *HasParentQuery {
+ return &HasParentQuery{
+ query: query,
+ parentType: parentType,
+ }
+}
+
+// Boost sets the boost for this query.
+func (q *HasParentQuery) Boost(boost float64) *HasParentQuery {
+ q.boost = &boost
+ return q
+}
+
+// Score defines if the parent score is mapped into the child documents.
+func (q *HasParentQuery) Score(score bool) *HasParentQuery {
+ q.score = &score
+ return q
+}
+
+// QueryName specifies the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *HasParentQuery) QueryName(queryName string) *HasParentQuery {
+ q.queryName = queryName
+ return q
+}
+
+// InnerHit sets the inner hit definition in the scope of this query,
+// reusing the defined type and query.
+func (q *HasParentQuery) InnerHit(innerHit *InnerHit) *HasParentQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// Source returns JSON for the has_parent query.
+func (q *HasParentQuery) Source() (interface{}, error) {
+ // {
+ // "has_parent" : {
+ // "parent_type" : "blog",
+ // "query" : {
+ // "term" : {
+ // "tag" : "something"
+ // }
+ // }
+ // }
+ // }
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["has_parent"] = query
+
+ src, err := q.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ query["query"] = src
+ query["parent_type"] = q.parentType
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.score != nil {
+ query["score"] = *q.score
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+ if q.innerHit != nil {
+ src, err := q.innerHit.Source()
+ if err != nil {
+ return nil, err
+ }
+ query["inner_hits"] = src
+ }
+ return source, nil
+}
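A minimal sketch of a has_parent query, with illustrative values:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	q := elastic.NewHasParentQuery("blog", elastic.NewTermQuery("tag", "something")).
		Score(true)
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src) // marshal error ignored in this sketch
	fmt.Println(string(data))
	// {"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}},"score":true}}
}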
diff --git a/vendor/github.com/olivere/elastic/search_queries_has_parent_test.go b/vendor/github.com/olivere/elastic/search_queries_has_parent_test.go
new file mode 100644
index 000000000..0fec395e3
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_has_parent_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestHasParentQueryTest(t *testing.T) {
+ q := NewHasParentQuery("blog", NewTermQuery("tag", "something")).Score(true)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"has_parent":{"parent_type":"blog","query":{"term":{"tag":"something"}},"score":true}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_ids.go b/vendor/github.com/olivere/elastic/search_queries_ids.go
new file mode 100644
index 000000000..e067aebbe
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_ids.go
@@ -0,0 +1,76 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// IdsQuery filters documents to those that have one of the provided ids.
+// Note, this query uses the _uid field.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-ids-query.html
+type IdsQuery struct {
+ types []string
+ values []string
+ boost *float64
+ queryName string
+}
+
+// NewIdsQuery creates and initializes a new ids query.
+func NewIdsQuery(types ...string) *IdsQuery {
+ return &IdsQuery{
+ types: types,
+ values: make([]string, 0),
+ }
+}
+
+// Ids adds ids to the filter.
+func (q *IdsQuery) Ids(ids ...string) *IdsQuery {
+ q.values = append(q.values, ids...)
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *IdsQuery) Boost(boost float64) *IdsQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName sets the query name for the filter.
+func (q *IdsQuery) QueryName(queryName string) *IdsQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the ids query.
+func (q *IdsQuery) Source() (interface{}, error) {
+ // {
+ // "ids" : {
+ // "type" : "my_type",
+ // "values" : ["1", "4", "100"]
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["ids"] = query
+
+ // type(s)
+ if len(q.types) == 1 {
+ query["type"] = q.types[0]
+ } else if len(q.types) > 1 {
+ query["types"] = q.types
+ }
+
+ // values
+ query["values"] = q.values
+
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+
+ return source, nil
+}
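A minimal sketch of an ids query, with illustrative ids:

package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	q := elastic.NewIdsQuery("my_type").Ids("1", "4", "100")
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src) // marshal error ignored in this sketch
	fmt.Println(string(data))
	// {"ids":{"type":"my_type","values":["1","4","100"]}}
}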
diff --git a/vendor/github.com/olivere/elastic/search_queries_ids_test.go b/vendor/github.com/olivere/elastic/search_queries_ids_test.go
new file mode 100644
index 000000000..b36605b4d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_ids_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestIdsQuery(t *testing.T) {
+ q := NewIdsQuery("my_type").Ids("1", "4", "100").Boost(10.5).QueryName("my_query")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ids":{"_name":"my_query","boost":10.5,"type":"my_type","values":["1","4","100"]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_match.go b/vendor/github.com/olivere/elastic/search_queries_match.go
new file mode 100644
index 000000000..b38b12452
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_match.go
@@ -0,0 +1,189 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchQuery is a family of queries that accepts text/numerics/dates,
+// analyzes them, and constructs a query.
+//
+// To create a new MatchQuery, use NewMatchQuery. To create specific types
+// of queries, e.g. a match_phrase query, use one of the dedicated
+// shortcuts, e.g. NewMatchPhraseQuery(...).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-query.html
+type MatchQuery struct {
+ name string
+ text interface{}
+ operator string // or / and
+ analyzer string
+ boost *float64
+ fuzziness string
+ prefixLength *int
+ maxExpansions *int
+ minimumShouldMatch string
+ fuzzyRewrite string
+ lenient *bool
+ fuzzyTranspositions *bool
+ zeroTermsQuery string
+ cutoffFrequency *float64
+ queryName string
+}
+
+// NewMatchQuery creates and initializes a new MatchQuery.
+func NewMatchQuery(name string, text interface{}) *MatchQuery {
+ return &MatchQuery{name: name, text: text}
+}
+
+// Operator sets the operator to use when using a boolean query.
+// Can be "AND" or "OR" (default).
+func (q *MatchQuery) Operator(operator string) *MatchQuery {
+ q.operator = operator
+ return q
+}
+
+// Analyzer explicitly sets the analyzer to use. It defaults to the explicit
+// mapping config for the field or, if not set, the default search analyzer.
+func (q *MatchQuery) Analyzer(analyzer string) *MatchQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// Fuzziness sets the fuzziness when evaluated to a fuzzy query type.
+// Defaults to "AUTO".
+func (q *MatchQuery) Fuzziness(fuzziness string) *MatchQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+// PrefixLength sets the length of the common (non-fuzzy) prefix
+// for fuzzy match queries. It must be non-negative.
+func (q *MatchQuery) PrefixLength(prefixLength int) *MatchQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+// MaxExpansions is used with fuzzy or prefix type queries. It specifies
+// the number of term expansions to use. It defaults to unbounded, so it is
+// recommended to set it to a reasonable value for faster execution.
+func (q *MatchQuery) MaxExpansions(maxExpansions int) *MatchQuery {
+ q.maxExpansions = &maxExpansions
+ return q
+}
+
+// CutoffFrequency can be a value in [0..1] (or an absolute number >=1).
+// It represents the maximum threshold of a term's document frequency to be
+// considered a low frequency term.
+func (q *MatchQuery) CutoffFrequency(cutoff float64) *MatchQuery {
+ q.cutoffFrequency = &cutoff
+ return q
+}
+
+// MinimumShouldMatch sets the optional minimumShouldMatch value to
+// apply to the query.
+func (q *MatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MatchQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+// FuzzyRewrite sets the fuzzy_rewrite parameter controlling how the
+// fuzzy query will get rewritten.
+func (q *MatchQuery) FuzzyRewrite(fuzzyRewrite string) *MatchQuery {
+ q.fuzzyRewrite = fuzzyRewrite
+ return q
+}
+
+// FuzzyTranspositions sets whether transpositions are supported in
+// fuzzy queries.
+//
+// The default metric used by fuzzy queries to determine a match is
+// the Damerau-Levenshtein distance formula, which supports transpositions.
+// Setting transpositions to false switches to the classic Levenshtein
+// distance; if not set, the Damerau-Levenshtein metric is used.
+func (q *MatchQuery) FuzzyTranspositions(fuzzyTranspositions bool) *MatchQuery {
+ q.fuzzyTranspositions = &fuzzyTranspositions
+ return q
+}
+
+// Lenient specifies whether format based failures will be ignored.
+func (q *MatchQuery) Lenient(lenient bool) *MatchQuery {
+ q.lenient = &lenient
+ return q
+}
+
+// ZeroTermsQuery can be "all" or "none".
+func (q *MatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MatchQuery {
+ q.zeroTermsQuery = zeroTermsQuery
+ return q
+}
+
+// Boost sets the boost to apply to this query.
+func (q *MatchQuery) Boost(boost float64) *MatchQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *MatchQuery) QueryName(queryName string) *MatchQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the match query.
+func (q *MatchQuery) Source() (interface{}, error) {
+ // {"match":{"name":{"query":"value","type":"boolean/phrase"}}}
+ source := make(map[string]interface{})
+
+ match := make(map[string]interface{})
+ source["match"] = match
+
+ query := make(map[string]interface{})
+ match[q.name] = query
+
+ query["query"] = q.text
+
+ if q.operator != "" {
+ query["operator"] = q.operator
+ }
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+ if q.fuzziness != "" {
+ query["fuzziness"] = q.fuzziness
+ }
+ if q.prefixLength != nil {
+ query["prefix_length"] = *q.prefixLength
+ }
+ if q.maxExpansions != nil {
+ query["max_expansions"] = *q.maxExpansions
+ }
+ if q.minimumShouldMatch != "" {
+ query["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.fuzzyRewrite != "" {
+ query["fuzzy_rewrite"] = q.fuzzyRewrite
+ }
+ if q.lenient != nil {
+ query["lenient"] = *q.lenient
+ }
+ if q.fuzzyTranspositions != nil {
+ query["fuzzy_transpositions"] = *q.fuzzyTranspositions
+ }
+ if q.zeroTermsQuery != "" {
+ query["zero_terms_query"] = q.zeroTermsQuery
+ }
+ if q.cutoffFrequency != nil {
+		query["cutoff_frequency"] = *q.cutoffFrequency
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+
+ return source, nil
+}
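A short sketch of the fuzzy-matching options above, printing the JSON the builder emits; no cluster is needed, and the field name and misspelled input are made up:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"

    	"github.com/olivere/elastic"
    )

    func main() {
    	// Tolerate typos: edit distance picked by "AUTO", the first two characters
    	// must match exactly, and expansion is capped for faster execution.
    	q := elastic.NewMatchQuery("message", "goalng").
    		Fuzziness("AUTO").
    		PrefixLength(2).
    		MaxExpansions(10)
    	src, err := q.Source()
    	if err != nil {
    		log.Fatal(err)
    	}
    	data, err := json.Marshal(src)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(string(data))
    	// {"match":{"message":{"fuzziness":"AUTO","max_expansions":10,"prefix_length":2,"query":"goalng"}}}
    }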
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_all.go b/vendor/github.com/olivere/elastic/search_queries_match_all.go
new file mode 100644
index 000000000..3829c8af0
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_match_all.go
@@ -0,0 +1,51 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchAllQuery is the simplest query, which matches all documents,
+// giving them all a _score of 1.0.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-all-query.html
+type MatchAllQuery struct {
+ boost *float64
+ queryName string
+}
+
+// NewMatchAllQuery creates and initializes a new match all query.
+func NewMatchAllQuery() *MatchAllQuery {
+ return &MatchAllQuery{}
+}
+
+// Boost sets the boost for this query. Documents matching this query will
+// (in addition to the normal weightings) have their score multiplied by the
+// boost provided.
+func (q *MatchAllQuery) Boost(boost float64) *MatchAllQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName sets the query name.
+func (q *MatchAllQuery) QueryName(name string) *MatchAllQuery {
+ q.queryName = name
+ return q
+}
+
+// Source returns JSON for the match all query.
+func (q MatchAllQuery) Source() (interface{}, error) {
+ // {
+ // "match_all" : { ... }
+ // }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["match_all"] = params
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+ return source, nil
+}
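A minimal sketch of match_all as the catch-all query in a search request. The index name "tweets" is an assumption, and Size belongs to the search service rather than to this file:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/olivere/elastic"
    )

    func main() {
    	client, err := elastic.NewClient() // assumes a local default cluster
    	if err != nil {
    		log.Fatal(err)
    	}
    	// List the first ten documents of a hypothetical "tweets" index.
    	res, err := client.Search().
    		Index("tweets").
    		Query(elastic.NewMatchAllQuery()).
    		Size(10).
    		Do(context.Background())
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("%d total hit(s)\n", res.Hits.TotalHits)
    }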
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_all_test.go b/vendor/github.com/olivere/elastic/search_queries_match_all_test.go
new file mode 100644
index 000000000..5d8671025
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_match_all_test.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMatchAllQuery(t *testing.T) {
+ q := NewMatchAllQuery()
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_all":{}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchAllQueryWithBoost(t *testing.T) {
+ q := NewMatchAllQuery().Boost(3.14)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_all":{"boost":3.14}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchAllQueryWithQueryName(t *testing.T) {
+ q := NewMatchAllQuery().QueryName("qname")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_all":{"_name":"qname"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_none.go b/vendor/github.com/olivere/elastic/search_queries_match_none.go
new file mode 100644
index 000000000..9afe16716
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_match_none.go
@@ -0,0 +1,39 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchNoneQuery returns no documents. It is the inverse of
+// MatchAllQuery.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-all-query.html
+type MatchNoneQuery struct {
+ queryName string
+}
+
+// NewMatchNoneQuery creates and initializes a new match none query.
+func NewMatchNoneQuery() *MatchNoneQuery {
+ return &MatchNoneQuery{}
+}
+
+// QueryName sets the query name.
+func (q *MatchNoneQuery) QueryName(name string) *MatchNoneQuery {
+ q.queryName = name
+ return q
+}
+
+// Source returns JSON for the match none query.
+func (q MatchNoneQuery) Source() (interface{}, error) {
+ // {
+ // "match_none" : { ... }
+ // }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["match_none"] = params
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_none_test.go b/vendor/github.com/olivere/elastic/search_queries_match_none_test.go
new file mode 100644
index 000000000..6463452da
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_match_none_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMatchNoneQuery(t *testing.T) {
+ q := NewMatchNoneQuery()
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_none":{}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchNoneQueryWithQueryName(t *testing.T) {
+ q := NewMatchNoneQuery().QueryName("qname")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_none":{"_name":"qname"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_phrase.go b/vendor/github.com/olivere/elastic/search_queries_match_phrase.go
new file mode 100644
index 000000000..0e4c6327e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_match_phrase.go
@@ -0,0 +1,79 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchPhraseQuery analyzes the text and creates a phrase query out of
+// the analyzed text.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-query-phrase.html
+type MatchPhraseQuery struct {
+ name string
+ value interface{}
+ analyzer string
+ slop *int
+ boost *float64
+ queryName string
+}
+
+// NewMatchPhraseQuery creates and initializes a new MatchPhraseQuery.
+func NewMatchPhraseQuery(name string, value interface{}) *MatchPhraseQuery {
+ return &MatchPhraseQuery{name: name, value: value}
+}
+
+// Analyzer explicitly sets the analyzer to use. It defaults to the explicit
+// mapping config for the field or, if not set, the default search analyzer.
+func (q *MatchPhraseQuery) Analyzer(analyzer string) *MatchPhraseQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// Slop sets the phrase slop if evaluated to a phrase query type.
+func (q *MatchPhraseQuery) Slop(slop int) *MatchPhraseQuery {
+ q.slop = &slop
+ return q
+}
+
+// Boost sets the boost to apply to this query.
+func (q *MatchPhraseQuery) Boost(boost float64) *MatchPhraseQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *MatchPhraseQuery) QueryName(queryName string) *MatchPhraseQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the match_phrase query.
+func (q *MatchPhraseQuery) Source() (interface{}, error) {
+ // {"match_phrase":{"name":{"query":"value","analyzer":"my_analyzer"}}}
+ source := make(map[string]interface{})
+
+ match := make(map[string]interface{})
+ source["match_phrase"] = match
+
+ query := make(map[string]interface{})
+ match[q.name] = query
+
+ query["query"] = q.value
+
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+ if q.slop != nil {
+ query["slop"] = *q.slop
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+
+ return source, nil
+}
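A short sketch of the Slop option above: with a slop of 1, the phrase "quick fox" also matches "quick brown fox". Only code from this file plus encoding/json is used; field and text are made up:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"

    	"github.com/olivere/elastic"
    )

    func main() {
    	// Allow one intervening token between the phrase terms.
    	q := elastic.NewMatchPhraseQuery("message", "quick fox").Slop(1)
    	src, err := q.Source()
    	if err != nil {
    		log.Fatal(err)
    	}
    	data, err := json.Marshal(src)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(string(data))
    	// {"match_phrase":{"message":{"query":"quick fox","slop":1}}}
    }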
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix.go b/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix.go
new file mode 100644
index 000000000..10a88668d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix.go
@@ -0,0 +1,89 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchPhrasePrefixQuery is the same as match_phrase, except that it allows for
+// prefix matches on the last term in the text.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-query-phrase-prefix.html
+type MatchPhrasePrefixQuery struct {
+ name string
+ value interface{}
+ analyzer string
+ slop *int
+ maxExpansions *int
+ boost *float64
+ queryName string
+}
+
+// NewMatchPhrasePrefixQuery creates and initializes a new MatchPhrasePrefixQuery.
+func NewMatchPhrasePrefixQuery(name string, value interface{}) *MatchPhrasePrefixQuery {
+ return &MatchPhrasePrefixQuery{name: name, value: value}
+}
+
+// Analyzer explicitly sets the analyzer to use. It defaults to the explicit
+// mapping config for the field or, if not set, the default search analyzer.
+func (q *MatchPhrasePrefixQuery) Analyzer(analyzer string) *MatchPhrasePrefixQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// Slop sets the phrase slop if evaluated to a phrase query type.
+func (q *MatchPhrasePrefixQuery) Slop(slop int) *MatchPhrasePrefixQuery {
+ q.slop = &slop
+ return q
+}
+
+// MaxExpansions sets the number of term expansions to use.
+func (q *MatchPhrasePrefixQuery) MaxExpansions(n int) *MatchPhrasePrefixQuery {
+ q.maxExpansions = &n
+ return q
+}
+
+// Boost sets the boost to apply to this query.
+func (q *MatchPhrasePrefixQuery) Boost(boost float64) *MatchPhrasePrefixQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *MatchPhrasePrefixQuery) QueryName(queryName string) *MatchPhrasePrefixQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the match_phrase_prefix query.
+func (q *MatchPhrasePrefixQuery) Source() (interface{}, error) {
+ // {"match_phrase_prefix":{"name":{"query":"value","max_expansions":10}}}
+ source := make(map[string]interface{})
+
+ match := make(map[string]interface{})
+ source["match_phrase_prefix"] = match
+
+ query := make(map[string]interface{})
+ match[q.name] = query
+
+ query["query"] = q.value
+
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+ if q.slop != nil {
+ query["slop"] = *q.slop
+ }
+ if q.maxExpansions != nil {
+ query["max_expansions"] = *q.maxExpansions
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+
+ return source, nil
+}
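A sketch of the usual search-as-you-type use of this query: the last term of the (made-up) input is treated as a prefix, and MaxExpansions bounds how many terms that prefix may expand to:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"

    	"github.com/olivere/elastic"
    )

    func main() {
    	q := elastic.NewMatchPhrasePrefixQuery("message", "quick brown f").MaxExpansions(10)
    	src, err := q.Source()
    	if err != nil {
    		log.Fatal(err)
    	}
    	data, err := json.Marshal(src)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(string(data))
    	// {"match_phrase_prefix":{"message":{"max_expansions":10,"query":"quick brown f"}}}
    }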
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix_test.go b/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix_test.go
new file mode 100644
index 000000000..82a02f17d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_match_phrase_prefix_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMatchPhrasePrefixQuery(t *testing.T) {
+ q := NewMatchPhrasePrefixQuery("message", "this is a test").Boost(0.3).MaxExpansions(5)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_phrase_prefix":{"message":{"boost":0.3,"max_expansions":5,"query":"this is a test"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_phrase_test.go b/vendor/github.com/olivere/elastic/search_queries_match_phrase_test.go
new file mode 100644
index 000000000..85e60d8b5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_match_phrase_test.go
@@ -0,0 +1,29 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMatchPhraseQuery(t *testing.T) {
+ q := NewMatchPhraseQuery("message", "this is a test").
+ Analyzer("my_analyzer").
+ Boost(0.7)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_phrase":{"message":{"analyzer":"my_analyzer","boost":0.7,"query":"this is a test"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_match_test.go b/vendor/github.com/olivere/elastic/search_queries_match_test.go
new file mode 100644
index 000000000..dd750cf93
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_match_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMatchQuery(t *testing.T) {
+ q := NewMatchQuery("message", "this is a test")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match":{"message":{"query":"this is a test"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMatchQueryWithOptions(t *testing.T) {
+ q := NewMatchQuery("message", "this is a test").Analyzer("whitespace").Operator("or").Boost(2.5)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match":{"message":{"analyzer":"whitespace","boost":2.5,"operator":"or","query":"this is a test"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_more_like_this.go b/vendor/github.com/olivere/elastic/search_queries_more_like_this.go
new file mode 100644
index 000000000..5c71e291f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_more_like_this.go
@@ -0,0 +1,412 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// MoreLikeThis query (MLT Query) finds documents that are "like" a given
+// set of documents. In order to do so, MLT selects a set of representative
+// terms of these input documents, forms a query using these terms, executes
+// the query and returns the results. The user controls the input documents,
+// how the terms should be selected and how the query is formed.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-mlt-query.html
+type MoreLikeThisQuery struct {
+ fields []string
+ docs []*MoreLikeThisQueryItem
+ unlikeDocs []*MoreLikeThisQueryItem
+ include *bool
+ minimumShouldMatch string
+ minTermFreq *int
+ maxQueryTerms *int
+ stopWords []string
+ minDocFreq *int
+ maxDocFreq *int
+ minWordLength *int
+ maxWordLength *int
+ boostTerms *float64
+ boost *float64
+ analyzer string
+ failOnUnsupportedField *bool
+ queryName string
+}
+
+// NewMoreLikeThisQuery creates and initializes a new MoreLikeThisQuery.
+func NewMoreLikeThisQuery() *MoreLikeThisQuery {
+ return &MoreLikeThisQuery{
+ fields: make([]string, 0),
+ stopWords: make([]string, 0),
+ docs: make([]*MoreLikeThisQueryItem, 0),
+ unlikeDocs: make([]*MoreLikeThisQueryItem, 0),
+ }
+}
+
+// Field adds one or more field names to the query.
+func (q *MoreLikeThisQuery) Field(fields ...string) *MoreLikeThisQuery {
+ q.fields = append(q.fields, fields...)
+ return q
+}
+
+// StopWord sets the stopwords. Any word in this set is considered
+// "uninteresting" and ignored. Even if your Analyzer allows stopwords,
+// you might want to tell the MoreLikeThis code to ignore them, as for
+// the purposes of document similarity it seems reasonable to assume that
+// "a stop word is never interesting".
+func (q *MoreLikeThisQuery) StopWord(stopWords ...string) *MoreLikeThisQuery {
+ q.stopWords = append(q.stopWords, stopWords...)
+ return q
+}
+
+// LikeText sets the text to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) LikeText(likeTexts ...string) *MoreLikeThisQuery {
+ for _, s := range likeTexts {
+ item := NewMoreLikeThisQueryItem().LikeText(s)
+ q.docs = append(q.docs, item)
+ }
+ return q
+}
+
+// LikeItems sets the documents to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) LikeItems(docs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
+ q.docs = append(q.docs, docs...)
+ return q
+}
+
+// IgnoreLikeText sets the text from which the terms should not be selected.
+func (q *MoreLikeThisQuery) IgnoreLikeText(ignoreLikeText ...string) *MoreLikeThisQuery {
+ for _, s := range ignoreLikeText {
+ item := NewMoreLikeThisQueryItem().LikeText(s)
+ q.unlikeDocs = append(q.unlikeDocs, item)
+ }
+ return q
+}
+
+// IgnoreLikeItems sets the documents from which the terms should not be selected.
+func (q *MoreLikeThisQuery) IgnoreLikeItems(ignoreDocs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery {
+ q.unlikeDocs = append(q.unlikeDocs, ignoreDocs...)
+ return q
+}
+
+// Ids sets the document ids to use in order to find documents that are "like" this.
+func (q *MoreLikeThisQuery) Ids(ids ...string) *MoreLikeThisQuery {
+ for _, id := range ids {
+ item := NewMoreLikeThisQueryItem().Id(id)
+ q.docs = append(q.docs, item)
+ }
+ return q
+}
+
+// Include specifies whether the input documents should also be included
+// in the results returned. Defaults to false.
+func (q *MoreLikeThisQuery) Include(include bool) *MoreLikeThisQuery {
+ q.include = &include
+ return q
+}
+
+// MinimumShouldMatch sets the number of terms that must match the generated
+// query expressed in the common syntax for minimum should match.
+// The default value is "30%".
+//
+// This used to be "PercentTermsToMatch" in Elasticsearch versions before 2.0.
+func (q *MoreLikeThisQuery) MinimumShouldMatch(minimumShouldMatch string) *MoreLikeThisQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+// MinTermFreq is the frequency below which terms will be ignored in the
+// source doc. The default frequency is 2.
+func (q *MoreLikeThisQuery) MinTermFreq(minTermFreq int) *MoreLikeThisQuery {
+ q.minTermFreq = &minTermFreq
+ return q
+}
+
+// MaxQueryTerms sets the maximum number of query terms that will be included
+// in any generated query. It defaults to 25.
+func (q *MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) *MoreLikeThisQuery {
+ q.maxQueryTerms = &maxQueryTerms
+ return q
+}
+
+// MinDocFreq sets the minimum number of documents in which a word must
+// occur to be considered; words in fewer docs are ignored. The default is 5.
+func (q *MoreLikeThisQuery) MinDocFreq(minDocFreq int) *MoreLikeThisQuery {
+ q.minDocFreq = &minDocFreq
+ return q
+}
+
+// MaxDocFreq sets the maximum frequency for which words may still appear.
+// Words that appear in more than this many docs will be ignored.
+// It defaults to unbounded.
+func (q *MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) *MoreLikeThisQuery {
+ q.maxDocFreq = &maxDocFreq
+ return q
+}
+
+// MinWordLength sets the minimum word length below which words will be
+// ignored. It defaults to 0.
+func (q *MoreLikeThisQuery) MinWordLength(minWordLength int) *MoreLikeThisQuery {
+ q.minWordLength = &minWordLength
+ return q
+}
+
+// MaxWordLength sets the maximum word length above which words will be ignored.
+// Defaults to unbounded (0).
+func (q *MoreLikeThisQuery) MaxWordLength(maxWordLength int) *MoreLikeThisQuery {
+ q.maxWordLength = &maxWordLength
+ return q
+}
+
+// BoostTerms sets the boost factor to use when boosting terms.
+// It defaults to 1.
+func (q *MoreLikeThisQuery) BoostTerms(boostTerms float64) *MoreLikeThisQuery {
+ q.boostTerms = &boostTerms
+ return q
+}
+
+// Analyzer specifies the analyzer that will be used to analyze the text.
+// Defaults to the analyzer associated with the field.
+func (q *MoreLikeThisQuery) Analyzer(analyzer string) *MoreLikeThisQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *MoreLikeThisQuery) Boost(boost float64) *MoreLikeThisQuery {
+ q.boost = &boost
+ return q
+}
+
+// FailOnUnsupportedField indicates whether to fail or return no result
+// when this query is run against a field which is not supported, such as
+// a binary/numeric field.
+func (q *MoreLikeThisQuery) FailOnUnsupportedField(fail bool) *MoreLikeThisQuery {
+ q.failOnUnsupportedField = &fail
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *MoreLikeThisQuery) QueryName(queryName string) *MoreLikeThisQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source creates the source for the MLT query.
+// It may return an error if the caller forgot to specify any documents to
+// be "liked" in the MoreLikeThisQuery.
+func (q *MoreLikeThisQuery) Source() (interface{}, error) {
+ // {
+	//   "more_like_this" : { ... }
+ // }
+ if len(q.docs) == 0 {
+ return nil, errors.New(`more_like_this requires some documents to be "liked"`)
+ }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["more_like_this"] = params
+
+ if len(q.fields) > 0 {
+ params["fields"] = q.fields
+ }
+
+ var likes []interface{}
+ for _, doc := range q.docs {
+ src, err := doc.Source()
+ if err != nil {
+ return nil, err
+ }
+ likes = append(likes, src)
+ }
+ params["like"] = likes
+
+ if len(q.unlikeDocs) > 0 {
+ var dontLikes []interface{}
+ for _, doc := range q.unlikeDocs {
+ src, err := doc.Source()
+ if err != nil {
+ return nil, err
+ }
+ dontLikes = append(dontLikes, src)
+ }
+ params["unlike"] = dontLikes
+ }
+
+ if q.minimumShouldMatch != "" {
+ params["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.minTermFreq != nil {
+ params["min_term_freq"] = *q.minTermFreq
+ }
+ if q.maxQueryTerms != nil {
+ params["max_query_terms"] = *q.maxQueryTerms
+ }
+ if len(q.stopWords) > 0 {
+ params["stop_words"] = q.stopWords
+ }
+ if q.minDocFreq != nil {
+ params["min_doc_freq"] = *q.minDocFreq
+ }
+ if q.maxDocFreq != nil {
+ params["max_doc_freq"] = *q.maxDocFreq
+ }
+ if q.minWordLength != nil {
+ params["min_word_length"] = *q.minWordLength
+ }
+ if q.maxWordLength != nil {
+ params["max_word_length"] = *q.maxWordLength
+ }
+ if q.boostTerms != nil {
+ params["boost_terms"] = *q.boostTerms
+ }
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+ if q.analyzer != "" {
+ params["analyzer"] = q.analyzer
+ }
+ if q.failOnUnsupportedField != nil {
+ params["fail_on_unsupported_field"] = *q.failOnUnsupportedField
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+ if q.include != nil {
+ params["include"] = *q.include
+ }
+
+ return source, nil
+}
+
+// -- MoreLikeThisQueryItem --
+
+// MoreLikeThisQueryItem represents a single item of a MoreLikeThisQuery
+// to be "liked" or "unliked".
+type MoreLikeThisQueryItem struct {
+ likeText string
+
+ index string
+ typ string
+ id string
+ doc interface{}
+ fields []string
+ routing string
+ fsc *FetchSourceContext
+ version int64
+ versionType string
+}
+
+// NewMoreLikeThisQueryItem creates and initializes a MoreLikeThisQueryItem.
+func NewMoreLikeThisQueryItem() *MoreLikeThisQueryItem {
+ return &MoreLikeThisQueryItem{
+ version: -1,
+ }
+}
+
+// LikeText represents a text to be "liked".
+func (item *MoreLikeThisQueryItem) LikeText(likeText string) *MoreLikeThisQueryItem {
+ item.likeText = likeText
+ return item
+}
+
+// Index represents the index of the item.
+func (item *MoreLikeThisQueryItem) Index(index string) *MoreLikeThisQueryItem {
+ item.index = index
+ return item
+}
+
+// Type represents the document type of the item.
+func (item *MoreLikeThisQueryItem) Type(typ string) *MoreLikeThisQueryItem {
+ item.typ = typ
+ return item
+}
+
+// Id represents the document id of the item.
+func (item *MoreLikeThisQueryItem) Id(id string) *MoreLikeThisQueryItem {
+ item.id = id
+ return item
+}
+
+// Doc represents a raw document template for the item.
+func (item *MoreLikeThisQueryItem) Doc(doc interface{}) *MoreLikeThisQueryItem {
+ item.doc = doc
+ return item
+}
+
+// Fields represents the list of fields of the item.
+func (item *MoreLikeThisQueryItem) Fields(fields ...string) *MoreLikeThisQueryItem {
+ item.fields = append(item.fields, fields...)
+ return item
+}
+
+// Routing sets the routing associated with the item.
+func (item *MoreLikeThisQueryItem) Routing(routing string) *MoreLikeThisQueryItem {
+ item.routing = routing
+ return item
+}
+
+// FetchSourceContext represents the fetch source of the item which controls
+// if and how _source should be returned.
+func (item *MoreLikeThisQueryItem) FetchSourceContext(fsc *FetchSourceContext) *MoreLikeThisQueryItem {
+ item.fsc = fsc
+ return item
+}
+
+// Version specifies the version of the item.
+func (item *MoreLikeThisQueryItem) Version(version int64) *MoreLikeThisQueryItem {
+ item.version = version
+ return item
+}
+
+// VersionType represents the version type of the item.
+func (item *MoreLikeThisQueryItem) VersionType(versionType string) *MoreLikeThisQueryItem {
+ item.versionType = versionType
+ return item
+}
+
+// Source returns the JSON-serializable fragment of the entity.
+func (item *MoreLikeThisQueryItem) Source() (interface{}, error) {
+ if item.likeText != "" {
+ return item.likeText, nil
+ }
+
+ source := make(map[string]interface{})
+
+ if item.index != "" {
+ source["_index"] = item.index
+ }
+ if item.typ != "" {
+ source["_type"] = item.typ
+ }
+ if item.id != "" {
+ source["_id"] = item.id
+ }
+ if item.doc != nil {
+ source["doc"] = item.doc
+ }
+ if len(item.fields) > 0 {
+ source["fields"] = item.fields
+ }
+ if item.routing != "" {
+ source["_routing"] = item.routing
+ }
+ if item.fsc != nil {
+ src, err := item.fsc.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["_source"] = src
+ }
+ if item.version >= 0 {
+ source["_version"] = item.version
+ }
+ if item.versionType != "" {
+ source["_version_type"] = item.versionType
+ }
+
+ return source, nil
+}
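A combined sketch of the query and item builders above: "like" a stored document plus a free-text snippet while steering away from another document. All ids, fields, and text are illustrative:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"

    	"github.com/olivere/elastic"
    )

    func main() {
    	q := elastic.NewMoreLikeThisQuery().
    		Field("message").
    		LikeText("Golang and Elasticsearch").
    		LikeItems(elastic.NewMoreLikeThisQueryItem().Id("1")).
    		IgnoreLikeItems(elastic.NewMoreLikeThisQueryItem().Id("3")).
    		MinTermFreq(1).
    		MaxQueryTerms(12)
    	src, err := q.Source() // errors if nothing to be "liked" was given
    	if err != nil {
    		log.Fatal(err)
    	}
    	data, err := json.Marshal(src)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(string(data))
    }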
diff --git a/vendor/github.com/olivere/elastic/search_queries_more_like_this_test.go b/vendor/github.com/olivere/elastic/search_queries_more_like_this_test.go
new file mode 100644
index 000000000..dcbbe74d1
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_more_like_this_test.go
@@ -0,0 +1,92 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+func TestMoreLikeThisQuerySourceWithLikeText(t *testing.T) {
+ q := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(data)
+ expected := `{"more_like_this":{"fields":["message"],"like":["Golang topic"]}}`
+ if got != expected {
+ t.Fatalf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMoreLikeThisQuerySourceWithLikeAndUnlikeItems(t *testing.T) {
+ q := NewMoreLikeThisQuery()
+ q = q.LikeItems(
+ NewMoreLikeThisQueryItem().Id("1"),
+ NewMoreLikeThisQueryItem().Index(testIndexName2).Type("comment").Id("2").Routing("routing_id"),
+ )
+ q = q.IgnoreLikeItems(NewMoreLikeThisQueryItem().Id("3"))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(data)
+ expected := `{"more_like_this":{"like":[{"_id":"1"},{"_id":"2","_index":"elastic-test2","_routing":"routing_id","_type":"comment"}],"unlike":[{"_id":"3"}]}}`
+ if got != expected {
+ t.Fatalf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMoreLikeThisQuery(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another Golang topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Common query
+ mltq := NewMoreLikeThisQuery().LikeText("Golang topic").Field("message")
+ res, err := client.Search().
+ Index(testIndexName).
+ Query(mltq).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_multi_match.go b/vendor/github.com/olivere/elastic/search_queries_multi_match.go
new file mode 100644
index 000000000..b6ff2107e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_multi_match.go
@@ -0,0 +1,275 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "strings"
+)
+
+// MultiMatchQuery builds on the MatchQuery to allow multi-field queries.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-multi-match-query.html
+type MultiMatchQuery struct {
+ text interface{}
+ fields []string
+ fieldBoosts map[string]*float64
+ typ string // best_fields, boolean, most_fields, cross_fields, phrase, phrase_prefix
+ operator string // AND or OR
+ analyzer string
+ boost *float64
+ slop *int
+ fuzziness string
+ prefixLength *int
+ maxExpansions *int
+ minimumShouldMatch string
+ rewrite string
+ fuzzyRewrite string
+ tieBreaker *float64
+ lenient *bool
+ cutoffFrequency *float64
+ zeroTermsQuery string
+ queryName string
+}
+
+// NewMultiMatchQuery creates and initializes a new MultiMatchQuery.
+func NewMultiMatchQuery(text interface{}, fields ...string) *MultiMatchQuery {
+ q := &MultiMatchQuery{
+ text: text,
+ fields: make([]string, 0),
+ fieldBoosts: make(map[string]*float64),
+ }
+ q.fields = append(q.fields, fields...)
+ return q
+}
+
+// Field adds a field to run the multi match against.
+func (q *MultiMatchQuery) Field(field string) *MultiMatchQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+// FieldWithBoost adds a field to run the multi match against with a specific boost.
+func (q *MultiMatchQuery) FieldWithBoost(field string, boost float64) *MultiMatchQuery {
+ q.fields = append(q.fields, field)
+ q.fieldBoosts[field] = &boost
+ return q
+}
+
+// Type can be "best_fields", "boolean", "most_fields", "cross_fields",
+// "phrase", or "phrase_prefix".
+func (q *MultiMatchQuery) Type(typ string) *MultiMatchQuery {
+ var zero = float64(0.0)
+ var one = float64(1.0)
+
+ switch strings.ToLower(typ) {
+ default: // best_fields / boolean
+ q.typ = "best_fields"
+ q.tieBreaker = &zero
+ case "most_fields":
+ q.typ = "most_fields"
+ q.tieBreaker = &one
+ case "cross_fields":
+ q.typ = "cross_fields"
+ q.tieBreaker = &zero
+ case "phrase":
+ q.typ = "phrase"
+ q.tieBreaker = &zero
+ case "phrase_prefix":
+ q.typ = "phrase_prefix"
+ q.tieBreaker = &zero
+ }
+ return q
+}
+
+// Operator sets the operator to use when using a boolean query.
+// It can be either AND or OR (default).
+func (q *MultiMatchQuery) Operator(operator string) *MultiMatchQuery {
+ q.operator = operator
+ return q
+}
+
+// Analyzer sets the analyzer to use explicitly. It defaults to the explicit
+// mapping config for the field or, if not set, the default search analyzer.
+func (q *MultiMatchQuery) Analyzer(analyzer string) *MultiMatchQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *MultiMatchQuery) Boost(boost float64) *MultiMatchQuery {
+ q.boost = &boost
+ return q
+}
+
+// Slop sets the phrase slop if evaluated to a phrase query type.
+func (q *MultiMatchQuery) Slop(slop int) *MultiMatchQuery {
+ q.slop = &slop
+ return q
+}
+
+// Fuzziness sets the fuzziness used when evaluated to a fuzzy query type.
+// It defaults to "AUTO".
+func (q *MultiMatchQuery) Fuzziness(fuzziness string) *MultiMatchQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+// PrefixLength for the fuzzy process.
+func (q *MultiMatchQuery) PrefixLength(prefixLength int) *MultiMatchQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+// MaxExpansions is the number of term expansions to use when using fuzzy
+// or prefix type query. It defaults to unbounded, so it's recommended
+// to set it to a reasonable value for faster execution.
+func (q *MultiMatchQuery) MaxExpansions(maxExpansions int) *MultiMatchQuery {
+ q.maxExpansions = &maxExpansions
+ return q
+}
+
+// MinimumShouldMatch represents the minimum number of optional should clauses
+// to match.
+func (q *MultiMatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MultiMatchQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+// Rewrite sets the rewrite method used when rewriting the query.
+func (q *MultiMatchQuery) Rewrite(rewrite string) *MultiMatchQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+// FuzzyRewrite sets the rewrite method used when rewriting fuzzy queries.
+func (q *MultiMatchQuery) FuzzyRewrite(fuzzyRewrite string) *MultiMatchQuery {
+ q.fuzzyRewrite = fuzzyRewrite
+ return q
+}
+
+// TieBreaker for "best-match" disjunction queries (OR queries).
+// The tie breaker capability allows documents that match more than one
+// query clause (in this case on more than one field) to be scored better
+// than documents that match only the best of the fields, without confusing
+// this with the better case of two distinct matches in the multiple fields.
+//
+// A tie-breaker value of 1.0 is interpreted as a signal to score queries as
+// "most-match" queries where all matching query clauses are considered for scoring.
+func (q *MultiMatchQuery) TieBreaker(tieBreaker float64) *MultiMatchQuery {
+ q.tieBreaker = &tieBreaker
+ return q
+}
+
+// Lenient indicates whether format based failures will be ignored.
+func (q *MultiMatchQuery) Lenient(lenient bool) *MultiMatchQuery {
+ q.lenient = &lenient
+ return q
+}
+
+// CutoffFrequency sets a cutoff value in [0..1] (or absolute number >=1)
+// representing the maximum threshold of a terms document frequency to be
+// considered a low frequency term.
+func (q *MultiMatchQuery) CutoffFrequency(cutoff float64) *MultiMatchQuery {
+ q.cutoffFrequency = &cutoff
+ return q
+}
+
+// ZeroTermsQuery can be "all" or "none".
+func (q *MultiMatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MultiMatchQuery {
+ q.zeroTermsQuery = zeroTermsQuery
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *MultiMatchQuery) QueryName(queryName string) *MultiMatchQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the query.
+func (q *MultiMatchQuery) Source() (interface{}, error) {
+ //
+ // {
+ // "multi_match" : {
+ // "query" : "this is a test",
+ // "fields" : [ "subject", "message" ]
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ multiMatch := make(map[string]interface{})
+ source["multi_match"] = multiMatch
+
+ multiMatch["query"] = q.text
+
+ if len(q.fields) > 0 {
+ var fields []string
+ for _, field := range q.fields {
+ if boost, found := q.fieldBoosts[field]; found {
+ if boost != nil {
+ fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
+ } else {
+ fields = append(fields, field)
+ }
+ } else {
+ fields = append(fields, field)
+ }
+ }
+ multiMatch["fields"] = fields
+ }
+
+ if q.typ != "" {
+ multiMatch["type"] = q.typ
+ }
+
+ if q.operator != "" {
+ multiMatch["operator"] = q.operator
+ }
+ if q.analyzer != "" {
+ multiMatch["analyzer"] = q.analyzer
+ }
+ if q.boost != nil {
+ multiMatch["boost"] = *q.boost
+ }
+ if q.slop != nil {
+ multiMatch["slop"] = *q.slop
+ }
+ if q.fuzziness != "" {
+ multiMatch["fuzziness"] = q.fuzziness
+ }
+ if q.prefixLength != nil {
+ multiMatch["prefix_length"] = *q.prefixLength
+ }
+ if q.maxExpansions != nil {
+ multiMatch["max_expansions"] = *q.maxExpansions
+ }
+ if q.minimumShouldMatch != "" {
+ multiMatch["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.rewrite != "" {
+ multiMatch["rewrite"] = q.rewrite
+ }
+ if q.fuzzyRewrite != "" {
+ multiMatch["fuzzy_rewrite"] = q.fuzzyRewrite
+ }
+ if q.tieBreaker != nil {
+ multiMatch["tie_breaker"] = *q.tieBreaker
+ }
+ if q.lenient != nil {
+ multiMatch["lenient"] = *q.lenient
+ }
+ if q.cutoffFrequency != nil {
+ multiMatch["cutoff_frequency"] = *q.cutoffFrequency
+ }
+ if q.zeroTermsQuery != "" {
+ multiMatch["zero_terms_query"] = q.zeroTermsQuery
+ }
+ if q.queryName != "" {
+ multiMatch["_name"] = q.queryName
+ }
+ return source, nil
+}
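A sketch of a cross_fields multi-match with a per-field boost; note how FieldWithBoost is rendered into the field list with the caret syntax built in Source above ("subject^3.000000"). Field names and text are made up:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"

    	"github.com/olivere/elastic"
    )

    func main() {
    	// Treat "subject" and "message" as one combined field, requiring all
    	// terms to appear somewhere across them, with "subject" weighted 3x.
    	q := elastic.NewMultiMatchQuery("this is a test", "message").
    		FieldWithBoost("subject", 3).
    		Type("cross_fields").
    		Operator("and")
    	src, err := q.Source()
    	if err != nil {
    		log.Fatal(err)
    	}
    	data, err := json.Marshal(src)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(string(data))
    }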
diff --git a/vendor/github.com/olivere/elastic/search_queries_multi_match_test.go b/vendor/github.com/olivere/elastic/search_queries_multi_match_test.go
new file mode 100644
index 000000000..d897f7e72
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_multi_match_test.go
@@ -0,0 +1,131 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestMultiMatchQuery(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryBestFields(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").Type("best_fields")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"best_fields"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryMostFields(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").Type("most_fields")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":1,"type":"most_fields"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryCrossFields(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").Type("cross_fields")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"cross_fields"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryPhrase(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryPhrasePrefix(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").Type("phrase_prefix")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0,"type":"phrase_prefix"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestMultiMatchQueryBestFieldsWithCustomTieBreaker(t *testing.T) {
+ q := NewMultiMatchQuery("this is a test", "subject", "message").
+ Type("best_fields").
+ TieBreaker(0.3)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"multi_match":{"fields":["subject","message"],"query":"this is a test","tie_breaker":0.3,"type":"best_fields"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_nested.go b/vendor/github.com/olivere/elastic/search_queries_nested.go
new file mode 100644
index 000000000..d0a342283
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_nested.go
@@ -0,0 +1,96 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// NestedQuery allows querying nested objects / docs.
+// The query is executed against the nested objects / docs as if they were
+// indexed as separate docs (internally, they are) and the result joins
+// back into the root parent doc (or parent nested mapping).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-nested-query.html
+type NestedQuery struct {
+ query Query
+ path string
+ scoreMode string
+ boost *float64
+ queryName string
+ innerHit *InnerHit
+ ignoreUnmapped *bool
+}
+
+// NewNestedQuery creates and initializes a new NestedQuery.
+func NewNestedQuery(path string, query Query) *NestedQuery {
+ return &NestedQuery{path: path, query: query}
+}
+
+// ScoreMode specifies the score mode.
+func (q *NestedQuery) ScoreMode(scoreMode string) *NestedQuery {
+ q.scoreMode = scoreMode
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *NestedQuery) Boost(boost float64) *NestedQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit
+func (q *NestedQuery) QueryName(queryName string) *NestedQuery {
+ q.queryName = queryName
+ return q
+}
+
+// InnerHit sets the inner hit definition in the scope of this nested query
+// and reusing the defined path and query.
+func (q *NestedQuery) InnerHit(innerHit *InnerHit) *NestedQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// IgnoreUnmapped sets the ignore_unmapped option for the filter that ignores
+// unmapped nested fields
+func (q *NestedQuery) IgnoreUnmapped(value bool) *NestedQuery {
+ q.ignoreUnmapped = &value
+ return q
+}
+
+// Source returns JSON for the query.
+func (q *NestedQuery) Source() (interface{}, error) {
+ query := make(map[string]interface{})
+ nq := make(map[string]interface{})
+ query["nested"] = nq
+
+ src, err := q.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ nq["query"] = src
+
+ nq["path"] = q.path
+
+ if q.scoreMode != "" {
+ nq["score_mode"] = q.scoreMode
+ }
+ if q.boost != nil {
+ nq["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ nq["_name"] = q.queryName
+ }
+ if q.ignoreUnmapped != nil {
+ nq["ignore_unmapped"] = *q.ignoreUnmapped
+ }
+ if q.innerHit != nil {
+ src, err := q.innerHit.Source()
+ if err != nil {
+ return nil, err
+ }
+ nq["inner_hits"] = src
+ }
+ return query, nil
+}
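A sketch of the nested query above with an explicit score mode. NewTermQuery is the same helper the tests in this diff use; the "comments" path is an assumed nested mapping:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"

    	"github.com/olivere/elastic"
    )

    func main() {
    	// Score each parent by its best-matching nested comment ("max"),
    	// and skip indices where "comments" is not mapped as nested.
    	inner := elastic.NewTermQuery("comments.author", "olivere")
    	q := elastic.NewNestedQuery("comments", inner).
    		ScoreMode("max").
    		IgnoreUnmapped(true)
    	src, err := q.Source()
    	if err != nil {
    		log.Fatal(err)
    	}
    	data, err := json.Marshal(src)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(string(data))
    	// {"nested":{"ignore_unmapped":true,"path":"comments","query":{"term":{"comments.author":"olivere"}},"score_mode":"max"}}
    }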
diff --git a/vendor/github.com/olivere/elastic/search_queries_nested_test.go b/vendor/github.com/olivere/elastic/search_queries_nested_test.go
new file mode 100644
index 000000000..c7a5322a6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_nested_test.go
@@ -0,0 +1,86 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestNestedQuery(t *testing.T) {
+ bq := NewBoolQuery()
+ bq = bq.Must(NewTermQuery("obj1.name", "blue"))
+ bq = bq.Must(NewRangeQuery("obj1.count").Gt(5))
+ q := NewNestedQuery("obj1", bq).QueryName("qname")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"nested":{"_name":"qname","path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNestedQueryWithInnerHit(t *testing.T) {
+ bq := NewBoolQuery()
+ bq = bq.Must(NewTermQuery("obj1.name", "blue"))
+ bq = bq.Must(NewRangeQuery("obj1.count").Gt(5))
+ q := NewNestedQuery("obj1", bq)
+ q = q.QueryName("qname")
+ q = q.InnerHit(NewInnerHit().Name("comments").Query(NewTermQuery("user", "olivere")))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"nested":{"_name":"qname","inner_hits":{"name":"comments","query":{"term":{"user":"olivere"}}},"path":"obj1","query":{"bool":{"must":[{"term":{"obj1.name":"blue"}},{"range":{"obj1.count":{"from":5,"include_lower":false,"include_upper":true,"to":null}}}]}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNestedQueryWithIgnoreUnmapped(t *testing.T) {
+ var tests = []struct {
+ query *BoolQuery
+ expected string
+ }{
+ {
+ NewBoolQuery().Must(NewNestedQuery("path", NewTermQuery("test", "test"))),
+ `{"bool":{"must":{"nested":{"path":"path","query":{"term":{"test":"test"}}}}}}`,
+ },
+ {
+ NewBoolQuery().Must(NewNestedQuery("path", NewTermQuery("test", "test")).IgnoreUnmapped(true)),
+ `{"bool":{"must":{"nested":{"ignore_unmapped":true,"path":"path","query":{"term":{"test":"test"}}}}}}`,
+ },
+ {
+ NewBoolQuery().Must(NewNestedQuery("path", NewTermQuery("test", "test")).IgnoreUnmapped(false)),
+ `{"bool":{"must":{"nested":{"ignore_unmapped":false,"path":"path","query":{"term":{"test":"test"}}}}}}`,
+ },
+ }
+ for _, test := range tests {
+ src, err := test.query.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ if got != test.expected {
+ t.Errorf("expected\n%s\n,got:\n%s", test.expected, got)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_parent_id.go b/vendor/github.com/olivere/elastic/search_queries_parent_id.go
new file mode 100644
index 000000000..c0b610f12
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_parent_id.go
@@ -0,0 +1,99 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ParentIdQuery can be used to find child documents which belong to a
+// particular parent.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-parent-id-query.html
+type ParentIdQuery struct {
+ typ string
+ id string
+ ignoreUnmapped *bool
+ boost *float64
+ queryName string
+ innerHit *InnerHit
+}
+
+// NewParentIdQuery creates and initializes a new parent_id query.
+func NewParentIdQuery(typ, id string) *ParentIdQuery {
+ return &ParentIdQuery{
+ typ: typ,
+ id: id,
+ }
+}
+
+// Type sets the parent type.
+func (q *ParentIdQuery) Type(typ string) *ParentIdQuery {
+ q.typ = typ
+ return q
+}
+
+// Id sets the id.
+func (q *ParentIdQuery) Id(id string) *ParentIdQuery {
+ q.id = id
+ return q
+}
+
+// IgnoreUnmapped specifies whether unmapped types should be ignored.
+// If set to false, the query fails when an unmapped type is found.
+func (q *ParentIdQuery) IgnoreUnmapped(ignore bool) *ParentIdQuery {
+ q.ignoreUnmapped = &ignore
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *ParentIdQuery) Boost(boost float64) *ParentIdQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName specifies the query name for the filter that can be used when
+// searching for matched filters per hit.
+func (q *ParentIdQuery) QueryName(queryName string) *ParentIdQuery {
+ q.queryName = queryName
+ return q
+}
+
+// InnerHit sets the inner hit definition in the scope of this query and
+// reusing the defined type and query.
+func (q *ParentIdQuery) InnerHit(innerHit *InnerHit) *ParentIdQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// Source returns JSON for the parent_id query.
+func (q *ParentIdQuery) Source() (interface{}, error) {
+ // {
+ // "parent_id" : {
+ // "type" : "blog",
+ // "id" : "1"
+ // }
+ // }
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["parent_id"] = query
+
+ query["type"] = q.typ
+ query["id"] = q.id
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.ignoreUnmapped != nil {
+ query["ignore_unmapped"] = *q.ignoreUnmapped
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+ if q.innerHit != nil {
+ src, err := q.innerHit.Source()
+ if err != nil {
+ return nil, err
+ }
+ query["inner_hits"] = src
+ }
+ return source, nil
+}
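A sketch of the parent_id query above; the join type "answer" is an assumption about the parent/child mapping:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"

    	"github.com/olivere/elastic"
    )

    func main() {
    	// Fetch the children of type "answer" hanging off parent document "1".
    	q := elastic.NewParentIdQuery("answer", "1").IgnoreUnmapped(true)
    	src, err := q.Source()
    	if err != nil {
    		log.Fatal(err)
    	}
    	data, err := json.Marshal(src)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(string(data))
    	// {"parent_id":{"id":"1","ignore_unmapped":true,"type":"answer"}}
    }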
diff --git a/vendor/github.com/olivere/elastic/search_queries_parent_id_test.go b/vendor/github.com/olivere/elastic/search_queries_parent_id_test.go
new file mode 100644
index 000000000..0d18f216a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_parent_id_test.go
@@ -0,0 +1,52 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestParentIdQuery(t *testing.T) {
+ tests := []struct {
+ Query Query
+ Expected string
+ }{
+ // #0
+ {
+ Query: NewParentIdQuery("blog_tag", "1"),
+ Expected: `{"parent_id":{"id":"1","type":"blog_tag"}}`,
+ },
+ // #1
+ {
+ Query: NewParentIdQuery("blog_tag", "1").IgnoreUnmapped(true),
+ Expected: `{"parent_id":{"id":"1","ignore_unmapped":true,"type":"blog_tag"}}`,
+ },
+ // #2
+ {
+ Query: NewParentIdQuery("blog_tag", "1").IgnoreUnmapped(false),
+ Expected: `{"parent_id":{"id":"1","ignore_unmapped":false,"type":"blog_tag"}}`,
+ },
+ // #3
+ {
+ Query: NewParentIdQuery("blog_tag", "1").IgnoreUnmapped(true).Boost(5).QueryName("my_parent_query"),
+ Expected: `{"parent_id":{"_name":"my_parent_query","boost":5,"id":"1","ignore_unmapped":true,"type":"blog_tag"}}`,
+ },
+ }
+
+ for i, tt := range tests {
+ src, err := tt.Query.Source()
+ if err != nil {
+ t.Fatalf("#%d: encoding Source failed: %v", i, err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("#%d: marshaling to JSON failed: %v", i, err)
+ }
+ if want, got := tt.Expected, string(data); want != got {
+ t.Fatalf("#%d: expected\n%s\ngot:\n%s", i, want, got)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_percolator.go b/vendor/github.com/olivere/elastic/search_queries_percolator.go
new file mode 100644
index 000000000..a7605655b
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_percolator.go
@@ -0,0 +1,115 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// PercolatorQuery can be used to match queries stored in an index.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-percolate-query.html
+type PercolatorQuery struct {
+ field string
+ documentType string // deprecated
+ document interface{}
+ indexedDocumentIndex string
+ indexedDocumentType string
+ indexedDocumentId string
+ indexedDocumentRouting string
+ indexedDocumentPreference string
+ indexedDocumentVersion *int64
+}
+
+// NewPercolatorQuery creates and initializes a new Percolator query.
+func NewPercolatorQuery() *PercolatorQuery {
+ return &PercolatorQuery{}
+}
+
+func (q *PercolatorQuery) Field(field string) *PercolatorQuery {
+ q.field = field
+ return q
+}
+
+// Deprecated: DocumentType is deprecated as of 6.0.
+func (q *PercolatorQuery) DocumentType(typ string) *PercolatorQuery {
+ q.documentType = typ
+ return q
+}
+
+func (q *PercolatorQuery) Document(doc interface{}) *PercolatorQuery {
+ q.document = doc
+ return q
+}
+
+func (q *PercolatorQuery) IndexedDocumentIndex(index string) *PercolatorQuery {
+ q.indexedDocumentIndex = index
+ return q
+}
+
+func (q *PercolatorQuery) IndexedDocumentType(typ string) *PercolatorQuery {
+ q.indexedDocumentType = typ
+ return q
+}
+
+func (q *PercolatorQuery) IndexedDocumentId(id string) *PercolatorQuery {
+ q.indexedDocumentId = id
+ return q
+}
+
+func (q *PercolatorQuery) IndexedDocumentRouting(routing string) *PercolatorQuery {
+ q.indexedDocumentRouting = routing
+ return q
+}
+
+func (q *PercolatorQuery) IndexedDocumentPreference(preference string) *PercolatorQuery {
+ q.indexedDocumentPreference = preference
+ return q
+}
+
+func (q *PercolatorQuery) IndexedDocumentVersion(version int64) *PercolatorQuery {
+ q.indexedDocumentVersion = &version
+ return q
+}
+
+// Source returns JSON for the percolate query.
+func (q *PercolatorQuery) Source() (interface{}, error) {
+ if len(q.field) == 0 {
+ return nil, errors.New("elastic: Field is required in PercolatorQuery")
+ }
+ if q.document == nil {
+ return nil, errors.New("elastic: Document is required in PercolatorQuery")
+ }
+
+ // {
+ // "percolate" : { ... }
+ // }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["percolate"] = params
+ params["field"] = q.field
+ if q.documentType != "" {
+ params["document_type"] = q.documentType
+ }
+ params["document"] = q.document
+ if len(q.indexedDocumentIndex) > 0 {
+ params["index"] = q.indexedDocumentIndex
+ }
+ if len(q.indexedDocumentType) > 0 {
+ params["type"] = q.indexedDocumentType
+ }
+ if len(q.indexedDocumentId) > 0 {
+ params["id"] = q.indexedDocumentId
+ }
+ if len(q.indexedDocumentRouting) > 0 {
+ params["routing"] = q.indexedDocumentRouting
+ }
+ if len(q.indexedDocumentPreference) > 0 {
+ params["preference"] = q.indexedDocumentPreference
+ }
+ if q.indexedDocumentVersion != nil {
+ params["version"] = *q.indexedDocumentVersion
+ }
+ return source, nil
+}
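
For illustration only (not from the commit): Field and Document are required by Source(), so a minimal sketch matching a hypothetical document against stored queries looks like this.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        // Field names the mapped percolator field; Document is the document
        // to match against stored queries. Both are required.
        q := elastic.NewPercolatorQuery().
            Field("query").
            Document(map[string]interface{}{"message": "Hello"})
        src, err := q.Source()
        if err != nil {
            panic(err) // returned if Field or Document is missing
        }
        out, _ := json.Marshal(src)
        fmt.Println(string(out)) // {"percolate":{"document":{"message":"Hello"},"field":"query"}}
    }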
diff --git a/vendor/github.com/olivere/elastic/search_queries_percolator_test.go b/vendor/github.com/olivere/elastic/search_queries_percolator_test.go
new file mode 100644
index 000000000..edc7be626
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_percolator_test.go
@@ -0,0 +1,65 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestPercolatorQuery(t *testing.T) {
+ q := NewPercolatorQuery().
+ Field("query").
+ Document(map[string]interface{}{
+ "message": "Some message",
+ })
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percolate":{"document":{"message":"Some message"},"field":"query"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercolatorQueryWithDetails(t *testing.T) {
+ q := NewPercolatorQuery().
+ Field("query").
+ Document(map[string]interface{}{
+ "message": "Some message",
+ }).
+ IndexedDocumentIndex("index").
+ IndexedDocumentId("1").
+ IndexedDocumentRouting("route").
+ IndexedDocumentPreference("one").
+ IndexedDocumentVersion(1)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"percolate":{"document":{"message":"Some message"},"field":"query","id":"1","index":"index","preference":"one","routing":"route","version":1}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPercolatorQueryWithMissingFields(t *testing.T) {
+	q := NewPercolatorQuery() // neither Field nor Document set
+ _, err := q.Source()
+ if err == nil {
+ t.Fatal("expected error, got nil")
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_prefix.go b/vendor/github.com/olivere/elastic/search_queries_prefix.go
new file mode 100644
index 000000000..075bcc7ba
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_prefix.go
@@ -0,0 +1,67 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PrefixQuery matches documents that have fields containing terms
+// with a specified prefix (not analyzed).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-prefix-query.html
+type PrefixQuery struct {
+ name string
+ prefix string
+ boost *float64
+ rewrite string
+ queryName string
+}
+
+// NewPrefixQuery creates and initializes a new PrefixQuery.
+func NewPrefixQuery(name string, prefix string) *PrefixQuery {
+ return &PrefixQuery{name: name, prefix: prefix}
+}
+
+// Boost sets the boost for this query.
+func (q *PrefixQuery) Boost(boost float64) *PrefixQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q *PrefixQuery) Rewrite(rewrite string) *PrefixQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *PrefixQuery) QueryName(queryName string) *PrefixQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the query.
+func (q *PrefixQuery) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["prefix"] = query
+
+ if q.boost == nil && q.rewrite == "" && q.queryName == "" {
+ query[q.name] = q.prefix
+ } else {
+ subQuery := make(map[string]interface{})
+ subQuery["value"] = q.prefix
+ if q.boost != nil {
+ subQuery["boost"] = *q.boost
+ }
+ if q.rewrite != "" {
+ subQuery["rewrite"] = q.rewrite
+ }
+ if q.queryName != "" {
+ subQuery["_name"] = q.queryName
+ }
+ query[q.name] = subQuery
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_prefix_example_test.go b/vendor/github.com/olivere/elastic/search_queries_prefix_example_test.go
new file mode 100644
index 000000000..73950f1f3
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_prefix_example_test.go
@@ -0,0 +1,35 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic_test
+
+import (
+ "context"
+
+ "github.com/olivere/elastic"
+)
+
+func ExamplePrefixQuery() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+	// Define prefix query
+ q := elastic.NewPrefixQuery("user", "oli")
+ q = q.QueryName("my_query_name")
+
+ searchResult, err := client.Search().
+ Index("twitter").
+ Query(q).
+ Pretty(true).
+ Do(context.Background())
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ _ = searchResult
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_prefix_test.go b/vendor/github.com/olivere/elastic/search_queries_prefix_test.go
new file mode 100644
index 000000000..78d27b600
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_prefix_test.go
@@ -0,0 +1,45 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestPrefixQuery(t *testing.T) {
+ q := NewPrefixQuery("user", "ki")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"prefix":{"user":"ki"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPrefixQueryWithOptions(t *testing.T) {
+ q := NewPrefixQuery("user", "ki")
+ q = q.QueryName("my_query_name")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"prefix":{"user":{"_name":"my_query_name","value":"ki"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_query_string.go b/vendor/github.com/olivere/elastic/search_queries_query_string.go
new file mode 100644
index 000000000..a52c8b1a5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_query_string.go
@@ -0,0 +1,350 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+)
+
+// QueryStringQuery uses the query parser in order to parse its content.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-query-string-query.html
+type QueryStringQuery struct {
+ queryString string
+ defaultField string
+ defaultOperator string
+ analyzer string
+ quoteAnalyzer string
+ quoteFieldSuffix string
+ allowLeadingWildcard *bool
+ lowercaseExpandedTerms *bool // Deprecated: Decision is now made by the analyzer
+ enablePositionIncrements *bool
+ analyzeWildcard *bool
+ locale string // Deprecated: Decision is now made by the analyzer
+ boost *float64
+ fuzziness string
+ fuzzyPrefixLength *int
+ fuzzyMaxExpansions *int
+ fuzzyRewrite string
+ phraseSlop *int
+ fields []string
+ fieldBoosts map[string]*float64
+ tieBreaker *float64
+ rewrite string
+ minimumShouldMatch string
+ lenient *bool
+ queryName string
+ timeZone string
+ maxDeterminizedStates *int
+ escape *bool
+ typ string
+}
+
+// NewQueryStringQuery creates and initializes a new QueryStringQuery.
+func NewQueryStringQuery(queryString string) *QueryStringQuery {
+ return &QueryStringQuery{
+ queryString: queryString,
+ fields: make([]string, 0),
+ fieldBoosts: make(map[string]*float64),
+ }
+}
+
+// DefaultField specifies the field to run against when no prefix field
+// is specified. Only relevant when not explicitly adding fields the query
+// string will run against.
+func (q *QueryStringQuery) DefaultField(defaultField string) *QueryStringQuery {
+ q.defaultField = defaultField
+ return q
+}
+
+// Field adds a field to run the query string against.
+func (q *QueryStringQuery) Field(field string) *QueryStringQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+// Type sets how multiple fields should be combined to build textual part queries,
+// e.g. "best_fields".
+func (q *QueryStringQuery) Type(typ string) *QueryStringQuery {
+ q.typ = typ
+ return q
+}
+
+// FieldWithBoost adds a field to run the query string against with a specific boost.
+func (q *QueryStringQuery) FieldWithBoost(field string, boost float64) *QueryStringQuery {
+ q.fields = append(q.fields, field)
+ q.fieldBoosts[field] = &boost
+ return q
+}
+
+// TieBreaker is used when more than one field is used with the query string,
+// and combined queries are using dismax.
+func (q *QueryStringQuery) TieBreaker(tieBreaker float64) *QueryStringQuery {
+ q.tieBreaker = &tieBreaker
+ return q
+}
+
+// DefaultOperator sets the boolean operator of the query parser used to
+// parse the query string.
+//
+// In default mode (OR) terms without any modifiers
+// are considered optional, e.g. "capital of Hungary" is equal to
+// "capital OR of OR Hungary".
+//
+// In AND mode, terms are considered to be in conjunction. The above mentioned
+// query is then parsed as "capital AND of AND Hungary".
+func (q *QueryStringQuery) DefaultOperator(operator string) *QueryStringQuery {
+ q.defaultOperator = operator
+ return q
+}
+
+// Analyzer is an optional analyzer used to analyze the query string.
+// Note, if a field has search analyzer defined for it, then it will be used
+// automatically. Defaults to the smart search analyzer.
+func (q *QueryStringQuery) Analyzer(analyzer string) *QueryStringQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// QuoteAnalyzer is an optional analyzer to be used to analyze the query string
+// for phrase searches. Note, if a field has search analyzer defined for it,
+// then it will be used automatically. Defaults to the smart search analyzer.
+func (q *QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) *QueryStringQuery {
+ q.quoteAnalyzer = quoteAnalyzer
+ return q
+}
+
+// MaxDeterminizedState protects against overly expensive regular expression queries.
+func (q *QueryStringQuery) MaxDeterminizedState(maxDeterminizedStates int) *QueryStringQuery {
+ q.maxDeterminizedStates = &maxDeterminizedStates
+ return q
+}
+
+// AllowLeadingWildcard specifies whether leading wildcards should be allowed
+// or not (defaults to true).
+func (q *QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) *QueryStringQuery {
+ q.allowLeadingWildcard = &allowLeadingWildcard
+ return q
+}
+
+// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
+// and range queries are automatically lower-cased or not. Default is true.
+//
+// Deprecated: Decision is now made by the analyzer.
+func (q *QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *QueryStringQuery {
+ q.lowercaseExpandedTerms = &lowercaseExpandedTerms
+ return q
+}
+
+// EnablePositionIncrements indicates whether to enable position increments
+// in result query. Defaults to true.
+//
+// When set, result phrase and multi-phrase queries will be aware of position
+// increments. Useful when e.g. a StopFilter increases the position increment
+// of the token that follows an omitted token.
+func (q *QueryStringQuery) EnablePositionIncrements(enablePositionIncrements bool) *QueryStringQuery {
+ q.enablePositionIncrements = &enablePositionIncrements
+ return q
+}
+
+// Fuzziness sets the edit distance for fuzzy queries. Default is "AUTO".
+func (q *QueryStringQuery) Fuzziness(fuzziness string) *QueryStringQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+// FuzzyPrefixLength sets the minimum prefix length for fuzzy queries.
+// Default is 1.
+func (q *QueryStringQuery) FuzzyPrefixLength(fuzzyPrefixLength int) *QueryStringQuery {
+ q.fuzzyPrefixLength = &fuzzyPrefixLength
+ return q
+}
+
+func (q *QueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) *QueryStringQuery {
+ q.fuzzyMaxExpansions = &fuzzyMaxExpansions
+ return q
+}
+
+func (q *QueryStringQuery) FuzzyRewrite(fuzzyRewrite string) *QueryStringQuery {
+ q.fuzzyRewrite = fuzzyRewrite
+ return q
+}
+
+// PhraseSlop sets the default slop for phrases. If zero, then exact matches
+// are required. Default value is zero.
+func (q *QueryStringQuery) PhraseSlop(phraseSlop int) *QueryStringQuery {
+ q.phraseSlop = &phraseSlop
+ return q
+}
+
+// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
+func (q *QueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *QueryStringQuery {
+ q.analyzeWildcard = &analyzeWildcard
+ return q
+}
+
+func (q *QueryStringQuery) Rewrite(rewrite string) *QueryStringQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+func (q *QueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *QueryStringQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *QueryStringQuery) Boost(boost float64) *QueryStringQuery {
+ q.boost = &boost
+ return q
+}
+
+// QuoteFieldSuffix is an optional field name suffix to automatically
+// try to add to the field searched when using quoted text.
+func (q *QueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) *QueryStringQuery {
+ q.quoteFieldSuffix = quoteFieldSuffix
+ return q
+}
+
+// Lenient indicates whether the query string parser should be lenient
+// when parsing field values. It defaults to the index setting and if not
+// set, defaults to false.
+func (q *QueryStringQuery) Lenient(lenient bool) *QueryStringQuery {
+ q.lenient = &lenient
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *QueryStringQuery) QueryName(queryName string) *QueryStringQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Locale specifies the locale to be used for string conversions.
+//
+// Deprecated: Decision is now made by the analyzer.
+func (q *QueryStringQuery) Locale(locale string) *QueryStringQuery {
+ q.locale = locale
+ return q
+}
+
+// TimeZone can be used to automatically adjust to/from fields using a
+// timezone. Only used with date fields, of course.
+func (q *QueryStringQuery) TimeZone(timeZone string) *QueryStringQuery {
+ q.timeZone = timeZone
+ return q
+}
+
+// Escape performs escaping of the query string.
+func (q *QueryStringQuery) Escape(escape bool) *QueryStringQuery {
+ q.escape = &escape
+ return q
+}
+
+// Source returns JSON for the query.
+func (q *QueryStringQuery) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["query_string"] = query
+
+ query["query"] = q.queryString
+
+ if q.defaultField != "" {
+ query["default_field"] = q.defaultField
+ }
+
+ if len(q.fields) > 0 {
+ var fields []string
+ for _, field := range q.fields {
+ if boost, found := q.fieldBoosts[field]; found {
+ if boost != nil {
+ fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
+ } else {
+ fields = append(fields, field)
+ }
+ } else {
+ fields = append(fields, field)
+ }
+ }
+ query["fields"] = fields
+ }
+
+ if q.tieBreaker != nil {
+ query["tie_breaker"] = *q.tieBreaker
+ }
+ if q.defaultOperator != "" {
+ query["default_operator"] = q.defaultOperator
+ }
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+ if q.quoteAnalyzer != "" {
+ query["quote_analyzer"] = q.quoteAnalyzer
+ }
+ if q.maxDeterminizedStates != nil {
+ query["max_determinized_states"] = *q.maxDeterminizedStates
+ }
+ if q.allowLeadingWildcard != nil {
+ query["allow_leading_wildcard"] = *q.allowLeadingWildcard
+ }
+ if q.lowercaseExpandedTerms != nil {
+ query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms
+ }
+ if q.enablePositionIncrements != nil {
+ query["enable_position_increments"] = *q.enablePositionIncrements
+ }
+ if q.fuzziness != "" {
+ query["fuzziness"] = q.fuzziness
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.fuzzyPrefixLength != nil {
+ query["fuzzy_prefix_length"] = *q.fuzzyPrefixLength
+ }
+ if q.fuzzyMaxExpansions != nil {
+ query["fuzzy_max_expansions"] = *q.fuzzyMaxExpansions
+ }
+ if q.fuzzyRewrite != "" {
+ query["fuzzy_rewrite"] = q.fuzzyRewrite
+ }
+ if q.phraseSlop != nil {
+ query["phrase_slop"] = *q.phraseSlop
+ }
+ if q.analyzeWildcard != nil {
+ query["analyze_wildcard"] = *q.analyzeWildcard
+ }
+ if q.rewrite != "" {
+ query["rewrite"] = q.rewrite
+ }
+ if q.minimumShouldMatch != "" {
+ query["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.quoteFieldSuffix != "" {
+ query["quote_field_suffix"] = q.quoteFieldSuffix
+ }
+ if q.lenient != nil {
+ query["lenient"] = *q.lenient
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+ if q.locale != "" {
+ query["locale"] = q.locale
+ }
+ if q.timeZone != "" {
+ query["time_zone"] = q.timeZone
+ }
+ if q.escape != nil {
+ query["escape"] = *q.escape
+ }
+ if q.typ != "" {
+ query["type"] = q.typ
+ }
+
+ return source, nil
+}
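
A hedged sketch (not part of the diff) of the FieldWithBoost encoding above: boosted fields are flattened into "field^boost" strings via fmt.Sprintf("%s^%f", ...); the field names here are invented.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        q := elastic.NewQueryStringQuery("golang AND elasticsearch").
            Field("title").
            FieldWithBoost("body", 2) // serialized as "body^2.000000"
        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        out, _ := json.Marshal(src)
        // {"query_string":{"fields":["title","body^2.000000"],"query":"golang AND elasticsearch"}}
        fmt.Println(string(out))
    }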
diff --git a/vendor/github.com/olivere/elastic/search_queries_query_string_test.go b/vendor/github.com/olivere/elastic/search_queries_query_string_test.go
new file mode 100644
index 000000000..5030c3382
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_query_string_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestQueryStringQuery(t *testing.T) {
+ q := NewQueryStringQuery(`this AND that OR thus`)
+ q = q.DefaultField("content")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query_string":{"default_field":"content","query":"this AND that OR thus"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestQueryStringQueryTimeZone(t *testing.T) {
+ q := NewQueryStringQuery(`tweet_date:[2015-01-01 TO 2017-12-31]`)
+ q = q.TimeZone("Europe/Berlin")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query_string":{"query":"tweet_date:[2015-01-01 TO 2017-12-31]","time_zone":"Europe/Berlin"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_range.go b/vendor/github.com/olivere/elastic/search_queries_range.go
new file mode 100644
index 000000000..1b92dee23
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_range.go
@@ -0,0 +1,144 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// RangeQuery matches documents with fields that have terms within a certain range.
+//
+// For details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-range-query.html
+type RangeQuery struct {
+ name string
+ from interface{}
+ to interface{}
+ timeZone string
+ includeLower bool
+ includeUpper bool
+ boost *float64
+ queryName string
+ format string
+}
+
+// NewRangeQuery creates and initializes a new RangeQuery.
+func NewRangeQuery(name string) *RangeQuery {
+ return &RangeQuery{name: name, includeLower: true, includeUpper: true}
+}
+
+// From indicates the from part of the RangeQuery.
+// Use nil to indicate an unbounded from part.
+func (q *RangeQuery) From(from interface{}) *RangeQuery {
+ q.from = from
+ return q
+}
+
+// Gt indicates a greater-than value for the from part.
+// Use nil to indicate an unbounded from part.
+func (q *RangeQuery) Gt(from interface{}) *RangeQuery {
+ q.from = from
+ q.includeLower = false
+ return q
+}
+
+// Gte indicates a greater-than-or-equal value for the from part.
+// Use nil to indicate an unbounded from part.
+func (q *RangeQuery) Gte(from interface{}) *RangeQuery {
+ q.from = from
+ q.includeLower = true
+ return q
+}
+
+// To indicates the to part of the RangeQuery.
+// Use nil to indicate an unbounded to part.
+func (q *RangeQuery) To(to interface{}) *RangeQuery {
+ q.to = to
+ return q
+}
+
+// Lt indicates a less-than value for the to part.
+// Use nil to indicate an unbounded to part.
+func (q *RangeQuery) Lt(to interface{}) *RangeQuery {
+ q.to = to
+ q.includeUpper = false
+ return q
+}
+
+// Lte indicates a less-than-or-equal value for the to part.
+// Use nil to indicate an unbounded to part.
+func (q *RangeQuery) Lte(to interface{}) *RangeQuery {
+ q.to = to
+ q.includeUpper = true
+ return q
+}
+
+// IncludeLower indicates whether the lower bound should be included or not.
+// Defaults to true.
+func (q *RangeQuery) IncludeLower(includeLower bool) *RangeQuery {
+ q.includeLower = includeLower
+ return q
+}
+
+// IncludeUpper indicates whether the upper bound should be included or not.
+// Defaults to true.
+func (q *RangeQuery) IncludeUpper(includeUpper bool) *RangeQuery {
+ q.includeUpper = includeUpper
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *RangeQuery) Boost(boost float64) *RangeQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *RangeQuery) QueryName(queryName string) *RangeQuery {
+ q.queryName = queryName
+ return q
+}
+
+// TimeZone is used for date fields. In that case, we can adjust the
+// from/to fields using a timezone.
+func (q *RangeQuery) TimeZone(timeZone string) *RangeQuery {
+ q.timeZone = timeZone
+ return q
+}
+
+// Format is used for date fields. In that case, we can set the format
+// to be used instead of the mapper format.
+func (q *RangeQuery) Format(format string) *RangeQuery {
+ q.format = format
+ return q
+}
+
+// Source returns JSON for the query.
+func (q *RangeQuery) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ rangeQ := make(map[string]interface{})
+ source["range"] = rangeQ
+
+ params := make(map[string]interface{})
+ rangeQ[q.name] = params
+
+ params["from"] = q.from
+ params["to"] = q.to
+ if q.timeZone != "" {
+ params["time_zone"] = q.timeZone
+ }
+ if q.format != "" {
+ params["format"] = q.format
+ }
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+ params["include_lower"] = q.includeLower
+ params["include_upper"] = q.includeUpper
+
+ if q.queryName != "" {
+ rangeQ["_name"] = q.queryName
+ }
+
+ return source, nil
+}
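
As a quick sketch (not from this commit) of the bound helpers above: Gte sets include_lower to true while Lt sets include_upper to false; the "born" field and dates are hypothetical.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        // Gte("2012-01-01") -> include_lower=true; Lt("now") -> include_upper=false.
        q := elastic.NewRangeQuery("born").Gte("2012-01-01").Lt("now")
        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        out, _ := json.Marshal(src)
        // {"range":{"born":{"from":"2012-01-01","include_lower":true,"include_upper":false,"to":"now"}}}
        fmt.Println(string(out))
    }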
diff --git a/vendor/github.com/olivere/elastic/search_queries_range_test.go b/vendor/github.com/olivere/elastic/search_queries_range_test.go
new file mode 100644
index 000000000..86d018a86
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_range_test.go
@@ -0,0 +1,68 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestRangeQuery(t *testing.T) {
+ q := NewRangeQuery("postDate").From("2010-03-01").To("2010-04-01").Boost(3)
+ q = q.QueryName("my_query")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"_name":"my_query","postDate":{"boost":3,"from":"2010-03-01","include_lower":true,"include_upper":true,"to":"2010-04-01"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeQueryWithTimeZone(t *testing.T) {
+ q := NewRangeQuery("born").
+ Gte("2012-01-01").
+ Lte("now").
+ TimeZone("+1:00")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"born":{"from":"2012-01-01","include_lower":true,"include_upper":true,"time_zone":"+1:00","to":"now"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRangeQueryWithFormat(t *testing.T) {
+ q := NewRangeQuery("born").
+ Gte("2012/01/01").
+ Lte("now").
+ Format("yyyy/MM/dd")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"range":{"born":{"format":"yyyy/MM/dd","from":"2012/01/01","include_lower":true,"include_upper":true,"to":"now"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_raw_string.go b/vendor/github.com/olivere/elastic/search_queries_raw_string.go
new file mode 100644
index 000000000..3f9685c41
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_raw_string.go
@@ -0,0 +1,26 @@
+// Copyright 2012-present Oliver Eilhard, John Stanford. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "encoding/json"
+
+// RawStringQuery can be used to treat a string representation of an ES query
+// as a Query. Example usage:
+// q := RawStringQuery("{\"match_all\":{}}")
+// db.Search().Query(q).From(1).Size(100).Do()
+type RawStringQuery string
+
+// NewRawStringQuery initializes a new RawStringQuery.
+// It is the same as RawStringQuery(q).
+func NewRawStringQuery(q string) RawStringQuery {
+ return RawStringQuery(q)
+}
+
+// Source returns the JSON encoded body
+func (q RawStringQuery) Source() (interface{}, error) {
+ var f interface{}
+ err := json.Unmarshal([]byte(q), &f)
+ return f, err
+}
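
A minimal sketch (not part of the commit) of a useful side effect of the Source() implementation above: because it round-trips the string through json.Unmarshal, malformed JSON is caught before the query is ever sent.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        // Valid JSON passes through unchanged.
        q := elastic.NewRawStringQuery(`{"match_all":{}}`)
        if _, err := q.Source(); err != nil {
            panic(err)
        }
        // Truncated JSON surfaces as an Unmarshal error.
        bad := elastic.NewRawStringQuery(`{"match_all":`)
        if _, err := bad.Source(); err != nil {
            fmt.Println("invalid JSON detected:", err)
        }
    }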
diff --git a/vendor/github.com/olivere/elastic/search_queries_raw_string_test.go b/vendor/github.com/olivere/elastic/search_queries_raw_string_test.go
new file mode 100644
index 000000000..5bb3dac41
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_raw_string_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestRawStringQuery(t *testing.T) {
+ q := RawStringQuery(`{"match_all":{}}`)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_all":{}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNewRawStringQuery(t *testing.T) {
+ q := NewRawStringQuery(`{"match_all":{}}`)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"match_all":{}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_regexp.go b/vendor/github.com/olivere/elastic/search_queries_regexp.go
new file mode 100644
index 000000000..a08b533cb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_regexp.go
@@ -0,0 +1,82 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// RegexpQuery allows you to use regular expression term queries.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-regexp-query.html
+type RegexpQuery struct {
+ name string
+ regexp string
+ flags string
+ boost *float64
+ rewrite string
+ queryName string
+ maxDeterminizedStates *int
+}
+
+// NewRegexpQuery creates and initializes a new RegexpQuery.
+func NewRegexpQuery(name string, regexp string) *RegexpQuery {
+ return &RegexpQuery{name: name, regexp: regexp}
+}
+
+// Flags sets the regexp flags.
+func (q *RegexpQuery) Flags(flags string) *RegexpQuery {
+ q.flags = flags
+ return q
+}
+
+// MaxDeterminizedStates protects against complex regular expressions.
+func (q *RegexpQuery) MaxDeterminizedStates(maxDeterminizedStates int) *RegexpQuery {
+ q.maxDeterminizedStates = &maxDeterminizedStates
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *RegexpQuery) Boost(boost float64) *RegexpQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q *RegexpQuery) Rewrite(rewrite string) *RegexpQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit
+func (q *RegexpQuery) QueryName(queryName string) *RegexpQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns the JSON-serializable query data.
+func (q *RegexpQuery) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["regexp"] = query
+
+ x := make(map[string]interface{})
+ x["value"] = q.regexp
+ if q.flags != "" {
+ x["flags"] = q.flags
+ }
+ if q.maxDeterminizedStates != nil {
+ x["max_determinized_states"] = *q.maxDeterminizedStates
+ }
+ if q.boost != nil {
+ x["boost"] = *q.boost
+ }
+ if q.rewrite != "" {
+ x["rewrite"] = q.rewrite
+ }
+ if q.queryName != "" {
+ x["name"] = q.queryName
+ }
+ query[q.name] = x
+
+ return source, nil
+}
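
Illustration only (not from the diff): a sketch of the regexp builder above with hypothetical field, pattern, and flag values.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        // Flags restricts the enabled regexp operators (values here are made up).
        q := elastic.NewRegexpQuery("name.first", "s.*y").Flags("INTERSECTION|EMPTY")
        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        out, _ := json.Marshal(src)
        fmt.Println(string(out)) // {"regexp":{"name.first":{"flags":"INTERSECTION|EMPTY","value":"s.*y"}}}
    }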
diff --git a/vendor/github.com/olivere/elastic/search_queries_regexp_test.go b/vendor/github.com/olivere/elastic/search_queries_regexp_test.go
new file mode 100644
index 000000000..d30c0a36d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_regexp_test.go
@@ -0,0 +1,47 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestRegexpQuery(t *testing.T) {
+ q := NewRegexpQuery("name.first", "s.*y")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"regexp":{"name.first":{"value":"s.*y"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestRegexpQueryWithOptions(t *testing.T) {
+ q := NewRegexpQuery("name.first", "s.*y").
+ Boost(1.2).
+ Flags("INTERSECTION|COMPLEMENT|EMPTY").
+ QueryName("my_query_name")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"regexp":{"name.first":{"boost":1.2,"flags":"INTERSECTION|COMPLEMENT|EMPTY","name":"my_query_name","value":"s.*y"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_script.go b/vendor/github.com/olivere/elastic/search_queries_script.go
new file mode 100644
index 000000000..d430f4c8f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_script.go
@@ -0,0 +1,51 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// ScriptQuery allows defining scripts as filters.
+//
+// For details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-script-query.html
+type ScriptQuery struct {
+ script *Script
+ queryName string
+}
+
+// NewScriptQuery creates and initializes a new ScriptQuery.
+func NewScriptQuery(script *Script) *ScriptQuery {
+ return &ScriptQuery{
+ script: script,
+ }
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit
+func (q *ScriptQuery) QueryName(queryName string) *ScriptQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the query.
+func (q *ScriptQuery) Source() (interface{}, error) {
+ if q.script == nil {
+ return nil, errors.New("ScriptQuery expected a script")
+ }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["script"] = params
+
+ src, err := q.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ params["script"] = src
+
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+ return source, nil
+}
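
A hedged sketch (not part of this commit) of the nesting produced by Source() above; the inline script is a made-up example.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        // NewScript wraps an inline script; Source() nests it under
        // {"script":{"script":{...}}} as implemented above.
        q := elastic.NewScriptQuery(elastic.NewScript("doc['num1'].value > 1"))
        src, err := q.Source()
        if err != nil {
            panic(err) // returned when no script was given
        }
        out, _ := json.Marshal(src)
        fmt.Println(string(out)) // {"script":{"script":{"source":"doc['num1'].value \u003e 1"}}}
    }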
diff --git a/vendor/github.com/olivere/elastic/search_queries_script_test.go b/vendor/github.com/olivere/elastic/search_queries_script_test.go
new file mode 100644
index 000000000..66ec106d5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_script_test.go
@@ -0,0 +1,45 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestScriptQuery(t *testing.T) {
+	q := NewScriptQuery(NewScript("doc['num1'].value > 1"))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+	expected := `{"script":{"script":{"source":"doc['num1'].value \u003e 1"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestScriptQueryWithParams(t *testing.T) {
+	q := NewScriptQuery(NewScript("doc['num1'].value > 1"))
+ q = q.QueryName("MyQueryName")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+	expected := `{"script":{"_name":"MyQueryName","script":{"source":"doc['num1'].value \u003e 1"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_simple_query_string.go b/vendor/github.com/olivere/elastic/search_queries_simple_query_string.go
new file mode 100644
index 000000000..462ea5533
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_simple_query_string.go
@@ -0,0 +1,185 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "strings"
+)
+
+// SimpleQueryStringQuery is a query that uses the SimpleQueryParser
+// to parse its content. Unlike the regular query_string query,
+// the simple_query_string query will never throw an exception
+// and discards invalid parts of the query.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-simple-query-string-query.html
+type SimpleQueryStringQuery struct {
+ queryText string
+ analyzer string
+ operator string
+ fields []string
+ fieldBoosts map[string]*float64
+ minimumShouldMatch string
+ flags string
+ boost *float64
+ lowercaseExpandedTerms *bool
+ lenient *bool
+ analyzeWildcard *bool
+ locale string
+ queryName string
+}
+
+// NewSimpleQueryStringQuery creates and initializes a new SimpleQueryStringQuery.
+func NewSimpleQueryStringQuery(text string) *SimpleQueryStringQuery {
+ return &SimpleQueryStringQuery{
+ queryText: text,
+ fields: make([]string, 0),
+ fieldBoosts: make(map[string]*float64),
+ }
+}
+
+// Field adds a field to run the query against.
+func (q *SimpleQueryStringQuery) Field(field string) *SimpleQueryStringQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+// FieldWithBoost adds a field to run the query against with a specific boost.
+func (q *SimpleQueryStringQuery) FieldWithBoost(field string, boost float64) *SimpleQueryStringQuery {
+ q.fields = append(q.fields, field)
+ q.fieldBoosts[field] = &boost
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *SimpleQueryStringQuery) Boost(boost float64) *SimpleQueryStringQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q *SimpleQueryStringQuery) QueryName(queryName string) *SimpleQueryStringQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Analyzer specifies the analyzer to use for the query.
+func (q *SimpleQueryStringQuery) Analyzer(analyzer string) *SimpleQueryStringQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// DefaultOperator specifies the default operator for the query.
+func (q *SimpleQueryStringQuery) DefaultOperator(defaultOperator string) *SimpleQueryStringQuery {
+ q.operator = defaultOperator
+ return q
+}
+
+// Flags sets the flags for the query.
+func (q *SimpleQueryStringQuery) Flags(flags string) *SimpleQueryStringQuery {
+ q.flags = flags
+ return q
+}
+
+// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
+// and range queries are automatically lower-cased or not. Default is true.
+func (q *SimpleQueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *SimpleQueryStringQuery {
+ q.lowercaseExpandedTerms = &lowercaseExpandedTerms
+ return q
+}
+
+func (q *SimpleQueryStringQuery) Locale(locale string) *SimpleQueryStringQuery {
+ q.locale = locale
+ return q
+}
+
+// Lenient indicates whether the query string parser should be lenient
+// when parsing field values. It defaults to the index setting and if not
+// set, defaults to false.
+func (q *SimpleQueryStringQuery) Lenient(lenient bool) *SimpleQueryStringQuery {
+ q.lenient = &lenient
+ return q
+}
+
+// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
+func (q *SimpleQueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *SimpleQueryStringQuery {
+ q.analyzeWildcard = &analyzeWildcard
+ return q
+}
+
+func (q *SimpleQueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *SimpleQueryStringQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+// Source returns JSON for the query.
+func (q *SimpleQueryStringQuery) Source() (interface{}, error) {
+ // {
+ // "simple_query_string" : {
+ // "query" : "\"fried eggs\" +(eggplant | potato) -frittata",
+ // "analyzer" : "snowball",
+ // "fields" : ["body^5","_all"],
+ // "default_operator" : "and"
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["simple_query_string"] = query
+
+ query["query"] = q.queryText
+
+ if len(q.fields) > 0 {
+ var fields []string
+ for _, field := range q.fields {
+ if boost, found := q.fieldBoosts[field]; found {
+ if boost != nil {
+ fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
+ } else {
+ fields = append(fields, field)
+ }
+ } else {
+ fields = append(fields, field)
+ }
+ }
+ query["fields"] = fields
+ }
+
+ if q.flags != "" {
+ query["flags"] = q.flags
+ }
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+ if q.operator != "" {
+ query["default_operator"] = strings.ToLower(q.operator)
+ }
+ if q.lowercaseExpandedTerms != nil {
+ query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms
+ }
+ if q.lenient != nil {
+ query["lenient"] = *q.lenient
+ }
+ if q.analyzeWildcard != nil {
+ query["analyze_wildcard"] = *q.analyzeWildcard
+ }
+ if q.locale != "" {
+ query["locale"] = q.locale
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+ if q.minimumShouldMatch != "" {
+ query["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+
+ return source, nil
+}
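
A small sketch (not from the commit) of the operator handling above: DefaultOperator is lower-cased on serialization, so "AND" becomes "and"; the field name is hypothetical.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        q := elastic.NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato)`).
            Field("body").
            DefaultOperator("AND") // serialized as "and"
        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        out, _ := json.Marshal(src)
        // {"simple_query_string":{"default_operator":"and","fields":["body"],"query":"\"fried eggs\" +(eggplant | potato)"}}
        fmt.Println(string(out))
    }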
diff --git a/vendor/github.com/olivere/elastic/search_queries_simple_query_string_test.go b/vendor/github.com/olivere/elastic/search_queries_simple_query_string_test.go
new file mode 100644
index 000000000..ea4a341ec
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_simple_query_string_test.go
@@ -0,0 +1,87 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+func TestSimpleQueryStringQuery(t *testing.T) {
+ q := NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"simple_query_string":{"query":"\"fried eggs\" +(eggplant | potato) -frittata"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSimpleQueryStringQueryExec(t *testing.T) {
+ // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	// The simple query string should match exactly one of the three documents
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(NewSimpleQueryStringQuery("+Golang +Elasticsearch")).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 1 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 1 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_slice.go b/vendor/github.com/olivere/elastic/search_queries_slice.go
new file mode 100644
index 000000000..e1b1db928
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_slice.go
@@ -0,0 +1,53 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SliceQuery allows partitioning the documents into several slices.
+// It is used e.g. to slice scroll operations in Elasticsearch 5.0 or later.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
+// for details.
+type SliceQuery struct {
+ field string
+ id *int
+ max *int
+}
+
+// NewSliceQuery creates a new SliceQuery.
+func NewSliceQuery() *SliceQuery {
+ return &SliceQuery{}
+}
+
+// Field is the name of the field to slice against (_uid by default).
+func (s *SliceQuery) Field(field string) *SliceQuery {
+ s.field = field
+ return s
+}
+
+// Id is the id of the slice.
+func (s *SliceQuery) Id(id int) *SliceQuery {
+ s.id = &id
+ return s
+}
+
+// Max is the maximum number of slices.
+func (s *SliceQuery) Max(max int) *SliceQuery {
+ s.max = &max
+ return s
+}
+
+// Source returns the JSON body.
+func (s *SliceQuery) Source() (interface{}, error) {
+ m := make(map[string]interface{})
+ if s.field != "" {
+ m["field"] = s.field
+ }
+ if s.id != nil {
+ m["id"] = *s.id
+ }
+ if s.max != nil {
+ m["max"] = *s.max
+ }
+ return m, nil
+}
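
For illustration (not part of the diff): with Max(2), two workers could each scroll one slice in parallel; this sketch only renders slice 0 of 2.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        // Slice 0 of 2 over the default _uid field.
        q := elastic.NewSliceQuery().Field("_uid").Id(0).Max(2)
        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        out, _ := json.Marshal(src)
        fmt.Println(string(out)) // {"field":"_uid","id":0,"max":2}
    }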
diff --git a/vendor/github.com/olivere/elastic/search_queries_slice_test.go b/vendor/github.com/olivere/elastic/search_queries_slice_test.go
new file mode 100644
index 000000000..0589f4e29
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_slice_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSliceQuery(t *testing.T) {
+ q := NewSliceQuery().Field("date").Id(0).Max(2)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"field":"date","id":0,"max":2}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_term.go b/vendor/github.com/olivere/elastic/search_queries_term.go
new file mode 100644
index 000000000..9a445e0ec
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_term.go
@@ -0,0 +1,58 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermQuery finds documents that contain the exact term specified
+// in the inverted index.
+//
+// For details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-term-query.html
+type TermQuery struct {
+ name string
+ value interface{}
+ boost *float64
+ queryName string
+}
+
+// NewTermQuery creates and initializes a new TermQuery.
+func NewTermQuery(name string, value interface{}) *TermQuery {
+ return &TermQuery{name: name, value: value}
+}
+
+// Boost sets the boost for this query.
+func (q *TermQuery) Boost(boost float64) *TermQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit
+func (q *TermQuery) QueryName(queryName string) *TermQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the query.
+func (q *TermQuery) Source() (interface{}, error) {
+ // {"term":{"name":"value"}}
+ source := make(map[string]interface{})
+ tq := make(map[string]interface{})
+ source["term"] = tq
+
+ if q.boost == nil && q.queryName == "" {
+ tq[q.name] = q.value
+ } else {
+ subQ := make(map[string]interface{})
+ subQ["value"] = q.value
+ if q.boost != nil {
+ subQ["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ subQ["_name"] = q.queryName
+ }
+ tq[q.name] = subQ
+ }
+ return source, nil
+}
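
Aside (not from the commit): a sketch of the two output shapes of Source() above; the field and value are invented.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        // Without options the query collapses to {"term":{"field":value}};
        // adding Boost or QueryName switches to the nested {"value":...} form.
        q := elastic.NewTermQuery("user", "olivere").Boost(2)
        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        out, _ := json.Marshal(src)
        fmt.Println(string(out)) // {"term":{"user":{"boost":2,"value":"olivere"}}}
    }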
diff --git a/vendor/github.com/olivere/elastic/search_queries_term_test.go b/vendor/github.com/olivere/elastic/search_queries_term_test.go
new file mode 100644
index 000000000..f800fa954
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_term_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTermQuery(t *testing.T) {
+ q := NewTermQuery("user", "ki")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"term":{"user":"ki"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermQueryWithOptions(t *testing.T) {
+ q := NewTermQuery("user", "ki")
+ q = q.Boost(2.79)
+ q = q.QueryName("my_tq")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"term":{"user":{"_name":"my_tq","boost":2.79,"value":"ki"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_terms.go b/vendor/github.com/olivere/elastic/search_queries_terms.go
new file mode 100644
index 000000000..3649576dc
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_terms.go
@@ -0,0 +1,75 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermsQuery filters documents that have fields that match any
+// of the provided terms (not analyzed).
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-terms-query.html
+type TermsQuery struct {
+ name string
+ values []interface{}
+ termsLookup *TermsLookup
+ queryName string
+ boost *float64
+}
+
+// NewTermsQuery creates and initializes a new TermsQuery.
+func NewTermsQuery(name string, values ...interface{}) *TermsQuery {
+ q := &TermsQuery{
+ name: name,
+ values: make([]interface{}, 0),
+ }
+ if len(values) > 0 {
+ q.values = append(q.values, values...)
+ }
+ return q
+}
+
+// TermsLookup adds terms lookup details to the query.
+func (q *TermsQuery) TermsLookup(lookup *TermsLookup) *TermsQuery {
+ q.termsLookup = lookup
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q *TermsQuery) Boost(boost float64) *TermsQuery {
+ q.boost = &boost
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched_filters per hit
+func (q *TermsQuery) QueryName(queryName string) *TermsQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns JSON for the terms query.
+func (q *TermsQuery) Source() (interface{}, error) {
+ // {"terms":{"name":["value1","value2"]}}
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["terms"] = params
+
+ if q.termsLookup != nil {
+ src, err := q.termsLookup.Source()
+ if err != nil {
+ return nil, err
+ }
+ params[q.name] = src
+ } else {
+ params[q.name] = q.values
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+ }
+
+ return source, nil
+}
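
A minimal sketch (not part of this commit) of the terms-lookup branch above, which fetches the value list from another document instead of listing terms inline; index/type/id/path values are hypothetical.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic"
    )

    func main() {
        // Look up the terms in the "followers" field of document users/user/2.
        q := elastic.NewTermsQuery("user").
            TermsLookup(elastic.NewTermsLookup().Index("users").Type("user").Id("2").Path("followers"))
        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        out, _ := json.Marshal(src)
        fmt.Println(string(out)) // {"terms":{"user":{"id":"2","index":"users","path":"followers","type":"user"}}}
    }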
diff --git a/vendor/github.com/olivere/elastic/search_queries_terms_test.go b/vendor/github.com/olivere/elastic/search_queries_terms_test.go
new file mode 100644
index 000000000..72f472d17
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_terms_test.go
@@ -0,0 +1,82 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTermsQuery(t *testing.T) {
+ q := NewTermsQuery("user", "ki")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"user":["ki"]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsQueryWithEmptyArray(t *testing.T) {
+ included := make([]interface{}, 0)
+ q := NewTermsQuery("tags", included...)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"tags":[]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsQueryWithTermsLookup(t *testing.T) {
+ q := NewTermsQuery("user").
+ TermsLookup(NewTermsLookup().Index("users").Type("user").Id("2").Path("followers"))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"user":{"id":"2","index":"users","path":"followers","type":"user"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestTermsQueryWithOptions(t *testing.T) {
+ q := NewTermsQuery("user", "ki", "ko")
+ q = q.Boost(2.79)
+ q = q.QueryName("my_tq")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"terms":{"_name":"my_tq","boost":2.79,"user":["ki","ko"]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_type.go b/vendor/github.com/olivere/elastic/search_queries_type.go
new file mode 100644
index 000000000..e7aef30df
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_type.go
@@ -0,0 +1,26 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TypeQuery filters documents matching the provided document / mapping type.
+//
+// For details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-type-query.html
+type TypeQuery struct {
+ typ string
+}
+
+// NewTypeQuery creates and initializes a new TypeQuery.
+func NewTypeQuery(typ string) *TypeQuery {
+ return &TypeQuery{typ: typ}
+}
+
+// Source returns JSON for the query.
+func (q *TypeQuery) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["type"] = params
+ params["value"] = q.typ
+ return source, nil
+}
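
A correspondingly tiny sketch for TypeQuery; the mapping type below is an arbitrary example, not one mandated by the library.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	q := elastic.NewTypeQuery("doc") // "doc" is just an example mapping type
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"type":{"value":"doc"}}
}
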
diff --git a/vendor/github.com/olivere/elastic/search_queries_type_test.go b/vendor/github.com/olivere/elastic/search_queries_type_test.go
new file mode 100644
index 000000000..176b82abb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_type_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTypeQuery(t *testing.T) {
+ q := NewTypeQuery("my_type")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"type":{"value":"my_type"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_wildcard.go b/vendor/github.com/olivere/elastic/search_queries_wildcard.go
new file mode 100644
index 000000000..ea8a0901c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_wildcard.go
@@ -0,0 +1,81 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// WildcardQuery matches documents that have fields matching a wildcard
+// expression (not analyzed). Supported wildcards are *, which matches
+// any character sequence (including the empty one), and ?, which matches
+// any single character. Note this query can be slow, as it needs to iterate
+// over many terms. In order to prevent extremely slow wildcard queries,
+// a wildcard term should not start with one of the wildcards * or ?.
+// The wildcard query maps to Lucene WildcardQuery.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-wildcard-query.html
+type WildcardQuery struct {
+ name string
+ wildcard string
+ boost *float64
+ rewrite string
+ queryName string
+}
+
+// NewWildcardQuery creates and initializes a new WildcardQuery.
+func NewWildcardQuery(name, wildcard string) *WildcardQuery {
+ return &WildcardQuery{
+ name: name,
+ wildcard: wildcard,
+ }
+}
+
+// Boost sets the boost for this query.
+func (q *WildcardQuery) Boost(boost float64) *WildcardQuery {
+ q.boost = &boost
+ return q
+}
+
+// Rewrite sets the rewrite method to use when rewriting the
+// wildcard query into primitive Lucene queries.
+func (q *WildcardQuery) Rewrite(rewrite string) *WildcardQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+// QueryName sets the name of this query.
+func (q *WildcardQuery) QueryName(queryName string) *WildcardQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns the JSON serializable body of this query.
+func (q *WildcardQuery) Source() (interface{}, error) {
+ // {
+ // "wildcard" : {
+ // "user" : {
+ // "wildcard" : "ki*y",
+ // "boost" : 1.0
+ // }
+ // }
+	// }
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["wildcard"] = query
+
+ wq := make(map[string]interface{})
+ query[q.name] = wq
+
+ wq["wildcard"] = q.wildcard
+
+ if q.boost != nil {
+ wq["boost"] = *q.boost
+ }
+ if q.rewrite != "" {
+ wq["rewrite"] = q.rewrite
+ }
+ if q.queryName != "" {
+ wq["_name"] = q.queryName
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/search_queries_wildcard_test.go b/vendor/github.com/olivere/elastic/search_queries_wildcard_test.go
new file mode 100644
index 000000000..b41c8ab7b
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_queries_wildcard_test.go
@@ -0,0 +1,68 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic_test
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/olivere/elastic"
+)
+
+func ExampleWildcardQuery() {
+ // Get a client to the local Elasticsearch instance.
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+
+ // Define wildcard query
+ q := elastic.NewWildcardQuery("user", "oli*er?").Boost(1.2)
+ searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(q). // use wildcard query defined above
+ Do(context.TODO()) // execute
+ if err != nil {
+ // Handle error
+ panic(err)
+ }
+ _ = searchResult
+}
+
+func TestWildcardQuery(t *testing.T) {
+ q := elastic.NewWildcardQuery("user", "ki*y??")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"wildcard":{"user":{"wildcard":"ki*y??"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestWildcardQueryWithBoost(t *testing.T) {
+ q := elastic.NewWildcardQuery("user", "ki*y??").Boost(1.2)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"wildcard":{"user":{"boost":1.2,"wildcard":"ki*y??"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_request.go b/vendor/github.com/olivere/elastic/search_request.go
new file mode 100644
index 000000000..6f40ff028
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_request.go
@@ -0,0 +1,205 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "strings"
+
+// SearchRequest combines a search request and its
+// query details (see SearchSource).
+// It is used in combination with MultiSearch.
+type SearchRequest struct {
+ searchType string // default in ES is "query_then_fetch"
+ indices []string
+ types []string
+ routing *string
+ preference *string
+ requestCache *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ scroll string
+ source interface{}
+}
+
+// NewSearchRequest creates a new search request.
+func NewSearchRequest() *SearchRequest {
+ return &SearchRequest{}
+}
+
+// SearchType must be one of "query_then_fetch", "query_and_fetch",
+// "scan", "count", "dfs_query_then_fetch", or "dfs_query_and_fetch".
+// Use one of the SearchType* convenience methods below to set it.
+func (r *SearchRequest) SearchType(searchType string) *SearchRequest {
+ r.searchType = searchType
+ return r
+}
+
+func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest {
+ return r.SearchType("dfs_query_then_fetch")
+}
+
+func (r *SearchRequest) SearchTypeDfsQueryAndFetch() *SearchRequest {
+ return r.SearchType("dfs_query_and_fetch")
+}
+
+func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest {
+ return r.SearchType("query_then_fetch")
+}
+
+func (r *SearchRequest) SearchTypeQueryAndFetch() *SearchRequest {
+ return r.SearchType("query_and_fetch")
+}
+
+func (r *SearchRequest) SearchTypeScan() *SearchRequest {
+ return r.SearchType("scan")
+}
+
+func (r *SearchRequest) SearchTypeCount() *SearchRequest {
+ return r.SearchType("count")
+}
+
+func (r *SearchRequest) Index(indices ...string) *SearchRequest {
+ r.indices = append(r.indices, indices...)
+ return r
+}
+
+func (r *SearchRequest) HasIndices() bool {
+ return len(r.indices) > 0
+}
+
+func (r *SearchRequest) Type(types ...string) *SearchRequest {
+ r.types = append(r.types, types...)
+ return r
+}
+
+func (r *SearchRequest) Routing(routing string) *SearchRequest {
+ r.routing = &routing
+ return r
+}
+
+func (r *SearchRequest) Routings(routings ...string) *SearchRequest {
+ if routings != nil {
+ routings := strings.Join(routings, ",")
+ r.routing = &routings
+ } else {
+ r.routing = nil
+ }
+ return r
+}
+
+func (r *SearchRequest) Preference(preference string) *SearchRequest {
+ r.preference = &preference
+ return r
+}
+
+func (r *SearchRequest) RequestCache(requestCache bool) *SearchRequest {
+ r.requestCache = &requestCache
+ return r
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *SearchRequest) IgnoreUnavailable(ignoreUnavailable bool) *SearchRequest {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore a wildcard indices
+// expression that resolves into no concrete indices. (This includes the
+// `_all` string and the case where no indices have been specified.)
+func (s *SearchRequest) AllowNoIndices(allowNoIndices bool) *SearchRequest {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *SearchRequest) ExpandWildcards(expandWildcards string) *SearchRequest {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+func (r *SearchRequest) Scroll(scroll string) *SearchRequest {
+ r.scroll = scroll
+ return r
+}
+
+func (r *SearchRequest) SearchSource(searchSource *SearchSource) *SearchRequest {
+ return r.Source(searchSource)
+}
+
+func (r *SearchRequest) Source(source interface{}) *SearchRequest {
+ switch v := source.(type) {
+ case *SearchSource:
+ src, err := v.Source()
+ if err != nil {
+			// Swallow the error and leave the previous source untouched.
+ return r
+ }
+ r.source = src
+ default:
+ r.source = source
+ }
+ return r
+}
+
+// header is used e.g. by MultiSearch to get information about the search header
+// of one SearchRequest.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-multi-search.html
+func (r *SearchRequest) header() interface{} {
+ h := make(map[string]interface{})
+ if r.searchType != "" {
+ h["search_type"] = r.searchType
+ }
+
+ switch len(r.indices) {
+ case 0:
+ case 1:
+ h["index"] = r.indices[0]
+ default:
+ h["indices"] = r.indices
+ }
+
+ switch len(r.types) {
+ case 0:
+ case 1:
+ h["type"] = r.types[0]
+ default:
+ h["types"] = r.types
+ }
+
+ if r.routing != nil && *r.routing != "" {
+ h["routing"] = *r.routing
+ }
+ if r.preference != nil && *r.preference != "" {
+ h["preference"] = *r.preference
+ }
+ if r.requestCache != nil {
+ h["request_cache"] = *r.requestCache
+ }
+ if r.ignoreUnavailable != nil {
+ h["ignore_unavailable"] = *r.ignoreUnavailable
+ }
+ if r.allowNoIndices != nil {
+ h["allow_no_indices"] = *r.allowNoIndices
+ }
+ if r.expandWildcards != "" {
+ h["expand_wildcards"] = r.expandWildcards
+ }
+ if r.scroll != "" {
+ h["scroll"] = r.scroll
+ }
+
+ return h
+}
+
+// Body allows access to the search body of the request, as generated by the DSL.
+// Notice that Body is read-only. You must not change the request body.
+//
+// Body is used e.g. by MultiSearch to get information about the search body
+// of one SearchRequest.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-multi-search.html
+func (r *SearchRequest) Body() interface{} {
+ return r.source
+}
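
Because header() and Body() are consumed by MultiSearch, a hedged sketch of how SearchRequest is typically assembled; the index names and queries are placeholders, and the requests are only built here, not executed.

package main

import "github.com/olivere/elastic"

func main() {
	// Two independent requests of the kind MultiSearch batches together.
	// Each carries its own header fields (indices, flags) and body (source).
	req1 := elastic.NewSearchRequest().
		Index("twitter").
		SearchSource(elastic.NewSearchSource().Query(elastic.NewTermQuery("user", "olivere")))
	req2 := elastic.NewSearchRequest().
		Index("twitter", "archive").
		IgnoreUnavailable(true).
		SearchSource(elastic.NewSearchSource().Query(elastic.NewMatchAllQuery()))
	_, _ = req1, req2 // in real code, pass both to client.MultiSearch().Add(...)
}
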
diff --git a/vendor/github.com/olivere/elastic/search_request_test.go b/vendor/github.com/olivere/elastic/search_request_test.go
new file mode 100644
index 000000000..fa03af2c8
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_request_test.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ _ "net/http"
+ "testing"
+)
+
+func TestSearchRequestIndex(t *testing.T) {
+ builder := NewSearchRequest().Index("test")
+ data, err := json.Marshal(builder.header())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"index":"test"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchRequestIndices(t *testing.T) {
+ builder := NewSearchRequest().Index("test", "test2")
+ data, err := json.Marshal(builder.header())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"indices":["test","test2"]}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchRequestHasIndices(t *testing.T) {
+ builder := NewSearchRequest()
+ if builder.HasIndices() {
+		t.Errorf("expected HasIndices to return false; got %v", builder.HasIndices())
+ }
+ builder = builder.Index("test", "test2")
+ if !builder.HasIndices() {
+		t.Errorf("expected HasIndices to return true; got %v", builder.HasIndices())
+ }
+}
+
+func TestSearchRequestIgnoreUnavailable(t *testing.T) {
+ builder := NewSearchRequest().Index("test").IgnoreUnavailable(true)
+ data, err := json.Marshal(builder.header())
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ignore_unavailable":true,"index":"test"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_source.go b/vendor/github.com/olivere/elastic/search_source.go
new file mode 100644
index 000000000..77b1c5093
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_source.go
@@ -0,0 +1,546 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+)
+
+// SearchSource enables users to build the search source.
+// It resembles the SearchSourceBuilder in Elasticsearch.
+type SearchSource struct {
+ query Query
+ postQuery Query
+ sliceQuery Query
+ from int
+ size int
+ explain *bool
+ version *bool
+ sorters []Sorter
+ trackScores bool
+ searchAfterSortValues []interface{}
+ minScore *float64
+ timeout string
+ terminateAfter *int
+ storedFieldNames []string
+ docvalueFields []string
+ scriptFields []*ScriptField
+ fetchSourceContext *FetchSourceContext
+ aggregations map[string]Aggregation
+ highlight *Highlight
+ globalSuggestText string
+ suggesters []Suggester
+ rescores []*Rescore
+ defaultRescoreWindowSize *int
+ indexBoosts map[string]float64
+ stats []string
+ innerHits map[string]*InnerHit
+ collapse *CollapseBuilder
+ profile bool
+}
+
+// NewSearchSource initializes a new SearchSource.
+func NewSearchSource() *SearchSource {
+ return &SearchSource{
+ from: -1,
+ size: -1,
+ trackScores: false,
+ aggregations: make(map[string]Aggregation),
+ indexBoosts: make(map[string]float64),
+ innerHits: make(map[string]*InnerHit),
+ }
+}
+
+// Query sets the query to use with this search source.
+func (s *SearchSource) Query(query Query) *SearchSource {
+ s.query = query
+ return s
+}
+
+// Profile specifies that this search source should activate the
+// Profile API for queries made on it.
+func (s *SearchSource) Profile(profile bool) *SearchSource {
+ s.profile = profile
+ return s
+}
+
+// PostFilter will be executed after the query has been executed and
+// only affects the search hits, not the aggregations.
+// This filter is always executed as the last filtering mechanism.
+func (s *SearchSource) PostFilter(postFilter Query) *SearchSource {
+ s.postQuery = postFilter
+ return s
+}
+
+// Slice allows partitioning the documents in multiple slices.
+// It is e.g. used to slice a scroll operation, supported in
+// Elasticsearch 5.0 or later.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
+// for details.
+func (s *SearchSource) Slice(sliceQuery Query) *SearchSource {
+ s.sliceQuery = sliceQuery
+ return s
+}
+
+// From sets the index to start the search from. Defaults to 0.
+func (s *SearchSource) From(from int) *SearchSource {
+ s.from = from
+ return s
+}
+
+// Size is the number of search hits to return. Defaults to 10.
+func (s *SearchSource) Size(size int) *SearchSource {
+ s.size = size
+ return s
+}
+
+// MinScore sets the minimum score below which docs will be filtered out.
+func (s *SearchSource) MinScore(minScore float64) *SearchSource {
+ s.minScore = &minScore
+ return s
+}
+
+// Explain indicates whether each search hit should be returned with
+// an explanation of the hit (ranking).
+func (s *SearchSource) Explain(explain bool) *SearchSource {
+ s.explain = &explain
+ return s
+}
+
+// Version indicates whether each search hit should be returned with
+// a version associated to it.
+func (s *SearchSource) Version(version bool) *SearchSource {
+ s.version = &version
+ return s
+}
+
+// Timeout controls how long a search is allowed to take, e.g. "1s" or "500ms".
+func (s *SearchSource) Timeout(timeout string) *SearchSource {
+ s.timeout = timeout
+ return s
+}
+
+// TimeoutInMillis controls how many milliseconds a search is allowed
+// to take before it is canceled.
+func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource {
+ s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
+ return s
+}
+
+// TerminateAfter allows the request to stop after the given number
+// of search hits are collected.
+func (s *SearchSource) TerminateAfter(terminateAfter int) *SearchSource {
+ s.terminateAfter = &terminateAfter
+ return s
+}
+
+// Sort adds a sort order.
+func (s *SearchSource) Sort(field string, ascending bool) *SearchSource {
+ s.sorters = append(s.sorters, SortInfo{Field: field, Ascending: ascending})
+ return s
+}
+
+// SortWithInfo adds a sort order.
+func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource {
+ s.sorters = append(s.sorters, info)
+ return s
+}
+
+// SortBy adds a sort order.
+func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource {
+ s.sorters = append(s.sorters, sorter...)
+ return s
+}
+
+func (s *SearchSource) hasSort() bool {
+ return len(s.sorters) > 0
+}
+
+// TrackScores is applied when sorting and controls if scores will be
+// tracked as well. Defaults to false.
+func (s *SearchSource) TrackScores(trackScores bool) *SearchSource {
+ s.trackScores = trackScores
+ return s
+}
+
+// SearchAfter allows a different form of pagination by using a live cursor,
+// using the sort values of the previous page to retrieve the next one.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-search-after.html
+func (s *SearchSource) SearchAfter(sortValues ...interface{}) *SearchSource {
+ s.searchAfterSortValues = append(s.searchAfterSortValues, sortValues...)
+ return s
+}
+
+// Aggregation adds an aggregation to perform as part of the search.
+func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource {
+ s.aggregations[name] = aggregation
+ return s
+}
+
+// DefaultRescoreWindowSize sets the rescore window size for rescores
+// that don't specify their window.
+func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource {
+ s.defaultRescoreWindowSize = &defaultRescoreWindowSize
+ return s
+}
+
+// Highlight adds highlighting to the search.
+func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource {
+ s.highlight = highlight
+ return s
+}
+
+// Highlighter returns the highlighter.
+func (s *SearchSource) Highlighter() *Highlight {
+ if s.highlight == nil {
+ s.highlight = NewHighlight()
+ }
+ return s.highlight
+}
+
+// GlobalSuggestText defines the global text to use with all suggesters.
+// This avoids repetition.
+func (s *SearchSource) GlobalSuggestText(text string) *SearchSource {
+ s.globalSuggestText = text
+ return s
+}
+
+// Suggester adds a suggester to the search.
+func (s *SearchSource) Suggester(suggester Suggester) *SearchSource {
+ s.suggesters = append(s.suggesters, suggester)
+ return s
+}
+
+// Rescorer adds a rescorer to the search.
+func (s *SearchSource) Rescorer(rescore *Rescore) *SearchSource {
+ s.rescores = append(s.rescores, rescore)
+ return s
+}
+
+// ClearRescorers removes all rescorers from the search.
+func (s *SearchSource) ClearRescorers() *SearchSource {
+ s.rescores = make([]*Rescore, 0)
+ return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource {
+ if s.fetchSourceContext == nil {
+ s.fetchSourceContext = NewFetchSourceContext(fetchSource)
+ } else {
+ s.fetchSourceContext.SetFetchSource(fetchSource)
+ }
+ return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource {
+ s.fetchSourceContext = fetchSourceContext
+ return s
+}
+
+// NoStoredFields indicates that no stored fields should be loaded,
+// resulting in only id and type being returned per hit.
+func (s *SearchSource) NoStoredFields() *SearchSource {
+ s.storedFieldNames = nil
+ return s
+}
+
+// StoredField adds a single field to load and return (note, must be stored) as
+// part of the search request. If none are specified, the source of the
+// document will be returned.
+func (s *SearchSource) StoredField(storedFieldName string) *SearchSource {
+ s.storedFieldNames = append(s.storedFieldNames, storedFieldName)
+ return s
+}
+
+// StoredFields sets the fields to load and return as part of the search request.
+// If none are specified, the source of the document will be returned.
+func (s *SearchSource) StoredFields(storedFieldNames ...string) *SearchSource {
+ s.storedFieldNames = append(s.storedFieldNames, storedFieldNames...)
+ return s
+}
+
+// DocvalueField adds a single field to load from the field data cache
+// and return as part of the search request.
+func (s *SearchSource) DocvalueField(fieldDataField string) *SearchSource {
+ s.docvalueFields = append(s.docvalueFields, fieldDataField)
+ return s
+}
+
+// DocvalueFields adds one or more fields to load from the field data cache
+// and return as part of the search request.
+func (s *SearchSource) DocvalueFields(docvalueFields ...string) *SearchSource {
+ s.docvalueFields = append(s.docvalueFields, docvalueFields...)
+ return s
+}
+
+// ScriptField adds a single script field with the provided script.
+func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource {
+ s.scriptFields = append(s.scriptFields, scriptField)
+ return s
+}
+
+// ScriptFields adds one or more script fields with the provided scripts.
+func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource {
+ s.scriptFields = append(s.scriptFields, scriptFields...)
+ return s
+}
+
+// IndexBoost sets the boost that a specific index will receive when the
+// query is executed against it.
+func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource {
+ s.indexBoosts[index] = boost
+ return s
+}
+
+// Stats group this request will be aggregated under.
+func (s *SearchSource) Stats(statsGroup ...string) *SearchSource {
+ s.stats = append(s.stats, statsGroup...)
+ return s
+}
+
+// InnerHit adds an inner hit to return with the result.
+func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource {
+ s.innerHits[name] = innerHit
+ return s
+}
+
+// Collapse adds field collapsing.
+func (s *SearchSource) Collapse(collapse *CollapseBuilder) *SearchSource {
+ s.collapse = collapse
+ return s
+}
+
+// Source returns the serializable JSON for the source builder.
+func (s *SearchSource) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ if s.from != -1 {
+ source["from"] = s.from
+ }
+ if s.size != -1 {
+ source["size"] = s.size
+ }
+ if s.timeout != "" {
+ source["timeout"] = s.timeout
+ }
+ if s.terminateAfter != nil {
+ source["terminate_after"] = *s.terminateAfter
+ }
+ if s.query != nil {
+ src, err := s.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["query"] = src
+ }
+ if s.postQuery != nil {
+ src, err := s.postQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["post_filter"] = src
+ }
+ if s.sliceQuery != nil {
+ src, err := s.sliceQuery.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["slice"] = src
+ }
+ if s.minScore != nil {
+ source["min_score"] = *s.minScore
+ }
+ if s.version != nil {
+ source["version"] = *s.version
+ }
+ if s.explain != nil {
+ source["explain"] = *s.explain
+ }
+ if s.profile {
+ source["profile"] = s.profile
+ }
+ if s.collapse != nil {
+ src, err := s.collapse.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["collapse"] = src
+ }
+ if s.fetchSourceContext != nil {
+ src, err := s.fetchSourceContext.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["_source"] = src
+ }
+
+ if s.storedFieldNames != nil {
+ switch len(s.storedFieldNames) {
+ case 1:
+ source["stored_fields"] = s.storedFieldNames[0]
+ default:
+ source["stored_fields"] = s.storedFieldNames
+ }
+ }
+
+ if len(s.docvalueFields) > 0 {
+ source["docvalue_fields"] = s.docvalueFields
+ }
+
+ if len(s.scriptFields) > 0 {
+ sfmap := make(map[string]interface{})
+ for _, scriptField := range s.scriptFields {
+ src, err := scriptField.Source()
+ if err != nil {
+ return nil, err
+ }
+ sfmap[scriptField.FieldName] = src
+ }
+ source["script_fields"] = sfmap
+ }
+
+ if len(s.sorters) > 0 {
+ var sortarr []interface{}
+ for _, sorter := range s.sorters {
+ src, err := sorter.Source()
+ if err != nil {
+ return nil, err
+ }
+ sortarr = append(sortarr, src)
+ }
+ source["sort"] = sortarr
+ }
+
+ if s.trackScores {
+ source["track_scores"] = s.trackScores
+ }
+
+ if len(s.searchAfterSortValues) > 0 {
+ source["search_after"] = s.searchAfterSortValues
+ }
+
+ if len(s.indexBoosts) > 0 {
+ source["indices_boost"] = s.indexBoosts
+ }
+
+ if len(s.aggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ for name, aggregate := range s.aggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ source["aggregations"] = aggsMap
+ }
+
+ if s.highlight != nil {
+ src, err := s.highlight.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["highlight"] = src
+ }
+
+ if len(s.suggesters) > 0 {
+ suggesters := make(map[string]interface{})
+		for _, sug := range s.suggesters {
+			src, err := sug.Source(false)
+			if err != nil {
+				return nil, err
+			}
+			suggesters[sug.Name()] = src
+ }
+ if s.globalSuggestText != "" {
+ suggesters["text"] = s.globalSuggestText
+ }
+ source["suggest"] = suggesters
+ }
+
+ if len(s.rescores) > 0 {
+ // Strip empty rescores from request
+ var rescores []*Rescore
+ for _, r := range s.rescores {
+ if !r.IsEmpty() {
+ rescores = append(rescores, r)
+ }
+ }
+
+ if len(rescores) == 1 {
+ rescores[0].defaultRescoreWindowSize = s.defaultRescoreWindowSize
+ src, err := rescores[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ source["rescore"] = src
+ } else {
+ var slice []interface{}
+ for _, r := range rescores {
+ r.defaultRescoreWindowSize = s.defaultRescoreWindowSize
+ src, err := r.Source()
+ if err != nil {
+ return nil, err
+ }
+ slice = append(slice, src)
+ }
+ source["rescore"] = slice
+ }
+ }
+
+ if len(s.stats) > 0 {
+ source["stats"] = s.stats
+ }
+
+ if len(s.innerHits) > 0 {
+ // Top-level inner hits
+ // See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html#top-level-inner-hits
+ // "inner_hits": {
+ // "<inner_hits_name>": {
+ // "<path|type>": {
+ // "<path-to-nested-object-field|child-or-parent-type>": {
+ // <inner_hits_body>,
+ // [,"inner_hits" : { [<sub_inner_hits>]+ } ]?
+ // }
+ // }
+ // },
+ // [,"<inner_hits_name_2>" : { ... } ]*
+ // }
+ m := make(map[string]interface{})
+ for name, hit := range s.innerHits {
+ if hit.path != "" {
+ src, err := hit.Source()
+ if err != nil {
+ return nil, err
+ }
+ path := make(map[string]interface{})
+ path[hit.path] = src
+ m[name] = map[string]interface{}{
+ "path": path,
+ }
+ } else if hit.typ != "" {
+ src, err := hit.Source()
+ if err != nil {
+ return nil, err
+ }
+ typ := make(map[string]interface{})
+ typ[hit.typ] = src
+ m[name] = map[string]interface{}{
+ "type": typ,
+ }
+ } else {
+ // TODO the Java client throws here, because either path or typ must be specified
+ _ = m
+ }
+ }
+ source["inner_hits"] = m
+ }
+
+ return source, nil
+}
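
To tie the builder together, a small offline sketch that composes several of the options above and prints the resulting body; the field names are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	ss := elastic.NewSearchSource().
		Query(elastic.NewMatchAllQuery()).
		From(0).
		Size(20).
		Sort("created", false). // descending sort on an example field
		FetchSourceContext(elastic.NewFetchSourceContext(true).Include("user", "message"))
	src, err := ss.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	// Emits from/size, the sort clause, and a filtered _source alongside the query.
	fmt.Println(string(data))
}
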
diff --git a/vendor/github.com/olivere/elastic/search_source_test.go b/vendor/github.com/olivere/elastic/search_source_test.go
new file mode 100644
index 000000000..a78991bf0
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_source_test.go
@@ -0,0 +1,295 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSearchSourceMatchAllQuery(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceNoStoredFields(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).NoStoredFields()
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceStoredFields(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).StoredFields("message", "tags")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"match_all":{}},"stored_fields":["message","tags"]}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceFetchSourceDisabled(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).FetchSource(false)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"_source":false,"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceFetchSourceByWildcards(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ fsc := NewFetchSourceContext(true).Include("obj1.*", "obj2.*").Exclude("*.description")
+ builder := NewSearchSource().Query(matchAllQ).FetchSourceContext(fsc)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"_source":{"excludes":["*.description"],"includes":["obj1.*","obj2.*"]},"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceDocvalueFields(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).DocvalueFields("test1", "test2")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"docvalue_fields":["test1","test2"],"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceScriptFields(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ sf1 := NewScriptField("test1", NewScript("doc['my_field_name'].value * 2"))
+ sf2 := NewScriptField("test2", NewScript("doc['my_field_name'].value * factor").Param("factor", 3.1415927))
+ builder := NewSearchSource().Query(matchAllQ).ScriptFields(sf1, sf2)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"match_all":{}},"script_fields":{"test1":{"script":{"source":"doc['my_field_name'].value * 2"}},"test2":{"script":{"params":{"factor":3.1415927},"source":"doc['my_field_name'].value * factor"}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourcePostFilter(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ pf := NewTermQuery("tag", "important")
+ builder := NewSearchSource().Query(matchAllQ).PostFilter(pf)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"post_filter":{"term":{"tag":"important"}},"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceHighlight(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ hl := NewHighlight().Field("content")
+ builder := NewSearchSource().Query(matchAllQ).Highlight(hl)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"highlight":{"fields":{"content":{}}},"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceRescoring(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ rescorerQuery := NewMatchPhraseQuery("field1", "the quick brown fox").Slop(2)
+ rescorer := NewQueryRescorer(rescorerQuery)
+ rescorer = rescorer.QueryWeight(0.7)
+ rescorer = rescorer.RescoreQueryWeight(1.2)
+ rescore := NewRescore().WindowSize(50).Rescorer(rescorer)
+ builder := NewSearchSource().Query(matchAllQ).Rescorer(rescore)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"match_all":{}},"rescore":{"query":{"query_weight":0.7,"rescore_query":{"match_phrase":{"field1":{"query":"the quick brown fox","slop":2}}},"rescore_query_weight":1.2},"window_size":50}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceIndexBoost(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).IndexBoost("index1", 1.4).IndexBoost("index2", 1.3)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"indices_boost":{"index1":1.4,"index2":1.3},"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceMixDifferentSorters(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).
+ Sort("a", false).
+ SortWithInfo(SortInfo{Field: "b", Ascending: true}).
+ SortBy(NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number"))
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"match_all":{}},"sort":[{"a":{"order":"desc"}},{"b":{"order":"asc"}},{"_script":{"order":"asc","script":{"params":{"factor":1.1},"source":"doc['field_name'].value * factor"},"type":"number"}}]}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceInnerHits(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).
+ InnerHit("comments", NewInnerHit().Type("comment").Query(NewMatchQuery("user", "olivere"))).
+ InnerHit("views", NewInnerHit().Path("view"))
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"inner_hits":{"comments":{"type":{"comment":{"query":{"match":{"user":{"query":"olivere"}}}}}},"views":{"path":{"view":{}}}},"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceSearchAfter(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).SearchAfter(1463538857, "tweet#654323")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"query":{"match_all":{}},"search_after":[1463538857,"tweet#654323"]}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSearchSourceProfiledQuery(t *testing.T) {
+ matchAllQ := NewMatchAllQuery()
+ builder := NewSearchSource().Query(matchAllQ).Profile(true)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"profile":true,"query":{"match_all":{}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_suggester_test.go b/vendor/github.com/olivere/elastic/search_suggester_test.go
new file mode 100644
index 000000000..33bdc9275
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_suggester_test.go
@@ -0,0 +1,355 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestTermSuggester(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ tsName := "my-suggestions"
+ ts := NewTermSuggester(tsName)
+ ts = ts.Text("Goolang")
+ ts = ts.Field("message")
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(NewMatchAllQuery()).
+ Suggester(ts).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Suggest == nil {
+ t.Errorf("expected SearchResult.Suggest != nil; got nil")
+ }
+ mySuggestions, found := searchResult.Suggest[tsName]
+ if !found {
+ t.Errorf("expected to find SearchResult.Suggest[%s]; got false", tsName)
+ }
+ if mySuggestions == nil {
+ t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", tsName)
+ }
+
+ if len(mySuggestions) != 1 {
+ t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
+ }
+ mySuggestion := mySuggestions[0]
+ if mySuggestion.Text != "goolang" {
+ t.Errorf("expected Text = 'goolang'; got %s", mySuggestion.Text)
+ }
+ if mySuggestion.Offset != 0 {
+ t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
+ }
+ if mySuggestion.Length != 7 {
+ t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length)
+ }
+ if len(mySuggestion.Options) != 1 {
+ t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
+ }
+ myOption := mySuggestion.Options[0]
+ if myOption.Text != "golang" {
+ t.Errorf("expected Text = 'golang'; got %s", myOption.Text)
+ }
+}
+
+func TestPhraseSuggester(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ phraseSuggesterName := "my-suggestions"
+ ps := NewPhraseSuggester(phraseSuggesterName)
+ ps = ps.Text("Goolang")
+ ps = ps.Field("message")
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(NewMatchAllQuery()).
+ Suggester(ps).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Suggest == nil {
+ t.Errorf("expected SearchResult.Suggest != nil; got nil")
+ }
+ mySuggestions, found := searchResult.Suggest[phraseSuggesterName]
+ if !found {
+ t.Errorf("expected to find SearchResult.Suggest[%s]; got false", phraseSuggesterName)
+ }
+ if mySuggestions == nil {
+ t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", phraseSuggesterName)
+ }
+
+ if len(mySuggestions) != 1 {
+ t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
+ }
+ mySuggestion := mySuggestions[0]
+ if mySuggestion.Text != "Goolang" {
+ t.Errorf("expected Text = 'Goolang'; got %s", mySuggestion.Text)
+ }
+ if mySuggestion.Offset != 0 {
+ t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
+ }
+ if mySuggestion.Length != 7 {
+ t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length)
+ }
+ if want, have := 1, len(mySuggestion.Options); want != have {
+ t.Errorf("expected len(options) = %d; got %d", want, have)
+ }
+ if want, have := "golang", mySuggestion.Options[0].Text; want != have {
+ t.Errorf("expected options[0].Text = %q; got %q", want, have)
+ }
+ if score := mySuggestion.Options[0].Score; score <= 0.0 {
+ t.Errorf("expected options[0].Score > 0.0; got %v", score)
+ }
+}
+
+func TestCompletionSuggester(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ tweet1 := tweet{
+ User: "olivere",
+ Message: "Welcome to Golang and Elasticsearch.",
+ Suggest: NewSuggestField("Golang", "Elasticsearch"),
+ }
+ tweet2 := tweet{
+ User: "olivere",
+ Message: "Another unrelated topic.",
+ Suggest: NewSuggestField("Another unrelated topic."),
+ }
+ tweet3 := tweet{
+ User: "sandrae",
+ Message: "Cycling is fun.",
+ Suggest: NewSuggestField("Cycling is fun."),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ suggesterName := "my-suggestions"
+ cs := NewCompletionSuggester(suggesterName)
+ cs = cs.Text("Golang")
+ cs = cs.Field("suggest_field")
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(NewMatchAllQuery()).
+ Suggester(cs).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Suggest == nil {
+ t.Errorf("expected SearchResult.Suggest != nil; got nil")
+ }
+ mySuggestions, found := searchResult.Suggest[suggesterName]
+ if !found {
+ t.Errorf("expected to find SearchResult.Suggest[%s]; got false", suggesterName)
+ }
+ if mySuggestions == nil {
+ t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", suggesterName)
+ }
+
+ if len(mySuggestions) != 1 {
+ t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
+ }
+ mySuggestion := mySuggestions[0]
+ if mySuggestion.Text != "Golang" {
+ t.Errorf("expected Text = 'Golang'; got %s", mySuggestion.Text)
+ }
+ if mySuggestion.Offset != 0 {
+ t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
+ }
+ if mySuggestion.Length != 6 {
+		t.Errorf("expected Length = %d; got %d", 6, mySuggestion.Length)
+ }
+ if len(mySuggestion.Options) != 1 {
+ t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
+ }
+ myOption := mySuggestion.Options[0]
+ if myOption.Text != "Golang" {
+ t.Errorf("expected Text = 'Golang'; got %s", myOption.Text)
+ }
+}
+
+func TestContextSuggester(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ // TODO make a nice way of creating tweets, as currently the context fields are unsupported as part of the suggestion fields
+ tweet1 := `
+ {
+ "user":"olivere",
+ "message":"Welcome to Golang and Elasticsearch.",
+ "retweets":0,
+ "created":"0001-01-01T00:00:00Z",
+ "suggest_field":{
+ "input":[
+ "Golang",
+ "Elasticsearch"
+ ],
+ "contexts":{
+ "user_name": ["olivere"]
+ }
+ }
+ }
+ `
+ tweet2 := `
+ {
+ "user":"sandrae",
+ "message":"I like golfing",
+ "retweets":0,
+ "created":"0001-01-01T00:00:00Z",
+ "suggest_field":{
+ "input":[
+ "Golfing"
+ ],
+ "contexts":{
+ "user_name": ["sandrae"]
+ }
+ }
+ }
+ `
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyString(tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyString(tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ suggesterName := "my-suggestions"
+ cs := NewContextSuggester(suggesterName)
+ cs = cs.Prefix("Gol")
+ cs = cs.Field("suggest_field")
+ cs = cs.ContextQueries(
+ NewSuggesterCategoryQuery("user_name", "olivere"),
+ )
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Suggester(cs).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Suggest == nil {
+ t.Errorf("expected SearchResult.Suggest != nil; got nil")
+ }
+ mySuggestions, found := searchResult.Suggest[suggesterName]
+ if !found {
+ t.Errorf("expected to find SearchResult.Suggest[%s]; got false", suggesterName)
+ }
+ if mySuggestions == nil {
+ t.Errorf("expected SearchResult.Suggest[%s] != nil; got nil", suggesterName)
+ }
+
+ // sandra's tweet is not returned because of the user_name context
+ if len(mySuggestions) != 1 {
+ t.Errorf("expected 1 suggestion; got %d", len(mySuggestions))
+ }
+ mySuggestion := mySuggestions[0]
+ if mySuggestion.Text != "Gol" {
+ t.Errorf("expected Text = 'Gol'; got %s", mySuggestion.Text)
+ }
+ if mySuggestion.Offset != 0 {
+ t.Errorf("expected Offset = %d; got %d", 0, mySuggestion.Offset)
+ }
+ if mySuggestion.Length != 3 {
+ t.Errorf("expected Length = %d; got %d", 3, mySuggestion.Length)
+ }
+ if len(mySuggestion.Options) != 1 {
+ t.Errorf("expected 1 option; got %d", len(mySuggestion.Options))
+ }
+ myOption := mySuggestion.Options[0]
+ if myOption.Text != "Golang" {
+ t.Errorf("expected Text = 'Golang'; got %s", myOption.Text)
+ }
+ if myOption.Id != "1" {
+ t.Errorf("expected Id = '1'; got %s", myOption.Id)
+ }
+}
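
The integration tests above require a running cluster, but a suggester's request body can be inspected offline. A sketch, assuming the Source(includeName bool) method that SearchSource calls on each Suggester; the suggester name, text, and field are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Build the same kind of term suggester the test uses and print
	// the body it contributes under the top-level "suggest" section.
	ts := elastic.NewTermSuggester("my-suggestions").
		Text("Goolang").
		Field("message")
	src, err := ts.Source(true) // true: wrap the body under the suggester's name
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
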
diff --git a/vendor/github.com/olivere/elastic/search_terms_lookup.go b/vendor/github.com/olivere/elastic/search_terms_lookup.go
new file mode 100644
index 000000000..9a2456bdd
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_terms_lookup.go
@@ -0,0 +1,74 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermsLookup encapsulates the parameters needed to fetch terms.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-terms-query.html#query-dsl-terms-lookup.
+type TermsLookup struct {
+ index string
+ typ string
+ id string
+ path string
+ routing string
+}
+
+// NewTermsLookup creates and initializes a new TermsLookup.
+func NewTermsLookup() *TermsLookup {
+ t := &TermsLookup{}
+ return t
+}
+
+// Index name.
+func (t *TermsLookup) Index(index string) *TermsLookup {
+ t.index = index
+ return t
+}
+
+// Type name.
+func (t *TermsLookup) Type(typ string) *TermsLookup {
+ t.typ = typ
+ return t
+}
+
+// Id to look up.
+func (t *TermsLookup) Id(id string) *TermsLookup {
+ t.id = id
+ return t
+}
+
+// Path to use for lookup.
+func (t *TermsLookup) Path(path string) *TermsLookup {
+ t.path = path
+ return t
+}
+
+// Routing value.
+func (t *TermsLookup) Routing(routing string) *TermsLookup {
+ t.routing = routing
+ return t
+}
+
+// Source creates the JSON source of the builder.
+func (t *TermsLookup) Source() (interface{}, error) {
+ src := make(map[string]interface{})
+ if t.index != "" {
+ src["index"] = t.index
+ }
+ if t.typ != "" {
+ src["type"] = t.typ
+ }
+ if t.id != "" {
+ src["id"] = t.id
+ }
+ if t.path != "" {
+ src["path"] = t.path
+ }
+ if t.routing != "" {
+ src["routing"] = t.routing
+ }
+ return src, nil
+}
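
Mirroring the terms-lookup test that follows, a sketch that wires a TermsLookup into a TermsQuery and prints the body; the index, type, id, and path values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic"
)

func main() {
	// Fetch the term list from another document instead of inlining it.
	lookup := elastic.NewTermsLookup().
		Index("users").Type("user").Id("2").Path("followers")
	q := elastic.NewTermsQuery("user").TermsLookup(lookup)
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// Prints: {"terms":{"user":{"id":"2","index":"users","path":"followers","type":"user"}}}
}
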
diff --git a/vendor/github.com/olivere/elastic/search_terms_lookup_test.go b/vendor/github.com/olivere/elastic/search_terms_lookup_test.go
new file mode 100644
index 000000000..369f72346
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_terms_lookup_test.go
@@ -0,0 +1,27 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestTermsLookup(t *testing.T) {
+ tl := NewTermsLookup().Index("users").Type("user").Id("2").Path("followers")
+ src, err := tl.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"id":"2","index":"users","path":"followers","type":"user"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/search_test.go b/vendor/github.com/olivere/elastic/search_test.go
new file mode 100644
index 000000000..097c26525
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/search_test.go
@@ -0,0 +1,1265 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestSearchMatchAll(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(NewMatchAllQuery()).
+ Size(100).
+ Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if got, want := searchResult.Hits.TotalHits, int64(3); got != want {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
+ }
+ if got, want := len(searchResult.Hits.Hits), 3; got != want {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestSearchMatchAllWithRequestCacheDisabled(t *testing.T) {
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents, with request cache disabled
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(NewMatchAllQuery()).
+ Size(100).
+ Pretty(true).
+ RequestCache(false).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if got, want := searchResult.Hits.TotalHits, int64(3); got != want {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
+ }
+ if got, want := len(searchResult.Hits.Hits), 3; got != want {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
+ }
+}
+
+func BenchmarkSearchMatchAll(b *testing.B) {
+ client := setupTestClientAndCreateIndexAndAddDocs(b)
+
+ for n := 0; n < b.N; n++ {
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO())
+ if err != nil {
+ b.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ b.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits == 0 {
+ b.Errorf("expected SearchResult.Hits.TotalHits > %d; got %d", 0, searchResult.Hits.TotalHits)
+ }
+ }
+}
+
+func TestSearchResultTotalHits(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ count, err := client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ got := searchResult.TotalHits()
+ if got != count {
+ t.Fatalf("expected %d hits; got: %d", count, got)
+ }
+
+ // No hits
+ searchResult = &SearchResult{}
+ got = searchResult.TotalHits()
+ if got != 0 {
+ t.Errorf("expected %d hits; got: %d", 0, got)
+ }
+}
+
+func TestSearchResultWithProfiling(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().Index(testIndexName).Query(all).Profile(true).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if searchResult.Profile == nil {
+ t.Fatal("Profiled MatchAll query did not return profiling data with results")
+ }
+}
+
+func TestSearchResultEach(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().Index(testIndexName).Query(all).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Iterate over non-ptr type
+ var aTweet tweet
+ count := 0
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
+ count++
+ _, ok := item.(tweet)
+ if !ok {
+ t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
+ }
+ }
+ if count == 0 {
+ t.Errorf("expected to find some hits; got: %d", count)
+ }
+
+ // Iterate over ptr-type
+ count = 0
+ var aTweetPtr *tweet
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweetPtr)) {
+ count++
+ tw, ok := item.(*tweet)
+ if !ok {
+ t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
+ }
+ if tw == nil {
+ t.Fatal("expected hit to not be nil")
+ }
+ }
+ if count == 0 {
+ t.Errorf("expected to find some hits; got: %d", count)
+ }
+
+ // Does not iterate when no hits are found
+ searchResult = &SearchResult{Hits: nil}
+ count = 0
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
+ count++
+ _ = item
+ }
+ if count != 0 {
+ t.Errorf("expected to not find any hits; got: %d", count)
+ }
+ searchResult = &SearchResult{Hits: &SearchHits{Hits: make([]*SearchHit, 0)}}
+ count = 0
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
+ count++
+ _ = item
+ }
+ if count != 0 {
+ t.Errorf("expected to not find any hits; got: %d", count)
+ }
+}
+
+func TestSearchResultEachNoSource(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocsNoSource(t)
+
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().Index(testNoSourceIndexName).Query(all).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Iterate over non-ptr type
+ var aTweet tweet
+ count := 0
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
+ count++
+ tw, ok := item.(tweet)
+ if !ok {
+ t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
+ }
+
+ if tw.User != "" {
+ t.Fatalf("expected no _source hit to be empty tweet; got: %v", reflect.ValueOf(item))
+ }
+ }
+ if count != 2 {
+ t.Errorf("expected to find 2 hits; got: %d", count)
+ }
+
+ // Iterate over ptr-type
+ count = 0
+ var aTweetPtr *tweet
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweetPtr)) {
+ count++
+ tw, ok := item.(*tweet)
+ if !ok {
+ t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
+ }
+ if tw != nil {
+ t.Fatal("expected hit to be nil")
+ }
+ }
+ if count != 2 {
+ t.Errorf("expected to find 2 hits; got: %d", count)
+ }
+}
+
+func TestSearchSorting(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(all).
+ Sort("created", false).
+ Timeout("1s").
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 3 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestSearchSortingBySorters(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(all).
+ SortBy(NewFieldSort("created").Desc(), NewScoreSort()).
+ Timeout("1s").
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 3 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestSearchSpecificFields(t *testing.T) {
+ // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(all).
+ StoredFields("message").
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 3 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ if hit.Source != nil {
+ t.Fatalf("expected SearchResult.Hits.Hit.Source to be nil; got: %q", hit.Source)
+ }
+ if hit.Fields == nil {
+ t.Fatal("expected SearchResult.Hits.Hit.Fields to be != nil")
+ }
+ field, found := hit.Fields["message"]
+ if !found {
+ t.Errorf("expected SearchResult.Hits.Hit.Fields[%s] to be found", "message")
+ }
+ fields, ok := field.([]interface{})
+ if !ok {
+ t.Errorf("expected []interface{}; got: %v", reflect.TypeOf(fields))
+ }
+ if len(fields) != 1 {
+ t.Errorf("expected a field with 1 entry; got: %d", len(fields))
+ }
+ message, ok := fields[0].(string)
+ if !ok {
+ t.Errorf("expected a string; got: %v", reflect.TypeOf(fields[0]))
+ }
+ if message == "" {
+ t.Errorf("expected a message; got: %q", message)
+ }
+ }
+}
+
+func TestSearchExplain(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+ // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(all).
+ Explain(true).
+ Timeout("1s").
+ // Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 3 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ if hit.Explanation == nil {
+ t.Fatal("expected search explanation")
+ }
+ if hit.Explanation.Value <= 0.0 {
+ t.Errorf("expected explanation value to be > 0.0; got: %v", hit.Explanation.Value)
+ }
+ if hit.Explanation.Description == "" {
+ t.Errorf("expected explanation description != %q; got: %q", "", hit.Explanation.Description)
+ }
+ }
+}
+
+func TestSearchSource(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Set up the request JSON manually to pass to the search service via Source()
+ source := map[string]interface{}{
+ "query": map[string]interface{}{
+ "match_all": map[string]interface{}{},
+ },
+ }
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Source(source). // sets the JSON request
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+}
+
+func TestSearchRawString(t *testing.T) {
+ // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ query := RawStringQuery(`{"match_all":{}}`)
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(query).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+}
+
+func TestSearchSearchSource(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Set up the search source manually and pass it to the search service via SearchSource()
+ ss := NewSearchSource().Query(NewMatchAllQuery()).From(0).Size(2)
+
+ // One can use ss.Source() to get to the raw interface{} that will be used
+ // as the search request JSON by the SearchService.
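+ //
+ // For illustration (a sketch, not executed here), the raw request can be
+ // inspected before it is sent; with the builder above, json.Marshal(src)
+ // yields something like {"from":0,"query":{"match_all":{}},"size":2}:
+ //
+ //   src, err := ss.Source()
+ //   if err != nil {
+ //       t.Fatal(err)
+ //   }
+ //   data, _ := json.Marshal(src)
+ //   _ = data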
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ SearchSource(ss). // sets the SearchSource
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 2 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits))
+ }
+}
+
+func TestSearchInnerHitsOnHasChild(t *testing.T) {
+ // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ ctx := context.Background()
+
+ // Create join index
+ createIndex, err := client.CreateIndex(testJoinIndex).Body(testJoinMapping).Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex)
+ }
+
+ // Add documents
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/parent-join.html for example code.
+ doc1 := joinDoc{
+ Message: "This is a question",
+ JoinField: &joinField{Name: "question"},
+ }
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("1").BodyJson(&doc1).Refresh("true").Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ doc2 := joinDoc{
+ Message: "This is another question",
+ JoinField: "question",
+ }
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("2").BodyJson(&doc2).Refresh("true").Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ doc3 := joinDoc{
+ Message: "This is an answer",
+ JoinField: &joinField{
+ Name: "answer",
+ Parent: "1",
+ },
+ }
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("3").BodyJson(&doc3).Routing("1").Refresh("true").Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ doc4 := joinDoc{
+ Message: "This is another answer",
+ JoinField: &joinField{
+ Name: "answer",
+ Parent: "1",
+ },
+ }
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("4").BodyJson(&doc4).Routing("1").Refresh("true").Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testJoinIndex).Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Search for all documents that have an answer, and return those answers as inner hits
+ bq := NewBoolQuery()
+ bq = bq.Must(NewMatchAllQuery())
+ bq = bq.Filter(NewHasChildQuery("answer", NewMatchAllQuery()).
+ InnerHit(NewInnerHit().Name("answers")))
+
+ searchResult, err := client.Search().
+ Index(testJoinIndex).
+ Query(bq).
+ Pretty(true).
+ Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 1 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+ }
+ if len(searchResult.Hits.Hits) != 1 {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+ }
+
+ hit := searchResult.Hits.Hits[0]
+ if want, have := "1", hit.Id; want != have {
+ t.Fatalf("expected tweet %q; got: %q", want, have)
+ }
+ if hit.InnerHits == nil {
+ t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
+ }
+ if want, have := 1, len(hit.InnerHits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
+ }
+ innerHits, found := hit.InnerHits["answers"]
+ if !found {
+ t.Fatalf("expected inner hits for name %q", "answers")
+ }
+ if innerHits == nil || innerHits.Hits == nil {
+ t.Fatal("expected inner hits != nil")
+ }
+ if want, have := 2, len(innerHits.Hits.Hits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
+ }
+ if want, have := "3", innerHits.Hits.Hits[0].Id; want != have {
+ t.Fatalf("expected inner hit with id %q; got: %q", want, have)
+ }
+ if want, have := "4", innerHits.Hits.Hits[1].Id; want != have {
+ t.Fatalf("expected inner hit with id %q; got: %q", want, have)
+ }
+}
+
+func TestSearchInnerHitsOnHasParent(t *testing.T) {
+ // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ ctx := context.Background()
+
+ // Create join index
+ createIndex, err := client.CreateIndex(testJoinIndex).Body(testJoinMapping).Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex)
+ }
+
+ // Add documents
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/parent-join.html for example code.
+ doc1 := joinDoc{
+ Message: "This is a question",
+ JoinField: &joinField{Name: "question"},
+ }
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("1").BodyJson(&doc1).Refresh("true").Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ doc2 := joinDoc{
+ Message: "This is another question",
+ JoinField: "question",
+ }
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("2").BodyJson(&doc2).Refresh("true").Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ doc3 := joinDoc{
+ Message: "This is an answer",
+ JoinField: &joinField{
+ Name: "answer",
+ Parent: "1",
+ },
+ }
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("3").BodyJson(&doc3).Routing("1").Refresh("true").Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ doc4 := joinDoc{
+ Message: "This is another answer",
+ JoinField: &joinField{
+ Name: "answer",
+ Parent: "1",
+ },
+ }
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("4").BodyJson(&doc4).Routing("1").Refresh("true").Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testJoinIndex).Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Search for all documents that have an answer, and return those answers as inner hits
+ bq := NewBoolQuery()
+ bq = bq.Must(NewMatchAllQuery())
+ bq = bq.Filter(NewHasParentQuery("question", NewMatchAllQuery()).
+ InnerHit(NewInnerHit().Name("answers")))
+
+ searchResult, err := client.Search().
+ Index(testJoinIndex).
+ Query(bq).
+ Pretty(true).
+ Do(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if want, have := int64(2), searchResult.Hits.TotalHits; want != have {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, have)
+ }
+ if want, have := 2, len(searchResult.Hits.Hits); want != have {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, have)
+ }
+
+ hit := searchResult.Hits.Hits[0]
+ if want, have := "3", hit.Id; want != have {
+ t.Fatalf("expected tweet %q; got: %q", want, have)
+ }
+ if hit.InnerHits == nil {
+ t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
+ }
+ if want, have := 1, len(hit.InnerHits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
+ }
+ innerHits, found := hit.InnerHits["answers"]
+ if !found {
+ t.Fatalf("expected inner hits for name %q", "tweets")
+ }
+ if innerHits == nil || innerHits.Hits == nil {
+ t.Fatal("expected inner hits != nil")
+ }
+ if want, have := 1, len(innerHits.Hits.Hits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
+ }
+ if want, have := "1", innerHits.Hits.Hits[0].Id; want != have {
+ t.Fatalf("expected inner hit with id %q; got: %q", want, have)
+ }
+
+ hit = searchResult.Hits.Hits[1]
+ if want, have := "4", hit.Id; want != have {
+ t.Fatalf("expected tweet %q; got: %q", want, have)
+ }
+ if hit.InnerHits == nil {
+ t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
+ }
+ if want, have := 1, len(hit.InnerHits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
+ }
+ innerHits, found = hit.InnerHits["answers"]
+ if !found {
+ t.Fatalf("expected inner hits for name %q", "tweets")
+ }
+ if innerHits == nil || innerHits.Hits == nil {
+ t.Fatal("expected inner hits != nil")
+ }
+ if want, have := 1, len(innerHits.Hits.Hits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
+ }
+ if want, have := "1", innerHits.Hits.Hits[0].Id; want != have {
+ t.Fatalf("expected inner hit with id %q; got: %q", want, have)
+ }
+}
+
+func TestSearchBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Indices []string
+ Types []string
+ Expected string
+ }{
+ {
+ []string{},
+ []string{},
+ "/_search",
+ },
+ {
+ []string{"index1"},
+ []string{},
+ "/index1/_search",
+ },
+ {
+ []string{"index1", "index2"},
+ []string{},
+ "/index1%2Cindex2/_search",
+ },
+ {
+ []string{},
+ []string{"type1"},
+ "/_all/type1/_search",
+ },
+ {
+ []string{"index1"},
+ []string{"type1"},
+ "/index1/type1/_search",
+ },
+ {
+ []string{"index1", "index2"},
+ []string{"type1", "type2"},
+ "/index1%2Cindex2/type1%2Ctype2/_search",
+ },
+ {
+ []string{},
+ []string{"type1", "type2"},
+ "/_all/type1%2Ctype2/_search",
+ },
+ }
+
+ for i, test := range tests {
+ path, _, err := client.Search().Index(test.Indices...).Type(test.Types...).buildURL()
+ if err != nil {
+ t.Errorf("case #%d: %v", i+1, err)
+ continue
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+}
+
+func TestSearchFilterPath(t *testing.T) {
+ // client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+
+ // Match all should return all documents
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Type("doc").
+ Query(all).
+ FilterPath(
+ "took",
+ "hits.hits._id",
+ "hits.hits._source.user",
+ "hits.hits._source.message",
+ ).
+ Timeout("1s").
+ Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Fatalf("expected SearchResult.Hits != nil; got nil")
+ }
+ // TotalHits is 0 because "hits.total" was excluded by the filter_path above
+ if want, got := int64(0), searchResult.Hits.TotalHits; want != got {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
+ }
+ if want, got := 3, len(searchResult.Hits.Hits); want != got {
+ t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if want, got := "", hit.Index; want != got {
+ t.Fatalf("expected index %q, got %q", want, got)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // user field
+ v, found := item["user"]
+ if !found {
+ t.Fatalf("expected SearchResult.Hits.Hit[%q] to be found", "user")
+ }
+ if v == "" {
+ t.Fatalf("expected user field, got %v (%T)", v, v)
+ }
+ // The retweets field must be absent; it was excluded by filter_path
+ v, found = item["retweets"]
+ if found {
+ t.Fatalf("expected SearchResult.Hits.Hit[%q] to not be found, got %v", "retweets", v)
+ }
+ }
+}
+
+func TestSearchAfter(t *testing.T) {
+ // client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{
+ User: "olivere", Retweets: 108,
+ Message: "Welcome to Golang and Elasticsearch.",
+ Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
+ }
+ tweet2 := tweet{
+ User: "olivere", Retweets: 0,
+ Message: "Another unrelated topic.",
+ Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
+ }
+ tweet3 := tweet{
+ User: "sandrae", Retweets: 12,
+ Message: "Cycling is fun.",
+ Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ }
+
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
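+ // With an ascending sort on "user", search_after("olivere") resumes after
+ // the sort value "olivere", so only the "sandrae" tweet (id 3) is returned.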
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Query(NewMatchAllQuery()).
+ SearchAfter("olivere").
+ Sort("user", true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if searchResult.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ }
+ if searchResult.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ }
+ if want, got := 1, len(searchResult.Hits.Hits); want != got {
+ t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got: %d", want, got)
+ }
+ hit := searchResult.Hits.Hits[0]
+ if want, got := "3", hit.Id; want != got {
+ t.Fatalf("expected tweet %q; got: %q", want, got)
+ }
+}
+
+func TestSearchResultWithFieldCollapsing(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
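+ // Collapsing on "user" returns at most one top hit per distinct user value.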
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Collapse(NewCollapseBuilder("user")).
+ Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if searchResult.Hits == nil {
+ t.Fatalf("expected SearchResult.Hits != nil; got nil")
+ }
+ if got := searchResult.Hits.TotalHits; got == 0 {
+ t.Fatalf("expected SearchResult.Hits.TotalHits > 0; got %d", got)
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(hit.Fields) == 0 {
+ t.Fatal("expected fields in SearchResult")
+ }
+ usersVal, ok := hit.Fields["user"]
+ if !ok {
+ t.Fatalf("expected %q field in fields of SearchResult", "user")
+ }
+ users, ok := usersVal.([]interface{})
+ if !ok {
+ t.Fatalf("expected slice of strings in field of SearchResult, got %T", usersVal)
+ }
+ if len(users) != 1 {
+ t.Fatalf("expected 1 entry in users slice, got %d", len(users))
+ }
+ }
+}
+
+func TestSearchResultWithFieldCollapsingAndInnerHits(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ searchResult, err := client.Search().
+ Index(testIndexName).
+ Type("doc").
+ Query(NewMatchAllQuery()).
+ Collapse(
+ NewCollapseBuilder("user").
+ InnerHit(
+ NewInnerHit().Name("last_tweets").Size(5).Sort("created", true),
+ ).
+ MaxConcurrentGroupRequests(4)).
+ Pretty(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if searchResult.Hits == nil {
+ t.Fatalf("expected SearchResult.Hits != nil; got nil")
+ }
+ if got := searchResult.Hits.TotalHits; got == 0 {
+ t.Fatalf("expected SearchResult.Hits.TotalHits > 0; got %d", got)
+ }
+
+ for _, hit := range searchResult.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ }
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(hit.Fields) == 0 {
+ t.Fatal("expected fields in SearchResult")
+ }
+ usersVal, ok := hit.Fields["user"]
+ if !ok {
+ t.Fatalf("expected %q field in fields of SearchResult", "user")
+ }
+ users, ok := usersVal.([]interface{})
+ if !ok {
+ t.Fatalf("expected slice of strings in field of SearchResult, got %T", usersVal)
+ }
+ if len(users) != 1 {
+ t.Fatalf("expected 1 entry in users slice, got %d", len(users))
+ }
+ lastTweets, ok := hit.InnerHits["last_tweets"]
+ if !ok {
+ t.Fatalf("expected inner_hits named %q in SearchResult", "last_tweets")
+ }
+ if lastTweets == nil {
+ t.Fatal("expected inner_hits in SearchResult")
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/setup_test.go b/vendor/github.com/olivere/elastic/setup_test.go
new file mode 100644
index 000000000..480ae5d20
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/setup_test.go
@@ -0,0 +1,445 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "math/rand"
+ "os"
+ "time"
+)
+
+const (
+ testIndexName = "elastic-test"
+ testIndexName2 = "elastic-test2"
+ testIndexName3 = "elastic-test3"
+ testMapping = `
+{
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
+ "properties":{
+ "user":{
+ "type":"keyword"
+ },
+ "message":{
+ "type":"text",
+ "store": true,
+ "fielddata": true
+ },
+ "tags":{
+ "type":"keyword"
+ },
+ "location":{
+ "type":"geo_point"
+ },
+ "suggest_field":{
+ "type":"completion",
+ "contexts":[
+ {
+ "name":"user_name",
+ "type":"category"
+ }
+ ]
+ }
+ }
+ }
+ }
+}
+`
+
+ testNoSourceIndexName = "elastic-nosource-test"
+ testNoSourceMapping = `
+{
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
+ "_source": {
+ "enabled": false
+ },
+ "properties":{
+ "user":{
+ "type":"keyword"
+ },
+ "message":{
+ "type":"text",
+ "store": true,
+ "fielddata": true
+ },
+ "tags":{
+ "type":"keyword"
+ },
+ "location":{
+ "type":"geo_point"
+ },
+ "suggest_field":{
+ "type":"completion",
+ "contexts":[
+ {
+ "name":"user_name",
+ "type":"category"
+ }
+ ]
+ }
+ }
+ }
+ }
+}
+`
+
+ testJoinIndex = "elastic-joins"
+ testJoinMapping = `
+ {
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
+ "properties":{
+ "message":{
+ "type":"text"
+ },
+ "my_join_field": {
+ "type": "join",
+ "relations": {
+ "question": "answer"
+ }
+ }
+ }
+ }
+ }
+ }
+`
+
+ testOrderIndex = "elastic-orders"
+ testOrderMapping = `
+{
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
+ "properties":{
+ "article":{
+ "type":"text"
+ },
+ "manufacturer":{
+ "type":"keyword"
+ },
+ "price":{
+ "type":"float"
+ },
+ "time":{
+ "type":"date",
+ "format": "YYYY-MM-dd"
+ }
+ }
+ }
+ }
+}
+`
+
+ /*
+ testDoctypeIndex = "elastic-doctypes"
+ testDoctypeMapping = `
+ {
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
+ "properties":{
+ "message":{
+ "type":"text",
+ "store": true,
+ "fielddata": true
+ }
+ }
+ }
+ }
+ }
+ `
+ */
+
+ testQueryIndex = "elastic-queries"
+ testQueryMapping = `
+{
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
+ "properties":{
+ "message":{
+ "type":"text",
+ "store": true,
+ "fielddata": true
+ },
+ "query": {
+ "type": "percolator"
+ }
+ }
+ }
+ }
+}
+`
+)
+
+type tweet struct {
+ User string `json:"user"`
+ Message string `json:"message"`
+ Retweets int `json:"retweets"`
+ Image string `json:"image,omitempty"`
+ Created time.Time `json:"created,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Location string `json:"location,omitempty"`
+ Suggest *SuggestField `json:"suggest_field,omitempty"`
+}
+
+func (t tweet) String() string {
+ return fmt.Sprintf("tweet{User:%q,Message:%q,Retweets:%d}", t.User, t.Message, t.Retweets)
+}
+
+type comment struct {
+ User string `json:"user"`
+ Comment string `json:"comment"`
+ Created time.Time `json:"created,omitempty"`
+}
+
+func (c comment) String() string {
+ return fmt.Sprintf("comment{User:%q,Comment:%q}", c.User, c.Comment)
+}
+
+type joinDoc struct {
+ Message string `json:"message"`
+ JoinField interface{} `json:"my_join_field,omitempty"`
+}
+
+type joinField struct {
+ Name string `json:"name"`
+ Parent string `json:"parent,omitempty"`
+}
+
+type order struct {
+ Article string `json:"article"`
+ Manufacturer string `json:"manufacturer"`
+ Price float64 `json:"price"`
+ Time string `json:"time,omitempty"`
+}
+
+func (o order) String() string {
+ return fmt.Sprintf("order{Article:%q,Manufacturer:%q,Price:%v,Time:%v}", o.Article, o.Manufacturer, o.Price, o.Time)
+}
+
+// doctype is required for Percolate tests.
+type doctype struct {
+ Message string `json:"message"`
+}
+
+// queries is required for Percolate tests.
+type queries struct {
+ Query string `json:"query"`
+}
+
+func isTravis() bool {
+ return os.Getenv("TRAVIS") != ""
+}
+
+func travisGoVersion() string {
+ return os.Getenv("TRAVIS_GO_VERSION")
+}
+
+type logger interface {
+ Error(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Fail()
+ FailNow()
+ Log(args ...interface{})
+ Logf(format string, args ...interface{})
+}
+
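+// setupTestClient creates a test Client and deletes all indices used by the
+// test suite, so each test starts from a clean cluster state.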
+func setupTestClient(t logger, options ...ClientOptionFunc) (client *Client) {
+ var err error
+
+ client, err = NewClient(options...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client.DeleteIndex(testIndexName).Do(context.TODO())
+ client.DeleteIndex(testIndexName2).Do(context.TODO())
+ client.DeleteIndex(testIndexName3).Do(context.TODO())
+ client.DeleteIndex(testOrderIndex).Do(context.TODO())
+ client.DeleteIndex(testNoSourceIndexName).Do(context.TODO())
+ //client.DeleteIndex(testDoctypeIndex).Do(context.TODO())
+ client.DeleteIndex(testQueryIndex).Do(context.TODO())
+ client.DeleteIndex(testJoinIndex).Do(context.TODO())
+
+ return client
+}
+
+func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Client {
+ client := setupTestClient(t, options...)
+
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex)
+ }
+
+ // Create second index
+ createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex2 == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex2)
+ }
+
+ // Create no source index
+ createNoSourceIndex, err := client.CreateIndex(testNoSourceIndexName).Body(testNoSourceMapping).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createNoSourceIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createNoSourceIndex)
+ }
+
+ // Create order index
+ createOrderIndex, err := client.CreateIndex(testOrderIndex).Body(testOrderMapping).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createOrderIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createOrderIndex)
+ }
+
+ return client
+}
+
+func setupTestClientAndCreateIndexAndLog(t logger, options ...ClientOptionFunc) *Client {
+ return setupTestClientAndCreateIndex(t, append(options, SetTraceLog(log.New(os.Stdout, "", 0)))...)
+}
+
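+// setupTestClientAndCreateIndexAndAddDocs additionally indexes a fixed set of
+// tweets and orders and flushes them, so search tests run against
+// deterministic data.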
+func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFunc) *Client {
+ client := setupTestClientAndCreateIndex(t, options...)
+
+ // Add tweets
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+ //comment1 := comment{User: "nico", Comment: "You bet."}
+
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").Routing("someroutingkey").BodyJson(&tweet3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ /*
+ _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("3").BodyJson(&comment1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ */
+
+ // Add orders
+ var orders []order
+ orders = append(orders, order{Article: "Apple MacBook", Manufacturer: "Apple", Price: 1290, Time: "2015-01-18"})
+ orders = append(orders, order{Article: "Paper", Manufacturer: "Canon", Price: 100, Time: "2015-03-01"})
+ orders = append(orders, order{Article: "Apple iPad", Manufacturer: "Apple", Price: 499, Time: "2015-04-12"})
+ orders = append(orders, order{Article: "Dell XPS 13", Manufacturer: "Dell", Price: 1600, Time: "2015-04-18"})
+ orders = append(orders, order{Article: "Apple Watch", Manufacturer: "Apple", Price: 349, Time: "2015-04-29"})
+ orders = append(orders, order{Article: "Samsung TV", Manufacturer: "Samsung", Price: 790, Time: "2015-05-03"})
+ orders = append(orders, order{Article: "Hoodie", Manufacturer: "h&m", Price: 49, Time: "2015-06-03"})
+ orders = append(orders, order{Article: "T-Shirt", Manufacturer: "h&m", Price: 19, Time: "2015-06-18"})
+ for i, o := range orders {
+ id := fmt.Sprintf("%d", i)
+ _, err = client.Index().Index(testOrderIndex).Type("doc").Id(id).BodyJson(&o).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // Flush
+ _, err = client.Flush().Index(testIndexName, testOrderIndex).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ return client
+}
+
+func setupTestClientAndCreateIndexAndAddDocsNoSource(t logger, options ...ClientOptionFunc) *Client {
+ client := setupTestClientAndCreateIndex(t, options...)
+
+ // Add tweets
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+ _, err := client.Index().Index(testNoSourceIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testNoSourceIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Flush
+ _, err = client.Flush().Index(testNoSourceIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return client
+}
+
+var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
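+// randomString returns a random string of n ASCII letters. It is not
+// cryptographically secure and is only meant for building test fixtures.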
+func randomString(n int) string {
+ b := make([]rune, n)
+ for i := range b {
+ b[i] = letters[rand.Intn(len(letters))]
+ }
+ return string(b)
+}
+
+type lexicographically struct {
+ strings []string
+}
+
+func (l lexicographically) Len() int {
+ return len(l.strings)
+}
+
+func (l lexicographically) Less(i, j int) bool {
+ return l.strings[i] < l.strings[j]
+}
+
+func (l lexicographically) Swap(i, j int) {
+ l.strings[i], l.strings[j] = l.strings[j], l.strings[i]
+}
diff --git a/vendor/github.com/olivere/elastic/snapshot_create.go b/vendor/github.com/olivere/elastic/snapshot_create.go
new file mode 100644
index 000000000..1bbd2762e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/snapshot_create.go
@@ -0,0 +1,191 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "time"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// SnapshotCreateService is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html.
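+//
+// A minimal usage sketch (illustrative; "my_repo" must be an existing
+// repository and "snap_1" is an assumed snapshot name):
+//
+//   res, err := client.SnapshotCreate("my_repo", "snap_1").
+//       WaitForCompletion(true).
+//       Do(context.Background())
+//   if err != nil {
+//       // handle error
+//   }
+//   _ = res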
+type SnapshotCreateService struct {
+ client *Client
+ pretty bool
+ repository string
+ snapshot string
+ masterTimeout string
+ waitForCompletion *bool
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewSnapshotCreateService creates a new SnapshotCreateService.
+func NewSnapshotCreateService(client *Client) *SnapshotCreateService {
+ return &SnapshotCreateService{
+ client: client,
+ }
+}
+
+// Repository is the repository name.
+func (s *SnapshotCreateService) Repository(repository string) *SnapshotCreateService {
+ s.repository = repository
+ return s
+}
+
+// Snapshot is the snapshot name.
+func (s *SnapshotCreateService) Snapshot(snapshot string) *SnapshotCreateService {
+ s.snapshot = snapshot
+ return s
+}
+
+// MasterTimeout is documented as: Explicit operation timeout for connection to master node.
+func (s *SnapshotCreateService) MasterTimeout(masterTimeout string) *SnapshotCreateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// WaitForCompletion is documented as: Should this request wait until the operation has completed before returning.
+func (s *SnapshotCreateService) WaitForCompletion(waitForCompletion bool) *SnapshotCreateService {
+ s.waitForCompletion = &waitForCompletion
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *SnapshotCreateService) Pretty(pretty bool) *SnapshotCreateService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson is documented as: The snapshot definition.
+func (s *SnapshotCreateService) BodyJson(body interface{}) *SnapshotCreateService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is documented as: The snapshot definition.
+func (s *SnapshotCreateService) BodyString(body string) *SnapshotCreateService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *SnapshotCreateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_snapshot/{repository}/{snapshot}", map[string]string{
+ "snapshot": s.snapshot,
+ "repository": s.repository,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.waitForCompletion != nil {
+ params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *SnapshotCreateService) Validate() error {
+ var invalid []string
+ if s.repository == "" {
+ invalid = append(invalid, "Repository")
+ }
+ if s.snapshot == "" {
+ invalid = append(invalid, "Snapshot")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *SnapshotCreateService) Do(ctx context.Context) (*SnapshotCreateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(SnapshotCreateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// SnapshotShardFailure stores information about failures that occurred during the shard snapshotting process.
+type SnapshotShardFailure struct {
+ Index string `json:"index"`
+ IndexUUID string `json:"index_uuid"`
+ ShardID int `json:"shard_id"`
+ Reason string `json:"reason"`
+ NodeID string `json:"node_id"`
+ Status string `json:"status"`
+}
+
+// SnapshotCreateResponse is the response of SnapshotCreateService.Do.
+type SnapshotCreateResponse struct {
+ // Accepted indicates whether the request was accepted by elasticsearch.
+ // It's available when waitForCompletion is false.
+ Accepted *bool `json:"accepted"`
+
+ // Snapshot is available when waitForCompletion is true.
+ Snapshot *struct {
+ Snapshot string `json:"snapshot"`
+ UUID string `json:"uuid"`
+ VersionID int `json:"version_id"`
+ Version string `json:"version"`
+ Indices []string `json:"indices"`
+ State string `json:"state"`
+ Reason string `json:"reason"`
+ StartTime time.Time `json:"start_time"`
+ StartTimeInMillis int64 `json:"start_time_in_millis"`
+ EndTime time.Time `json:"end_time"`
+ EndTimeInMillis int64 `json:"end_time_in_millis"`
+ DurationInMillis int64 `json:"duration_in_millis"`
+ Failures []SnapshotShardFailure `json:"failures"`
+ Shards shardsInfo `json:"shards"`
+ } `json:"snapshot"`
+}
diff --git a/vendor/github.com/olivere/elastic/snapshot_create_repository.go b/vendor/github.com/olivere/elastic/snapshot_create_repository.go
new file mode 100644
index 000000000..e7f6d5336
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/snapshot_create_repository.go
@@ -0,0 +1,205 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// SnapshotCreateRepositoryService creates a snapshot repository.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
+// for details.
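+//
+// A minimal usage sketch (illustrative; the repository name and location
+// are assumptions, and the path must be registered in path.repo on every
+// cluster node):
+//
+//   _, err := client.SnapshotCreateRepository("my_backup").
+//       Type("fs").
+//       Setting("location", "/mnt/backups").
+//       Do(context.Background())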
+type SnapshotCreateRepositoryService struct {
+ client *Client
+ pretty bool
+ repository string
+ masterTimeout string
+ timeout string
+ verify *bool
+ typ string
+ settings map[string]interface{}
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewSnapshotCreateRepositoryService creates a new SnapshotCreateRepositoryService.
+func NewSnapshotCreateRepositoryService(client *Client) *SnapshotCreateRepositoryService {
+ return &SnapshotCreateRepositoryService{
+ client: client,
+ }
+}
+
+// Repository is the repository name.
+func (s *SnapshotCreateRepositoryService) Repository(repository string) *SnapshotCreateRepositoryService {
+ s.repository = repository
+ return s
+}
+
+// MasterTimeout specifies an explicit operation timeout for connection to master node.
+func (s *SnapshotCreateRepositoryService) MasterTimeout(masterTimeout string) *SnapshotCreateRepositoryService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *SnapshotCreateRepositoryService) Timeout(timeout string) *SnapshotCreateRepositoryService {
+ s.timeout = timeout
+ return s
+}
+
+// Verify indicates whether to verify the repository after creation.
+func (s *SnapshotCreateRepositoryService) Verify(verify bool) *SnapshotCreateRepositoryService {
+ s.verify = &verify
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *SnapshotCreateRepositoryService) Pretty(pretty bool) *SnapshotCreateRepositoryService {
+ s.pretty = pretty
+ return s
+}
+
+// Type sets the snapshot repository type, e.g. "fs".
+func (s *SnapshotCreateRepositoryService) Type(typ string) *SnapshotCreateRepositoryService {
+ s.typ = typ
+ return s
+}
+
+// Settings sets all settings of the snapshot repository.
+func (s *SnapshotCreateRepositoryService) Settings(settings map[string]interface{}) *SnapshotCreateRepositoryService {
+ s.settings = settings
+ return s
+}
+
+// Setting sets a single setting of the snapshot repository.
+func (s *SnapshotCreateRepositoryService) Setting(name string, value interface{}) *SnapshotCreateRepositoryService {
+ if s.settings == nil {
+ s.settings = make(map[string]interface{})
+ }
+ s.settings[name] = value
+ return s
+}
+
+// BodyJson is documented as: The repository definition.
+func (s *SnapshotCreateRepositoryService) BodyJson(body interface{}) *SnapshotCreateRepositoryService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is documented as: The repository definition.
+func (s *SnapshotCreateRepositoryService) BodyString(body string) *SnapshotCreateRepositoryService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *SnapshotCreateRepositoryService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_snapshot/{repository}", map[string]string{
+ "repository": s.repository,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.verify != nil {
+ params.Set("verify", fmt.Sprintf("%v", *s.verify))
+ }
+ return path, params, nil
+}
+
+// buildBody builds the body for the operation.
+func (s *SnapshotCreateRepositoryService) buildBody() (interface{}, error) {
+ if s.bodyJson != nil {
+ return s.bodyJson, nil
+ }
+ if s.bodyString != "" {
+ return s.bodyString, nil
+ }
+
+ body := map[string]interface{}{
+ "type": s.typ,
+ }
+ if len(s.settings) > 0 {
+ body["settings"] = s.settings
+ }
+ return body, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *SnapshotCreateRepositoryService) Validate() error {
+ var invalid []string
+ if s.repository == "" {
+ invalid = append(invalid, "Repository")
+ }
+ if s.bodyString == "" && s.bodyJson == nil {
+ invalid = append(invalid, "BodyJson")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *SnapshotCreateRepositoryService) Do(ctx context.Context) (*SnapshotCreateRepositoryResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ body, err := s.buildBody()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(SnapshotCreateRepositoryResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// SnapshotCreateRepositoryResponse is the response of SnapshotCreateRepositoryService.Do.
+type SnapshotCreateRepositoryResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
diff --git a/vendor/github.com/olivere/elastic/snapshot_create_repository_test.go b/vendor/github.com/olivere/elastic/snapshot_create_repository_test.go
new file mode 100644
index 000000000..2045c700d
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/snapshot_create_repository_test.go
@@ -0,0 +1,61 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSnapshotPutRepositoryURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Repository string
+ Expected string
+ }{
+ {
+ "repo",
+ "/_snapshot/repo",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.SnapshotCreateRepository(test.Repository).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
+
+func TestSnapshotPutRepositoryBody(t *testing.T) {
+ client := setupTestClient(t)
+
+ service := client.SnapshotCreateRepository("my_backup")
+ service = service.Type("fs").
+ Settings(map[string]interface{}{
+ "location": "my_backup_location",
+ "compress": false,
+ }).
+ Setting("compress", true).
+ Setting("chunk_size", 16*1024*1024)
+
+ src, err := service.buildBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"settings":{"chunk_size":16777216,"compress":true,"location":"my_backup_location"},"type":"fs"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/snapshot_create_test.go b/vendor/github.com/olivere/elastic/snapshot_create_test.go
new file mode 100644
index 000000000..74b009cfe
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/snapshot_create_test.go
@@ -0,0 +1,63 @@
+package elastic
+
+import (
+ "net/url"
+ "reflect"
+ "testing"
+)
+
+func TestSnapshotValidate(t *testing.T) {
+ var client *Client
+
+ err := NewSnapshotCreateService(client).Validate()
+ got := err.Error()
+ expected := "missing required fields: [Repository Snapshot]"
+ if got != expected {
+ t.Errorf("expected %q; got: %q", expected, got)
+ }
+}
+
+func TestSnapshotPutURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Repository string
+ Snapshot string
+ Pretty bool
+ MasterTimeout string
+ WaitForCompletion bool
+ ExpectedPath string
+ ExpectedParams url.Values
+ }{
+ {
+ Repository: "repo",
+ Snapshot: "snapshot_of_sunday",
+ Pretty: true,
+ MasterTimeout: "60s",
+ WaitForCompletion: true,
+ ExpectedPath: "/_snapshot/repo/snapshot_of_sunday",
+ ExpectedParams: url.Values{
+ "pretty": []string{"true"},
+ "master_timeout": []string{"60s"},
+ "wait_for_completion": []string{"true"},
+ },
+ },
+ }
+
+ for _, test := range tests {
+ path, params, err := client.SnapshotCreate(test.Repository, test.Snapshot).
+ Pretty(test.Pretty).
+ MasterTimeout(test.MasterTimeout).
+ WaitForCompletion(test.WaitForCompletion).
+ buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.ExpectedPath {
+ t.Errorf("expected %q; got: %q", test.ExpectedPath, path)
+ }
+ if !reflect.DeepEqual(params, test.ExpectedParams) {
+ t.Errorf("expected %q; got: %q", test.ExpectedParams, params)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/snapshot_delete_repository.go b/vendor/github.com/olivere/elastic/snapshot_delete_repository.go
new file mode 100644
index 000000000..ad3e49b0e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/snapshot_delete_repository.go
@@ -0,0 +1,132 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// SnapshotDeleteRepositoryService deletes a snapshot repository.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
+// for details.
+type SnapshotDeleteRepositoryService struct {
+ client *Client
+ pretty bool
+ repository []string
+ masterTimeout string
+ timeout string
+}
+
+// NewSnapshotDeleteRepositoryService creates a new SnapshotDeleteRepositoryService.
+func NewSnapshotDeleteRepositoryService(client *Client) *SnapshotDeleteRepositoryService {
+ return &SnapshotDeleteRepositoryService{
+ client: client,
+ repository: make([]string, 0),
+ }
+}
+
+// Repository is the list of repository names.
+func (s *SnapshotDeleteRepositoryService) Repository(repositories ...string) *SnapshotDeleteRepositoryService {
+ s.repository = append(s.repository, repositories...)
+ return s
+}
+
+// MasterTimeout specifies an explicit operation timeout for connection to master node.
+func (s *SnapshotDeleteRepositoryService) MasterTimeout(masterTimeout string) *SnapshotDeleteRepositoryService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *SnapshotDeleteRepositoryService) Timeout(timeout string) *SnapshotDeleteRepositoryService {
+ s.timeout = timeout
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human readable.
+func (s *SnapshotDeleteRepositoryService) Pretty(pretty bool) *SnapshotDeleteRepositoryService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *SnapshotDeleteRepositoryService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_snapshot/{repository}", map[string]string{
+ "repository": strings.Join(s.repository, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *SnapshotDeleteRepositoryService) Validate() error {
+ var invalid []string
+ if len(s.repository) == 0 {
+ invalid = append(invalid, "Repository")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *SnapshotDeleteRepositoryService) Do(ctx context.Context) (*SnapshotDeleteRepositoryResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(SnapshotDeleteRepositoryResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// SnapshotDeleteRepositoryResponse is the response of SnapshotDeleteRepositoryService.Do.
+type SnapshotDeleteRepositoryResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
+}
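+
+// Example (an illustrative sketch; the repository name is an assumption):
+//
+//   res, err := client.SnapshotDeleteRepository("my_backup").
+//     Do(context.Background())
+//   if err != nil {
+//     // handle error
+//   }
+//   _ = res.Acknowledged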
diff --git a/vendor/github.com/olivere/elastic/snapshot_delete_repository_test.go b/vendor/github.com/olivere/elastic/snapshot_delete_repository_test.go
new file mode 100644
index 000000000..aec793a60
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/snapshot_delete_repository_test.go
@@ -0,0 +1,35 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestSnapshotDeleteRepositoryURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Repository []string
+ Expected string
+ }{
+ {
+ []string{"repo1"},
+ "/_snapshot/repo1",
+ },
+ {
+ []string{"repo1", "repo2"},
+ "/_snapshot/repo1%2Crepo2",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.SnapshotDeleteRepository(test.Repository...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/snapshot_get_repository.go b/vendor/github.com/olivere/elastic/snapshot_get_repository.go
new file mode 100644
index 000000000..2d24c5e4c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/snapshot_get_repository.go
@@ -0,0 +1,134 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// SnapshotGetRepositoryService reads a snapshot repository.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
+// for details.
+type SnapshotGetRepositoryService struct {
+ client *Client
+ pretty bool
+ repository []string
+ local *bool
+ masterTimeout string
+}
+
+// NewSnapshotGetRepositoryService creates a new SnapshotGetRepositoryService.
+func NewSnapshotGetRepositoryService(client *Client) *SnapshotGetRepositoryService {
+ return &SnapshotGetRepositoryService{
+ client: client,
+ repository: make([]string, 0),
+ }
+}
+
+// Repository is the list of repository names.
+func (s *SnapshotGetRepositoryService) Repository(repositories ...string) *SnapshotGetRepositoryService {
+ s.repository = append(s.repository, repositories...)
+ return s
+}
+
+// Local indicates whether to return local information, i.e. not to retrieve the state from the master node (default: false).
+func (s *SnapshotGetRepositoryService) Local(local bool) *SnapshotGetRepositoryService {
+ s.local = &local
+ return s
+}
+
+// MasterTimeout specifies an explicit operation timeout for connection to master node.
+func (s *SnapshotGetRepositoryService) MasterTimeout(masterTimeout string) *SnapshotGetRepositoryService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human readable.
+func (s *SnapshotGetRepositoryService) Pretty(pretty bool) *SnapshotGetRepositoryService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *SnapshotGetRepositoryService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.repository) > 0 {
+ path, err = uritemplates.Expand("/_snapshot/{repository}", map[string]string{
+ "repository": strings.Join(s.repository, ","),
+ })
+ } else {
+ path = "/_snapshot"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *SnapshotGetRepositoryService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *SnapshotGetRepositoryService) Do(ctx context.Context) (SnapshotGetRepositoryResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret SnapshotGetRepositoryResponse
+ if err := json.Unmarshal(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// SnapshotGetRepositoryResponse is the response of SnapshotGetRepositoryService.Do.
+type SnapshotGetRepositoryResponse map[string]*SnapshotRepositoryMetaData
+
+// SnapshotRepositoryMetaData contains all information about
+// a single snapshot repository.
+type SnapshotRepositoryMetaData struct {
+ Type string `json:"type"`
+ Settings map[string]interface{} `json:"settings,omitempty"`
+}
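+
+// Example (an illustrative sketch; calling the service without repository
+// names lists all registered repositories):
+//
+//   repos, err := client.SnapshotGetRepository().Do(context.Background())
+//   if err != nil {
+//     // handle error
+//   }
+//   for name, meta := range repos {
+//     fmt.Printf("%s is of type %s\n", name, meta.Type)
+//   }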
diff --git a/vendor/github.com/olivere/elastic/snapshot_get_repository_test.go b/vendor/github.com/olivere/elastic/snapshot_get_repository_test.go
new file mode 100644
index 000000000..0dcd0bb90
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/snapshot_get_repository_test.go
@@ -0,0 +1,39 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestSnapshotGetRepositoryURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Repository []string
+ Expected string
+ }{
+ {
+ []string{},
+ "/_snapshot",
+ },
+ {
+ []string{"repo1"},
+ "/_snapshot/repo1",
+ },
+ {
+ []string{"repo1", "repo2"},
+ "/_snapshot/repo1%2Crepo2",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.SnapshotGetRepository(test.Repository...).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/snapshot_verify_repository.go b/vendor/github.com/olivere/elastic/snapshot_verify_repository.go
new file mode 100644
index 000000000..5494ab475
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/snapshot_verify_repository.go
@@ -0,0 +1,132 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// SnapshotVerifyRepositoryService verifies a snapshot repository.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
+// for details.
+type SnapshotVerifyRepositoryService struct {
+ client *Client
+ pretty bool
+ repository string
+ masterTimeout string
+ timeout string
+}
+
+// NewSnapshotVerifyRepositoryService creates a new SnapshotVerifyRepositoryService.
+func NewSnapshotVerifyRepositoryService(client *Client) *SnapshotVerifyRepositoryService {
+ return &SnapshotVerifyRepositoryService{
+ client: client,
+ }
+}
+
+// Repository specifies the repository name.
+func (s *SnapshotVerifyRepositoryService) Repository(repository string) *SnapshotVerifyRepositoryService {
+ s.repository = repository
+ return s
+}
+
+// MasterTimeout is the explicit operation timeout for connection to master node.
+func (s *SnapshotVerifyRepositoryService) MasterTimeout(masterTimeout string) *SnapshotVerifyRepositoryService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *SnapshotVerifyRepositoryService) Timeout(timeout string) *SnapshotVerifyRepositoryService {
+ s.timeout = timeout
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human readable.
+func (s *SnapshotVerifyRepositoryService) Pretty(pretty bool) *SnapshotVerifyRepositoryService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *SnapshotVerifyRepositoryService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_snapshot/{repository}/_verify", map[string]string{
+ "repository": s.repository,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *SnapshotVerifyRepositoryService) Validate() error {
+ var invalid []string
+ if s.repository == "" {
+ invalid = append(invalid, "Repository")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *SnapshotVerifyRepositoryService) Do(ctx context.Context) (*SnapshotVerifyRepositoryResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(SnapshotVerifyRepositoryResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// SnapshotVerifyRepositoryResponse is the response of SnapshotVerifyRepositoryService.Do.
+type SnapshotVerifyRepositoryResponse struct {
+ Nodes map[string]*SnapshotVerifyRepositoryNode `json:"nodes"`
+}
+
+type SnapshotVerifyRepositoryNode struct {
+ Name string `json:"name"`
+}
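+
+// Example (an illustrative sketch; the repository name is an assumption):
+//
+//   res, err := client.SnapshotVerifyRepository("my_backup").
+//     Do(context.Background())
+//   if err != nil {
+//     // handle error
+//   }
+//   for id, node := range res.Nodes {
+//     fmt.Printf("verified on node %s (%s)\n", id, node.Name)
+//   }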
diff --git a/vendor/github.com/olivere/elastic/snapshot_verify_repository_test.go b/vendor/github.com/olivere/elastic/snapshot_verify_repository_test.go
new file mode 100644
index 000000000..9776782d2
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/snapshot_verify_repository_test.go
@@ -0,0 +1,31 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestSnapshotVerifyRepositoryURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Repository string
+ Expected string
+ }{
+ {
+ "repo",
+ "/_snapshot/repo/_verify",
+ },
+ }
+
+ for _, test := range tests {
+ path, _, err := client.SnapshotVerifyRepository(test.Repository).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/sort.go b/vendor/github.com/olivere/elastic/sort.go
new file mode 100644
index 000000000..7e2b32183
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/sort.go
@@ -0,0 +1,614 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// -- Sorter --
+
+// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-sort.html.
+type Sorter interface {
+ Source() (interface{}, error)
+}
+
+// -- SortInfo --
+
+// SortInfo contains information about sorting a field.
+type SortInfo struct {
+ Sorter
+ Field string
+ Ascending bool
+ Missing interface{}
+ IgnoreUnmapped *bool
+ UnmappedType string
+ SortMode string
+ NestedFilter Query
+ NestedPath string
+ NestedSort *NestedSort // available in 6.1 or later
+}
+
+func (info SortInfo) Source() (interface{}, error) {
+ prop := make(map[string]interface{})
+ if info.Ascending {
+ prop["order"] = "asc"
+ } else {
+ prop["order"] = "desc"
+ }
+ if info.Missing != nil {
+ prop["missing"] = info.Missing
+ }
+ if info.IgnoreUnmapped != nil {
+ prop["ignore_unmapped"] = *info.IgnoreUnmapped
+ }
+ if info.UnmappedType != "" {
+ prop["unmapped_type"] = info.UnmappedType
+ }
+ if info.SortMode != "" {
+ prop["mode"] = info.SortMode
+ }
+ if info.NestedFilter != nil {
+ src, err := info.NestedFilter.Source()
+ if err != nil {
+ return nil, err
+ }
+ prop["nested_filter"] = src
+ }
+ if info.NestedPath != "" {
+ prop["nested_path"] = info.NestedPath
+ }
+ if info.NestedSort != nil {
+ src, err := info.NestedSort.Source()
+ if err != nil {
+ return nil, err
+ }
+ prop["nested"] = src
+ }
+ source := make(map[string]interface{})
+ source[info.Field] = prop
+ return source, nil
+}
+
+// -- SortByDoc --
+
+// SortByDoc sorts by the "_doc" field, as described in
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html.
+//
+// Example:
+// ss := elastic.NewSearchSource()
+// ss = ss.SortBy(elastic.SortByDoc{})
+type SortByDoc struct {
+ Sorter
+}
+
+// Source returns the JSON-serializable data.
+func (s SortByDoc) Source() (interface{}, error) {
+ return "_doc", nil
+}
+
+// -- ScoreSort --
+
+// ScoreSort sorts by relevancy score.
+type ScoreSort struct {
+ Sorter
+ ascending bool
+}
+
+// NewScoreSort creates a new ScoreSort.
+func NewScoreSort() *ScoreSort {
+ return &ScoreSort{ascending: false} // Descending by default!
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s *ScoreSort) Order(ascending bool) *ScoreSort {
+ s.ascending = ascending
+ return s
+}
+
+// Asc sets ascending sort order.
+func (s *ScoreSort) Asc() *ScoreSort {
+ s.ascending = true
+ return s
+}
+
+// Desc sets descending sort order.
+func (s *ScoreSort) Desc() *ScoreSort {
+ s.ascending = false
+ return s
+}
+
+// Source returns the JSON-serializable data.
+func (s *ScoreSort) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ x := make(map[string]interface{})
+ source["_score"] = x
+ if s.ascending {
+ x["order"] = "asc"
+ } else {
+ x["order"] = "desc"
+ }
+ return source, nil
+}
+
+// -- FieldSort --
+
+// FieldSort sorts by a given field.
+type FieldSort struct {
+ Sorter
+ fieldName string
+ ascending bool
+ missing interface{}
+ unmappedType *string
+ sortMode *string
+ nestedFilter Query
+ nestedPath *string
+ nestedSort *NestedSort
+}
+
+// NewFieldSort creates a new FieldSort.
+func NewFieldSort(fieldName string) *FieldSort {
+ return &FieldSort{
+ fieldName: fieldName,
+ ascending: true,
+ }
+}
+
+// FieldName specifies the name of the field to be used for sorting.
+func (s *FieldSort) FieldName(fieldName string) *FieldSort {
+ s.fieldName = fieldName
+ return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s *FieldSort) Order(ascending bool) *FieldSort {
+ s.ascending = ascending
+ return s
+}
+
+// Asc sets ascending sort order.
+func (s *FieldSort) Asc() *FieldSort {
+ s.ascending = true
+ return s
+}
+
+// Desc sets descending sort order.
+func (s *FieldSort) Desc() *FieldSort {
+ s.ascending = false
+ return s
+}
+
+// Missing sets the value to be used when a field is missing in a document.
+// You can also use "_last" or "_first" to sort missing last or first
+// respectively.
+func (s *FieldSort) Missing(missing interface{}) *FieldSort {
+ s.missing = missing
+ return s
+}
+
+// UnmappedType sets the type to use when the current field is not mapped
+// in an index.
+func (s *FieldSort) UnmappedType(typ string) *FieldSort {
+ s.unmappedType = &typ
+ return s
+}
+
+// SortMode specifies what values to pick in case a document contains
+// multiple values for the targeted sort field. Possible values are:
+// min, max, sum, and avg.
+func (s *FieldSort) SortMode(sortMode string) *FieldSort {
+ s.sortMode = &sortMode
+ return s
+}
+
+// NestedFilter sets a filter that nested objects should match with
+// in order to be taken into account for sorting.
+func (s *FieldSort) NestedFilter(nestedFilter Query) *FieldSort {
+ s.nestedFilter = nestedFilter
+ return s
+}
+
+// NestedPath is used if sorting occurs on a field that is inside a
+// nested object.
+func (s *FieldSort) NestedPath(nestedPath string) *FieldSort {
+ s.nestedPath = &nestedPath
+ return s
+}
+
+// NestedSort is available starting with 6.1 and will replace NestedFilter
+// and NestedPath.
+func (s *FieldSort) NestedSort(nestedSort *NestedSort) *FieldSort {
+ s.nestedSort = nestedSort
+ return s
+}
+
+// Source returns the JSON-serializable data.
+func (s *FieldSort) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ x := make(map[string]interface{})
+ source[s.fieldName] = x
+ if s.ascending {
+ x["order"] = "asc"
+ } else {
+ x["order"] = "desc"
+ }
+ if s.missing != nil {
+ x["missing"] = s.missing
+ }
+ if s.unmappedType != nil {
+ x["unmapped_type"] = *s.unmappedType
+ }
+ if s.sortMode != nil {
+ x["mode"] = *s.sortMode
+ }
+ if s.nestedFilter != nil {
+ src, err := s.nestedFilter.Source()
+ if err != nil {
+ return nil, err
+ }
+ x["nested_filter"] = src
+ }
+ if s.nestedPath != nil {
+ x["nested_path"] = *s.nestedPath
+ }
+ if s.nestedSort != nil {
+ src, err := s.nestedSort.Source()
+ if err != nil {
+ return nil, err
+ }
+ x["nested"] = src
+ }
+ return source, nil
+}
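+
+// Example (an illustrative sketch; the field name "price" is an assumption):
+//   sorter := elastic.NewFieldSort("price").Desc().Missing("_last")
+//   ss := elastic.NewSearchSource().SortBy(sorter)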
+
+// -- GeoDistanceSort --
+
+// GeoDistanceSort allows for sorting by geographic distance.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-sort.html#_geo_distance_sorting.
+type GeoDistanceSort struct {
+ Sorter
+ fieldName string
+ points []*GeoPoint
+ geohashes []string
+ distanceType *string
+ unit string
+ ascending bool
+ sortMode *string
+ nestedFilter Query
+ nestedPath *string
+ nestedSort *NestedSort
+}
+
+// NewGeoDistanceSort creates a new sorter for geo distances.
+func NewGeoDistanceSort(fieldName string) *GeoDistanceSort {
+ return &GeoDistanceSort{
+ fieldName: fieldName,
+ ascending: true,
+ }
+}
+
+// FieldName specifies the name of the (geo) field to use for sorting.
+func (s *GeoDistanceSort) FieldName(fieldName string) *GeoDistanceSort {
+ s.fieldName = fieldName
+ return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s *GeoDistanceSort) Order(ascending bool) *GeoDistanceSort {
+ s.ascending = ascending
+ return s
+}
+
+// Asc sets ascending sort order.
+func (s *GeoDistanceSort) Asc() *GeoDistanceSort {
+ s.ascending = true
+ return s
+}
+
+// Desc sets descending sort order.
+func (s *GeoDistanceSort) Desc() *GeoDistanceSort {
+ s.ascending = false
+ return s
+}
+
+// Point specifies a point (latitude, longitude) to compute the distance from.
+func (s *GeoDistanceSort) Point(lat, lon float64) *GeoDistanceSort {
+ s.points = append(s.points, GeoPointFromLatLon(lat, lon))
+ return s
+}
+
+// Points specifies the geo point(s) to compute the distance from.
+func (s *GeoDistanceSort) Points(points ...*GeoPoint) *GeoDistanceSort {
+ s.points = append(s.points, points...)
+ return s
+}
+
+// GeoHashes specifies the geo point(s), encoded as geohashes, to compute the distance from.
+func (s *GeoDistanceSort) GeoHashes(geohashes ...string) *GeoDistanceSort {
+ s.geohashes = append(s.geohashes, geohashes...)
+ return s
+}
+
+// Unit specifies the distance unit to use. It defaults to km.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/common-options.html#distance-units
+// for details.
+func (s *GeoDistanceSort) Unit(unit string) *GeoDistanceSort {
+ s.unit = unit
+ return s
+}
+
+// GeoDistance is an alias for DistanceType.
+func (s *GeoDistanceSort) GeoDistance(geoDistance string) *GeoDistanceSort {
+ return s.DistanceType(geoDistance)
+}
+
+// DistanceType describes how to compute the distance, e.g. "arc" or "plane".
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-sort.html#geo-sorting
+// for details.
+func (s *GeoDistanceSort) DistanceType(distanceType string) *GeoDistanceSort {
+ s.distanceType = &distanceType
+ return s
+}
+
+// SortMode specifies what values to pick in case a document contains
+// multiple values for the targeted sort field. Possible values are:
+// min, max, sum, and avg.
+func (s *GeoDistanceSort) SortMode(sortMode string) *GeoDistanceSort {
+ s.sortMode = &sortMode
+ return s
+}
+
+// NestedFilter sets a filter that nested objects should match with
+// in order to be taken into account for sorting.
+func (s *GeoDistanceSort) NestedFilter(nestedFilter Query) *GeoDistanceSort {
+ s.nestedFilter = nestedFilter
+ return s
+}
+
+// NestedPath is used if sorting occurs on a field that is inside a
+// nested object.
+func (s *GeoDistanceSort) NestedPath(nestedPath string) *GeoDistanceSort {
+ s.nestedPath = &nestedPath
+ return s
+}
+
+// NestedSort is available starting with 6.1 and will replace NestedFilter
+// and NestedPath.
+func (s *GeoDistanceSort) NestedSort(nestedSort *NestedSort) *GeoDistanceSort {
+ s.nestedSort = nestedSort
+ return s
+}
+
+// Source returns the JSON-serializable data.
+func (s *GeoDistanceSort) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ x := make(map[string]interface{})
+ source["_geo_distance"] = x
+
+ // Points
+ var ptarr []interface{}
+ for _, pt := range s.points {
+ ptarr = append(ptarr, pt.Source())
+ }
+ for _, geohash := range s.geohashes {
+ ptarr = append(ptarr, geohash)
+ }
+ x[s.fieldName] = ptarr
+
+ if s.unit != "" {
+ x["unit"] = s.unit
+ }
+ if s.distanceType != nil {
+ x["distance_type"] = *s.distanceType
+ }
+
+ if s.ascending {
+ x["order"] = "asc"
+ } else {
+ x["order"] = "desc"
+ }
+ if s.sortMode != nil {
+ x["mode"] = *s.sortMode
+ }
+ if s.nestedFilter != nil {
+ src, err := s.nestedFilter.Source()
+ if err != nil {
+ return nil, err
+ }
+ x["nested_filter"] = src
+ }
+ if s.nestedPath != nil {
+ x["nested_path"] = *s.nestedPath
+ }
+ if s.nestedSort != nil {
+ src, err := s.nestedSort.Source()
+ if err != nil {
+ return nil, err
+ }
+ x["nested"] = src
+ }
+ return source, nil
+}
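+
+// Example (an illustrative sketch; field name and coordinates are assumptions):
+//   sorter := elastic.NewGeoDistanceSort("pin.location").
+//     Point(40.715, -74.011).
+//     Unit("km").
+//     DistanceType("arc").
+//     Asc()
+//   ss := elastic.NewSearchSource().SortBy(sorter)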
+
+// -- ScriptSort --
+
+// ScriptSort sorts by a custom script. See
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html#modules-scripting
+// for details about scripting.
+type ScriptSort struct {
+ Sorter
+ script *Script
+ typ string
+ ascending bool
+ sortMode *string
+ nestedFilter Query
+ nestedPath *string
+ nestedSort *NestedSort
+}
+
+// NewScriptSort creates and initializes a new ScriptSort.
+// You must provide a script and a type, e.g. "string" or "number".
+func NewScriptSort(script *Script, typ string) *ScriptSort {
+ return &ScriptSort{
+ script: script,
+ typ: typ,
+ ascending: true,
+ }
+}
+
+// Type sets the script type, which can be either "string" or "number".
+func (s *ScriptSort) Type(typ string) *ScriptSort {
+ s.typ = typ
+ return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s *ScriptSort) Order(ascending bool) *ScriptSort {
+ s.ascending = ascending
+ return s
+}
+
+// Asc sets ascending sort order.
+func (s *ScriptSort) Asc() *ScriptSort {
+ s.ascending = true
+ return s
+}
+
+// Desc sets descending sort order.
+func (s *ScriptSort) Desc() *ScriptSort {
+ s.ascending = false
+ return s
+}
+
+// SortMode specifies what values to pick in case a document contains
+// multiple values for the targeted sort field. Possible values are:
+// min or max.
+func (s *ScriptSort) SortMode(sortMode string) *ScriptSort {
+ s.sortMode = &sortMode
+ return s
+}
+
+// NestedFilter sets a filter that nested objects should match with
+// in order to be taken into account for sorting.
+func (s *ScriptSort) NestedFilter(nestedFilter Query) *ScriptSort {
+ s.nestedFilter = nestedFilter
+ return s
+}
+
+// NestedPath is used if sorting occurs on a field that is inside a
+// nested object.
+func (s *ScriptSort) NestedPath(nestedPath string) *ScriptSort {
+ s.nestedPath = &nestedPath
+ return s
+}
+
+// NestedSort is available starting with 6.1 and will replace NestedFilter
+// and NestedPath.
+func (s *ScriptSort) NestedSort(nestedSort *NestedSort) *ScriptSort {
+ s.nestedSort = nestedSort
+ return s
+}
+
+// Source returns the JSON-serializable data.
+func (s *ScriptSort) Source() (interface{}, error) {
+ if s.script == nil {
+ return nil, errors.New("ScriptSort expected a script")
+ }
+ source := make(map[string]interface{})
+ x := make(map[string]interface{})
+ source["_script"] = x
+
+ src, err := s.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ x["script"] = src
+
+ x["type"] = s.typ
+
+ if s.ascending {
+ x["order"] = "asc"
+ } else {
+ x["order"] = "desc"
+ }
+ if s.sortMode != nil {
+ x["mode"] = *s.sortMode
+ }
+ if s.nestedFilter != nil {
+ src, err := s.nestedFilter.Source()
+ if err != nil {
+ return nil, err
+ }
+ x["nested_filter"] = src
+ }
+ if s.nestedPath != nil {
+ x["nested_path"] = *s.nestedPath
+ }
+ if s.nestedSort != nil {
+ src, err := s.nestedSort.Source()
+ if err != nil {
+ return nil, err
+ }
+ x["nested"] = src
+ }
+ return source, nil
+}
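+
+// Example (an illustrative sketch; the script source and parameter are
+// assumptions, mirroring the tests below):
+//   script := elastic.NewScript("doc['price'].value * factor").Param("factor", 1.1)
+//   sorter := elastic.NewScriptSort(script, "number").Desc()
+//   ss := elastic.NewSearchSource().SortBy(sorter)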
+
+// -- NestedSort --
+
+// NestedSort is used for fields that are inside a nested object.
+// It takes a "path" argument and an optional nested filter that the
+// nested objects should match with in order to be taken into account
+// for sorting.
+//
+// NestedSort is available from 6.1 and replaces nestedFilter and nestedPath
+// in the other sorters.
+type NestedSort struct {
+ Sorter
+ path string
+ filter Query
+ nestedSort *NestedSort
+}
+
+// NewNestedSort creates a new NestedSort.
+func NewNestedSort(path string) *NestedSort {
+ return &NestedSort{path: path}
+}
+
+// Filter sets the filter.
+func (s *NestedSort) Filter(filter Query) *NestedSort {
+ s.filter = filter
+ return s
+}
+
+// NestedSort embeds another level of nested sorting.
+func (s *NestedSort) NestedSort(nestedSort *NestedSort) *NestedSort {
+ s.nestedSort = nestedSort
+ return s
+}
+
+// Source returns the JSON-serializable data.
+func (s *NestedSort) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ if s.path != "" {
+ source["path"] = s.path
+ }
+ if s.filter != nil {
+ src, err := s.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["filter"] = src
+ }
+ if s.nestedSort != nil {
+ src, err := s.nestedSort.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["nested"] = src
+ }
+
+ return source, nil
+}
diff --git a/vendor/github.com/olivere/elastic/sort_test.go b/vendor/github.com/olivere/elastic/sort_test.go
new file mode 100644
index 000000000..b54cbd98c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/sort_test.go
@@ -0,0 +1,278 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSortInfo(t *testing.T) {
+ builder := SortInfo{Field: "grade", Ascending: false}
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"grade":{"order":"desc"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSortInfoComplex(t *testing.T) {
+ builder := SortInfo{
+ Field: "price",
+ Ascending: false,
+ Missing: "_last",
+ SortMode: "avg",
+ NestedFilter: NewTermQuery("product.color", "blue"),
+ NestedPath: "variant",
+ }
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"price":{"missing":"_last","mode":"avg","nested_filter":{"term":{"product.color":"blue"}},"nested_path":"variant","order":"desc"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestScoreSort(t *testing.T) {
+ builder := NewScoreSort()
+ if builder.ascending {
+  t.Error("expected score sorter to be descending by default")
+ }
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"_score":{"order":"desc"}}` // ScoreSort is "desc" by default
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestScoreSortOrderAscending(t *testing.T) {
+ builder := NewScoreSort().Asc()
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"_score":{"order":"asc"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestScoreSortOrderDescending(t *testing.T) {
+ builder := NewScoreSort().Desc()
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"_score":{"order":"desc"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldSort(t *testing.T) {
+ builder := NewFieldSort("grade")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"grade":{"order":"asc"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldSortOrderDesc(t *testing.T) {
+ builder := NewFieldSort("grade").Desc()
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"grade":{"order":"desc"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldSortComplex(t *testing.T) {
+ builder := NewFieldSort("price").Desc().
+ SortMode("avg").
+ Missing("_last").
+ UnmappedType("product").
+ NestedFilter(NewTermQuery("product.color", "blue")).
+ NestedPath("variant")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"price":{"missing":"_last","mode":"avg","nested_filter":{"term":{"product.color":"blue"}},"nested_path":"variant","order":"desc","unmapped_type":"product"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceSort(t *testing.T) {
+ builder := NewGeoDistanceSort("pin.location").
+ Point(-70, 40).
+ Order(true).
+ Unit("km").
+ SortMode("min").
+ GeoDistance("plane")
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"_geo_distance":{"distance_type":"plane","mode":"min","order":"asc","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestGeoDistanceSortOrderDesc(t *testing.T) {
+ builder := NewGeoDistanceSort("pin.location").
+ Point(-70, 40).
+ Unit("km").
+ SortMode("min").
+ GeoDistance("arc").
+ Desc()
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"_geo_distance":{"distance_type":"arc","mode":"min","order":"desc","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestScriptSort(t *testing.T) {
+ builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Order(true)
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"_script":{"order":"asc","script":{"params":{"factor":1.1},"source":"doc['field_name'].value * factor"},"type":"number"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestScriptSortOrderDesc(t *testing.T) {
+ builder := NewScriptSort(NewScript("doc['field_name'].value * factor").Param("factor", 1.1), "number").Desc()
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"_script":{"order":"desc","script":{"params":{"factor":1.1},"source":"doc['field_name'].value * factor"},"type":"number"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNestedSort(t *testing.T) {
+ builder := NewNestedSort("offer").
+ Filter(NewTermQuery("offer.color", "blue"))
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"filter":{"term":{"offer.color":"blue"}},"path":"offer"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldSortWithNestedSort(t *testing.T) {
+ builder := NewFieldSort("offer.price").
+ Asc().
+ SortMode("avg").
+ NestedSort(
+ NewNestedSort("offer").Filter(NewTermQuery("offer.color", "blue")),
+ )
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"offer.price":{"mode":"avg","nested":{"filter":{"term":{"offer.color":"blue"}},"path":"offer"},"order":"asc"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/suggest_field.go b/vendor/github.com/olivere/elastic/suggest_field.go
new file mode 100644
index 000000000..8405a6f9e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggest_field.go
@@ -0,0 +1,90 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "errors"
+)
+
+// SuggestField can be used by the caller to specify a suggest field
+// at index time. For a detailed example, see e.g.
+// https://www.elastic.co/blog/you-complete-me.
+type SuggestField struct {
+ inputs []string
+ weight int
+ contextQueries []SuggesterContextQuery
+}
+
+// NewSuggestField creates a new SuggestField with the given inputs.
+func NewSuggestField(input ...string) *SuggestField {
+ return &SuggestField{
+ inputs: input,
+ weight: -1,
+ }
+}
+
+// Input adds the given inputs to the list of inputs of the suggest field.
+func (f *SuggestField) Input(input ...string) *SuggestField {
+ if f.inputs == nil {
+ f.inputs = make([]string, 0)
+ }
+ f.inputs = append(f.inputs, input...)
+ return f
+}
+
+// Weight sets the weight used to rank suggestions.
+func (f *SuggestField) Weight(weight int) *SuggestField {
+ f.weight = weight
+ return f
+}
+
+// ContextQuery adds context queries to the suggest field.
+func (f *SuggestField) ContextQuery(queries ...SuggesterContextQuery) *SuggestField {
+ f.contextQueries = append(f.contextQueries, queries...)
+ return f
+}
+
+// MarshalJSON encodes SuggestField into JSON.
+func (f *SuggestField) MarshalJSON() ([]byte, error) {
+ source := make(map[string]interface{})
+
+ if f.inputs != nil {
+ switch len(f.inputs) {
+ case 1:
+ source["input"] = f.inputs[0]
+ default:
+ source["input"] = f.inputs
+ }
+ }
+
+ if f.weight >= 0 {
+ source["weight"] = f.weight
+ }
+
+ switch len(f.contextQueries) {
+ case 0:
+ case 1:
+ src, err := f.contextQueries[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ source["contexts"] = src
+ default:
+ ctxq := make(map[string]interface{})
+ for _, query := range f.contextQueries {
+ src, err := query.Source()
+ if err != nil {
+ return nil, err
+ }
+ m, ok := src.(map[string]interface{})
+ if !ok {
+ return nil, errors.New("SuggesterContextQuery must be of type map[string]interface{}")
+ }
+ for k, v := range m {
+ ctxq[k] = v
+ }
+ }
+ source["contexts"] = ctxq
+ }
+
+ return json.Marshal(source)
+}
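+
+// Example (an illustrative sketch; the input values are assumptions).
+// A SuggestField is placed into a document at index time and serializes
+// itself via MarshalJSON:
+//
+//   field := elastic.NewSuggestField("Nirvana", "Nevermind").Weight(10)
+//   data, err := json.Marshal(field)
+//   if err != nil {
+//     // handle error
+//   }
+//   _ = data // {"input":["Nirvana","Nevermind"],"weight":10}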
diff --git a/vendor/github.com/olivere/elastic/suggest_field_test.go b/vendor/github.com/olivere/elastic/suggest_field_test.go
new file mode 100644
index 000000000..426875b2f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggest_field_test.go
@@ -0,0 +1,29 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSuggestField(t *testing.T) {
+ field := NewSuggestField().
+ Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch").
+ Weight(1).
+ ContextQuery(
+ NewSuggesterCategoryMapping("color").FieldName("color_field").DefaultValues("red", "green", "blue"),
+ NewSuggesterGeoMapping("location").Precision("5m").Neighbors(true).DefaultLocations(GeoPointFromLatLon(52.516275, 13.377704)),
+ )
+ data, err := json.Marshal(field)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"contexts":{"color":{"default":["red","green","blue"],"path":"color_field","type":"category"},"location":{"default":{"lat":52.516275,"lon":13.377704},"neighbors":true,"precision":["5m"],"type":"geo"}},"input":["Welcome to Golang and Elasticsearch.","Golang and Elasticsearch"],"weight":1}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/suggester.go b/vendor/github.com/olivere/elastic/suggester.go
new file mode 100644
index 000000000..f7dc48f90
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester.go
@@ -0,0 +1,15 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Suggester represents the generic suggester interface.
+// A suggester's only purpose is to return the
+// source of the query as a JSON-serializable
+// object. Returning a map[string]interface{}
+// will do.
+type Suggester interface {
+ Name() string
+ Source(includeName bool) (interface{}, error)
+}
diff --git a/vendor/github.com/olivere/elastic/suggester_completion.go b/vendor/github.com/olivere/elastic/suggester_completion.go
new file mode 100644
index 000000000..d2b4a326c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester_completion.go
@@ -0,0 +1,352 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// CompletionSuggester is a fast suggester for e.g. type-ahead completion.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-completion.html
+// for more details.
+type CompletionSuggester struct {
+ Suggester
+ name string
+ text string
+ prefix string
+ regex string
+ field string
+ analyzer string
+ size *int
+ shardSize *int
+ contextQueries []SuggesterContextQuery
+ payload interface{}
+
+ fuzzyOptions *FuzzyCompletionSuggesterOptions
+ regexOptions *RegexCompletionSuggesterOptions
+ skipDuplicates *bool
+}
+
+// NewCompletionSuggester creates a new completion suggester.
+func NewCompletionSuggester(name string) *CompletionSuggester {
+ return &CompletionSuggester{
+ name: name,
+ }
+}
+
+// Name returns the name of this suggester.
+func (q *CompletionSuggester) Name() string {
+ return q.name
+}
+
+// Text sets the text of the suggester request.
+func (q *CompletionSuggester) Text(text string) *CompletionSuggester {
+ q.text = text
+ return q
+}
+
+// Prefix sets the prefix to complete.
+func (q *CompletionSuggester) Prefix(prefix string) *CompletionSuggester {
+ q.prefix = prefix
+ return q
+}
+
+// PrefixWithEditDistance sets the prefix and the allowed edit distance for fuzzy matching.
+func (q *CompletionSuggester) PrefixWithEditDistance(prefix string, editDistance interface{}) *CompletionSuggester {
+ q.prefix = prefix
+ q.fuzzyOptions = NewFuzzyCompletionSuggesterOptions().EditDistance(editDistance)
+ return q
+}
+
+// PrefixWithOptions sets the prefix together with options for fuzzy completion.
+func (q *CompletionSuggester) PrefixWithOptions(prefix string, options *FuzzyCompletionSuggesterOptions) *CompletionSuggester {
+ q.prefix = prefix
+ q.fuzzyOptions = options
+ return q
+}
+
+// FuzzyOptions sets the options for fuzzy completion.
+func (q *CompletionSuggester) FuzzyOptions(options *FuzzyCompletionSuggesterOptions) *CompletionSuggester {
+ q.fuzzyOptions = options
+ return q
+}
+
+// Fuzziness sets the maximum edit distance, e.g. a number like 1 or 2,
+// or a string like "0..2" or ">5".
+func (q *CompletionSuggester) Fuzziness(fuzziness interface{}) *CompletionSuggester {
+ if q.fuzzyOptions == nil {
+ q.fuzzyOptions = NewFuzzyCompletionSuggesterOptions()
+ }
+ q.fuzzyOptions = q.fuzzyOptions.EditDistance(fuzziness)
+ return q
+}
+
+// Regex sets the regular expression to generate completions for.
+func (q *CompletionSuggester) Regex(regex string) *CompletionSuggester {
+ q.regex = regex
+ return q
+}
+
+// RegexWithOptions sets the regular expression together with regex options.
+func (q *CompletionSuggester) RegexWithOptions(regex string, options *RegexCompletionSuggesterOptions) *CompletionSuggester {
+ q.regex = regex
+ q.regexOptions = options
+ return q
+}
+
+// RegexOptions sets the options for regex completion.
+func (q *CompletionSuggester) RegexOptions(options *RegexCompletionSuggesterOptions) *CompletionSuggester {
+ q.regexOptions = options
+ return q
+}
+
+// SkipDuplicates, if set to true, filters out duplicate suggestions from the result.
+func (q *CompletionSuggester) SkipDuplicates(skipDuplicates bool) *CompletionSuggester {
+ q.skipDuplicates = &skipDuplicates
+ return q
+}
+
+// Field sets the field to fetch the completion candidates from.
+func (q *CompletionSuggester) Field(field string) *CompletionSuggester {
+ q.field = field
+ return q
+}
+
+// Analyzer sets the analyzer to analyze the suggest text with.
+func (q *CompletionSuggester) Analyzer(analyzer string) *CompletionSuggester {
+ q.analyzer = analyzer
+ return q
+}
+
+// Size sets the maximum number of suggestions to return.
+func (q *CompletionSuggester) Size(size int) *CompletionSuggester {
+ q.size = &size
+ return q
+}
+
+// ShardSize sets the maximum number of suggestions to gather from each shard.
+func (q *CompletionSuggester) ShardSize(shardSize int) *CompletionSuggester {
+ q.shardSize = &shardSize
+ return q
+}
+
+// ContextQuery adds a context query to filter or boost suggestions.
+func (q *CompletionSuggester) ContextQuery(query SuggesterContextQuery) *CompletionSuggester {
+ q.contextQueries = append(q.contextQueries, query)
+ return q
+}
+
+// ContextQueries adds context queries to filter or boost suggestions.
+func (q *CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *CompletionSuggester {
+ q.contextQueries = append(q.contextQueries, queries...)
+ return q
+}
+
+// completionSuggesterRequest is necessary because the order in which
+// the JSON elements are serialized and sent to Elasticsearch matters.
+// Plain maps caused trouble here because the text element
+// needs to go before the completion element.
+type completionSuggesterRequest struct {
+ Text string `json:"text,omitempty"`
+ Prefix string `json:"prefix,omitempty"`
+ Regex string `json:"regex,omitempty"`
+ Completion interface{} `json:"completion,omitempty"`
+}
+
+// Source creates the JSON data for the completion suggester.
+func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) {
+ cs := &completionSuggesterRequest{}
+
+ if q.text != "" {
+ cs.Text = q.text
+ }
+ if q.prefix != "" {
+ cs.Prefix = q.prefix
+ }
+ if q.regex != "" {
+ cs.Regex = q.regex
+ }
+
+ suggester := make(map[string]interface{})
+ cs.Completion = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ src, err := q.contextQueries[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ suggester["contexts"] = src
+ default:
+ ctxq := make(map[string]interface{})
+ for _, query := range q.contextQueries {
+ src, err := query.Source()
+ if err != nil {
+ return nil, err
+ }
+ // Merge the dictionary into ctxq
+ m, ok := src.(map[string]interface{})
+ if !ok {
+ return nil, errors.New("elastic: context query is not a map")
+ }
+ for k, v := range m {
+ ctxq[k] = v
+ }
+ }
+ suggester["contexts"] = ctxq
+ }
+
+ // Fuzzy options
+ if q.fuzzyOptions != nil {
+ src, err := q.fuzzyOptions.Source()
+ if err != nil {
+ return nil, err
+ }
+ suggester["fuzzy"] = src
+ }
+
+ // Regex options
+ if q.regexOptions != nil {
+ src, err := q.regexOptions.Source()
+ if err != nil {
+ return nil, err
+ }
+ suggester["regex"] = src
+ }
+
+ if q.skipDuplicates != nil {
+ suggester["skip_duplicates"] = *q.skipDuplicates
+ }
+
+ // TODO(oe) Add completion-suggester specific parameters here
+
+ if !includeName {
+ return cs, nil
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = cs
+ return source, nil
+}
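+
+// Example (an illustrative sketch; the suggester name and field are
+// assumptions). Source(true) wraps the request under the suggester's name:
+//
+//   s := NewCompletionSuggester("song-suggest").
+//     Prefix("nir").
+//     Field("suggest").
+//     SkipDuplicates(true)
+//   src, err := s.Source(true)
+//   if err != nil {
+//     // handle error
+//   }
+//   data, _ := json.Marshal(src)
+//   _ = data // {"song-suggest":{"prefix":"nir","completion":{"field":"suggest","skip_duplicates":true}}}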
+
+// -- Fuzzy options --
+
+// FuzzyCompletionSuggesterOptions represents the options for fuzzy completion suggester.
+type FuzzyCompletionSuggesterOptions struct {
+ editDistance interface{}
+ transpositions *bool
+ minLength *int
+ prefixLength *int
+ unicodeAware *bool
+ maxDeterminizedStates *int
+}
+
+// NewFuzzyCompletionSuggesterOptions initializes a new FuzzyCompletionSuggesterOptions instance.
+func NewFuzzyCompletionSuggesterOptions() *FuzzyCompletionSuggesterOptions {
+ return &FuzzyCompletionSuggesterOptions{}
+}
+
+// EditDistance specifies the maximum number of edits, e.g. a number like "1" or "2"
+// or a string like "0..2" or ">5". See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/common-options.html#fuzziness
+// for details.
+func (o *FuzzyCompletionSuggesterOptions) EditDistance(editDistance interface{}) *FuzzyCompletionSuggesterOptions {
+ o.editDistance = editDistance
+ return o
+}
+
+// Transpositions, if set to true, causes transpositions to be counted
+// as a single change instead of two (defaults to true).
+func (o *FuzzyCompletionSuggesterOptions) Transpositions(transpositions bool) *FuzzyCompletionSuggesterOptions {
+ o.transpositions = &transpositions
+ return o
+}
+
+// MinLength represents the minimum length of the input before fuzzy suggestions are returned (defaults to 3).
+func (o *FuzzyCompletionSuggesterOptions) MinLength(minLength int) *FuzzyCompletionSuggesterOptions {
+ o.minLength = &minLength
+ return o
+}
+
+// PrefixLength represents the minimum length of the input, which is not checked for
+// fuzzy alternatives (defaults to 1).
+func (o *FuzzyCompletionSuggesterOptions) PrefixLength(prefixLength int) *FuzzyCompletionSuggesterOptions {
+ o.prefixLength = &prefixLength
+ return o
+}
+
+// UnicodeAware, if set to true, measures fuzzy edit distance,
+// transpositions, and lengths in Unicode code points instead of in bytes.
+// This is slightly slower than measuring in raw bytes, so it defaults to false.
+func (o *FuzzyCompletionSuggesterOptions) UnicodeAware(unicodeAware bool) *FuzzyCompletionSuggesterOptions {
+ o.unicodeAware = &unicodeAware
+ return o
+}
+
+// MaxDeterminizedStates is currently undocumented in Elasticsearch. It represents
+// the maximum automaton states allowed for fuzzy expansion.
+func (o *FuzzyCompletionSuggesterOptions) MaxDeterminizedStates(max int) *FuzzyCompletionSuggesterOptions {
+ o.maxDeterminizedStates = &max
+ return o
+}
+
+// Source creates the JSON data.
+func (o *FuzzyCompletionSuggesterOptions) Source() (interface{}, error) {
+ out := make(map[string]interface{})
+
+ if o.editDistance != nil {
+ out["fuzziness"] = o.editDistance
+ }
+ if o.transpositions != nil {
+ out["transpositions"] = *o.transpositions
+ }
+ if o.minLength != nil {
+ out["min_length"] = *o.minLength
+ }
+ if o.prefixLength != nil {
+ out["prefix_length"] = *o.prefixLength
+ }
+ if o.unicodeAware != nil {
+ out["unicode_aware"] = *o.unicodeAware
+ }
+ if o.maxDeterminizedStates != nil {
+ out["max_determinized_states"] = *o.maxDeterminizedStates
+ }
+
+ return out, nil
+}
+
+// -- Regex options --
+
+// RegexCompletionSuggesterOptions represents the options for regex completion suggester.
+type RegexCompletionSuggesterOptions struct {
+ flags interface{} // string or int
+ maxDeterminizedStates *int
+}
+
+// NewRegexCompletionSuggesterOptions initializes a new RegexCompletionSuggesterOptions instance.
+func NewRegexCompletionSuggesterOptions() *RegexCompletionSuggesterOptions {
+ return &RegexCompletionSuggesterOptions{}
+}
+
+// Flags represents internal regex flags. See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-suggesters-completion.html#regex
+// for details.
+func (o *RegexCompletionSuggesterOptions) Flags(flags interface{}) *RegexCompletionSuggesterOptions {
+ o.flags = flags
+ return o
+}
+
+// MaxDeterminizedStates represents the maximum automaton states allowed for regex expansion.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-suggesters-completion.html#regex
+// for details.
+func (o *RegexCompletionSuggesterOptions) MaxDeterminizedStates(max int) *RegexCompletionSuggesterOptions {
+ o.maxDeterminizedStates = &max
+ return o
+}
+
+// Source creates the JSON data.
+func (o *RegexCompletionSuggesterOptions) Source() (interface{}, error) {
+ out := make(map[string]interface{})
+
+ if o.flags != nil {
+ out["flags"] = o.flags
+ }
+ if o.maxDeterminizedStates != nil {
+ out["max_determinized_states"] = *o.maxDeterminizedStates
+ }
+
+ return out, nil
+}
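+
+// Example (an illustrative sketch; the regular expression, the flag value
+// "INTERSECTION", and the state limit are assumptions):
+//
+//   s := NewCompletionSuggester("song-suggest").
+//     RegexWithOptions("n[ever|i]r",
+//       NewRegexCompletionSuggesterOptions().
+//         Flags("INTERSECTION").
+//         MaxDeterminizedStates(10000)).
+//     Field("suggest")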
diff --git a/vendor/github.com/olivere/elastic/suggester_completion_test.go b/vendor/github.com/olivere/elastic/suggester_completion_test.go
new file mode 100644
index 000000000..adbf58657
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester_completion_test.go
@@ -0,0 +1,110 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestCompletionSuggesterSource(t *testing.T) {
+ s := NewCompletionSuggester("song-suggest").
+ Text("n").
+ Field("suggest")
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestCompletionSuggesterPrefixSource(t *testing.T) {
+ s := NewCompletionSuggester("song-suggest").
+ Prefix("nir").
+ Field("suggest")
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"song-suggest":{"prefix":"nir","completion":{"field":"suggest"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestCompletionSuggesterPrefixWithFuzzySource(t *testing.T) {
+ s := NewCompletionSuggester("song-suggest").
+ Prefix("nor").
+ Field("suggest").
+ FuzzyOptions(NewFuzzyCompletionSuggesterOptions().EditDistance(2))
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"song-suggest":{"prefix":"nor","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestCompletionSuggesterRegexSource(t *testing.T) {
+ s := NewCompletionSuggester("song-suggest").
+ Regex("n[ever|i]r").
+ Field("suggest")
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"song-suggest":{"regex":"n[ever|i]r","completion":{"field":"suggest"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestCompletionSuggesterSourceWithMultipleContexts(t *testing.T) {
+ s := NewCompletionSuggester("song-suggest").
+ Text("n").
+ Field("suggest").
+ ContextQueries(
+ NewSuggesterCategoryQuery("artist", "Sting"),
+ NewSuggesterCategoryQuery("label", "BMG"),
+ )
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"song-suggest":{"text":"n","completion":{"contexts":{"artist":[{"context":"Sting"}],"label":[{"context":"BMG"}]},"field":"suggest"}}}`
+ if got != expected {
+ t.Errorf("expected %s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/suggester_context.go b/vendor/github.com/olivere/elastic/suggester_context.go
new file mode 100644
index 000000000..12877c1a6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester_context.go
@@ -0,0 +1,124 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "errors"
+
+// SuggesterContextQuery is used to define context information within
+// a suggestion request.
+type SuggesterContextQuery interface {
+ Source() (interface{}, error)
+}
+
+// ContextSuggester is a fast suggester, e.g. for type-ahead completion,
+// that supports filtering and boosting based on contexts.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html
+// for more details.
+type ContextSuggester struct {
+ Suggester
+ name string
+ prefix string
+ field string
+ size *int
+ contextQueries []SuggesterContextQuery
+}
+
+// NewContextSuggester creates a new ContextSuggester.
+func NewContextSuggester(name string) *ContextSuggester {
+ return &ContextSuggester{
+ name: name,
+ contextQueries: make([]SuggesterContextQuery, 0),
+ }
+}
+
+func (q *ContextSuggester) Name() string {
+ return q.name
+}
+
+func (q *ContextSuggester) Prefix(prefix string) *ContextSuggester {
+ q.prefix = prefix
+ return q
+}
+
+func (q *ContextSuggester) Field(field string) *ContextSuggester {
+ q.field = field
+ return q
+}
+
+func (q *ContextSuggester) Size(size int) *ContextSuggester {
+ q.size = &size
+ return q
+}
+
+func (q *ContextSuggester) ContextQuery(query SuggesterContextQuery) *ContextSuggester {
+ q.contextQueries = append(q.contextQueries, query)
+ return q
+}
+
+func (q *ContextSuggester) ContextQueries(queries ...SuggesterContextQuery) *ContextSuggester {
+ q.contextQueries = append(q.contextQueries, queries...)
+ return q
+}
+
+// contextSuggesterRequest is necessary because the order in which
+// the JSON elements are serialized for Elasticsearch is relevant.
+// We got into trouble when using plain maps because the prefix element
+// needs to go before the completion element.
+type contextSuggesterRequest struct {
+ Prefix string `json:"prefix"`
+ Completion interface{} `json:"completion"`
+}
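+
+// Editorial note: encoding/json preserves struct field order, so the struct
+// above always emits "prefix" before "completion", whereas a plain
+// map[string]interface{} would marshal its keys alphabetically and put
+// "completion" first.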
+
+// Source creates the source for the context suggester.
+func (q *ContextSuggester) Source(includeName bool) (interface{}, error) {
+ cs := &contextSuggesterRequest{}
+
+ if q.prefix != "" {
+ cs.Prefix = q.prefix
+ }
+
+ suggester := make(map[string]interface{})
+ cs.Completion = suggester
+
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ src, err := q.contextQueries[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ suggester["contexts"] = src
+ default:
+ ctxq := make(map[string]interface{})
+ for _, query := range q.contextQueries {
+ src, err := query.Source()
+ if err != nil {
+ return nil, err
+ }
+  // Merge the map into ctxq
+ m, ok := src.(map[string]interface{})
+ if !ok {
+ return nil, errors.New("elastic: context query is not a map")
+ }
+ for k, v := range m {
+ ctxq[k] = v
+ }
+ }
+ suggester["contexts"] = ctxq
+ }
+
+ if !includeName {
+ return cs, nil
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = cs
+ return source, nil
+}
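+
+// Usage sketch (editorial; assumes a completion-mapped field named
+// "suggest"):
+//
+//	s := NewContextSuggester("place_suggestion").
+//		Prefix("tim").
+//		Field("suggest").
+//		ContextQueries(NewSuggesterCategoryQuery("place_type", "cafe"))
+//	src, _ := s.Source(true)
+//	// marshals to:
+//	// {"place_suggestion":{"prefix":"tim","completion":{
+//	//   "contexts":{"place_type":[{"context":"cafe"}]},"field":"suggest"}}}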
diff --git a/vendor/github.com/olivere/elastic/suggester_context_category.go b/vendor/github.com/olivere/elastic/suggester_context_category.go
new file mode 100644
index 000000000..9c50651fa
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester_context_category.go
@@ -0,0 +1,119 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// -- SuggesterCategoryMapping --
+
+// SuggesterCategoryMapping provides a mapping for a category context in a suggester.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_category_mapping.
+type SuggesterCategoryMapping struct {
+ name string
+ fieldName string
+ defaultValues []string
+}
+
+// NewSuggesterCategoryMapping creates a new SuggesterCategoryMapping.
+func NewSuggesterCategoryMapping(name string) *SuggesterCategoryMapping {
+ return &SuggesterCategoryMapping{
+ name: name,
+ defaultValues: make([]string, 0),
+ }
+}
+
+func (q *SuggesterCategoryMapping) DefaultValues(values ...string) *SuggesterCategoryMapping {
+ q.defaultValues = append(q.defaultValues, values...)
+ return q
+}
+
+func (q *SuggesterCategoryMapping) FieldName(fieldName string) *SuggesterCategoryMapping {
+ q.fieldName = fieldName
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterCategoryMapping) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ x := make(map[string]interface{})
+ source[q.name] = x
+
+ x["type"] = "category"
+
+ switch len(q.defaultValues) {
+ case 0:
+ x["default"] = q.defaultValues
+ case 1:
+ x["default"] = q.defaultValues[0]
+ default:
+ x["default"] = q.defaultValues
+ }
+
+ if q.fieldName != "" {
+ x["path"] = q.fieldName
+ }
+ return source, nil
+}
+
+// -- SuggesterCategoryQuery --
+
+// SuggesterCategoryQuery enables querying a category context in a suggester.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_category_query.
+type SuggesterCategoryQuery struct {
+ name string
+ values map[string]*int
+}
+
+// NewSuggesterCategoryQuery creates a new SuggesterCategoryQuery.
+func NewSuggesterCategoryQuery(name string, values ...string) *SuggesterCategoryQuery {
+ q := &SuggesterCategoryQuery{
+ name: name,
+ values: make(map[string]*int),
+ }
+
+ if len(values) > 0 {
+ q.Values(values...)
+ }
+ return q
+}
+
+func (q *SuggesterCategoryQuery) Value(val string) *SuggesterCategoryQuery {
+ q.values[val] = nil
+ return q
+}
+
+func (q *SuggesterCategoryQuery) ValueWithBoost(val string, boost int) *SuggesterCategoryQuery {
+ q.values[val] = &boost
+ return q
+}
+
+func (q *SuggesterCategoryQuery) Values(values ...string) *SuggesterCategoryQuery {
+ for _, val := range values {
+ q.values[val] = nil
+ }
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterCategoryQuery) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ switch len(q.values) {
+ case 0:
+ source[q.name] = make([]string, 0)
+ default:
+ contexts := make([]interface{}, 0)
+ for val, boost := range q.values {
+ context := make(map[string]interface{})
+ context["context"] = val
+ if boost != nil {
+ context["boost"] = *boost
+ }
+ contexts = append(contexts, context)
+ }
+ source[q.name] = contexts
+ }
+
+ return source, nil
+}
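+
+// Editorial sketch: values added via ValueWithBoost carry a boost in the
+// serialized context, e.g.
+//
+//	q := NewSuggesterCategoryQuery("color", "red")
+//	q.ValueWithBoost("yellow", 4)
+//
+// marshals (element order follows map iteration and may vary) to
+// {"color":[{"context":"red"},{"boost":4,"context":"yellow"}]}.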
diff --git a/vendor/github.com/olivere/elastic/suggester_context_category_test.go b/vendor/github.com/olivere/elastic/suggester_context_category_test.go
new file mode 100644
index 000000000..46acd725e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester_context_category_test.go
@@ -0,0 +1,163 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSuggesterCategoryMapping(t *testing.T) {
+ q := NewSuggesterCategoryMapping("color").DefaultValues("red")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"color":{"default":"red","type":"category"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSuggesterCategoryMappingWithTwoDefaultValues(t *testing.T) {
+ q := NewSuggesterCategoryMapping("color").DefaultValues("red", "orange")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"color":{"default":["red","orange"],"type":"category"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSuggesterCategoryMappingWithFieldName(t *testing.T) {
+ q := NewSuggesterCategoryMapping("color").
+ DefaultValues("red", "orange").
+ FieldName("color_field")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"color":{"default":["red","orange"],"path":"color_field","type":"category"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSuggesterCategoryQuery(t *testing.T) {
+ q := NewSuggesterCategoryQuery("color", "red")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"color":[{"context":"red"}]}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSuggesterCategoryQueryWithTwoValues(t *testing.T) {
+ q := NewSuggesterCategoryQuery("color", "red", "yellow")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expectedOutcomes := []string{
+ `{"color":[{"context":"red"},{"context":"yellow"}]}`,
+ `{"color":[{"context":"yellow"},{"context":"red"}]}`,
+ }
+ var match bool
+ for _, expected := range expectedOutcomes {
+ if got == expected {
+ match = true
+ break
+ }
+ }
+ if !match {
+ t.Errorf("expected any of %v\n,got:\n%s", expectedOutcomes, got)
+ }
+}
+
+func TestSuggesterCategoryQueryWithBoost(t *testing.T) {
+ q := NewSuggesterCategoryQuery("color", "red")
+ q.ValueWithBoost("yellow", 4)
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expectedOutcomes := []string{
+ `{"color":[{"context":"red"},{"boost":4,"context":"yellow"}]}`,
+ `{"color":[{"boost":4,"context":"yellow"},{"context":"red"}]}`,
+ }
+ var match bool
+ for _, expected := range expectedOutcomes {
+ if got == expected {
+ match = true
+ break
+ }
+ }
+ if !match {
+ t.Errorf("expected any of %v\n,got:\n%v", expectedOutcomes, got)
+ }
+}
+
+func TestSuggesterCategoryQueryWithoutBoost(t *testing.T) {
+ q := NewSuggesterCategoryQuery("color", "red")
+ q.Value("yellow")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expectedOutcomes := []string{
+ `{"color":[{"context":"red"},{"context":"yellow"}]}`,
+ `{"color":[{"context":"yellow"},{"context":"red"}]}`,
+ }
+ var match bool
+ for _, expected := range expectedOutcomes {
+ if got == expected {
+ match = true
+ break
+ }
+ }
+ if !match {
+ t.Errorf("expected any of %v\n,got:\n%s", expectedOutcomes, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/suggester_context_geo.go b/vendor/github.com/olivere/elastic/suggester_context_geo.go
new file mode 100644
index 000000000..3fea63feb
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester_context_geo.go
@@ -0,0 +1,130 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// -- SuggesterGeoMapping --
+
+// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_geo_location_mapping.
+type SuggesterGeoMapping struct {
+ name string
+ defaultLocations []*GeoPoint
+ precision []string
+ neighbors *bool
+ fieldName string
+}
+
+// NewSuggesterGeoMapping creates a new SuggesterGeoMapping.
+func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping {
+ return &SuggesterGeoMapping{
+ name: name,
+ }
+}
+
+func (q *SuggesterGeoMapping) DefaultLocations(locations ...*GeoPoint) *SuggesterGeoMapping {
+ q.defaultLocations = append(q.defaultLocations, locations...)
+ return q
+}
+
+func (q *SuggesterGeoMapping) Precision(precision ...string) *SuggesterGeoMapping {
+ q.precision = append(q.precision, precision...)
+ return q
+}
+
+func (q *SuggesterGeoMapping) Neighbors(neighbors bool) *SuggesterGeoMapping {
+ q.neighbors = &neighbors
+ return q
+}
+
+func (q *SuggesterGeoMapping) FieldName(fieldName string) *SuggesterGeoMapping {
+ q.fieldName = fieldName
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterGeoMapping) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ x := make(map[string]interface{})
+ source[q.name] = x
+
+ x["type"] = "geo"
+
+ if len(q.precision) > 0 {
+ x["precision"] = q.precision
+ }
+ if q.neighbors != nil {
+ x["neighbors"] = *q.neighbors
+ }
+
+ switch len(q.defaultLocations) {
+ case 0:
+ case 1:
+ x["default"] = q.defaultLocations[0].Source()
+ default:
+ var arr []interface{}
+ for _, p := range q.defaultLocations {
+ arr = append(arr, p.Source())
+ }
+ x["default"] = arr
+ }
+
+ if q.fieldName != "" {
+ x["path"] = q.fieldName
+ }
+ return source, nil
+}
+
+// -- SuggesterGeoQuery --
+
+// SuggesterGeoQuery enables querying a geolocation context in a suggester.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_geo_location_query
+type SuggesterGeoQuery struct {
+ name string
+ location *GeoPoint
+ precision []string
+}
+
+// NewSuggesterGeoQuery creates a new SuggesterGeoQuery.
+func NewSuggesterGeoQuery(name string, location *GeoPoint) *SuggesterGeoQuery {
+ return &SuggesterGeoQuery{
+ name: name,
+ location: location,
+ precision: make([]string, 0),
+ }
+}
+
+func (q *SuggesterGeoQuery) Precision(precision ...string) *SuggesterGeoQuery {
+ q.precision = append(q.precision, precision...)
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterGeoQuery) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ if len(q.precision) == 0 {
+ if q.location != nil {
+ source[q.name] = q.location.Source()
+ }
+ } else {
+ x := make(map[string]interface{})
+ source[q.name] = x
+
+ if q.location != nil {
+ x["value"] = q.location.Source()
+ }
+
+ switch len(q.precision) {
+ case 0:
+ case 1:
+ x["precision"] = q.precision[0]
+ default:
+ x["precision"] = q.precision
+ }
+ }
+
+ return source, nil
+}
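+
+// Editorial sketch: with a single precision, the value is emitted as a
+// scalar rather than an array, e.g.
+//
+//	q := NewSuggesterGeoQuery("location", GeoPointFromLatLon(11.5, 62.71)).
+//		Precision("1km")
+//
+// marshals to
+// {"location":{"precision":"1km","value":{"lat":11.5,"lon":62.71}}}.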
diff --git a/vendor/github.com/olivere/elastic/suggester_context_geo_test.go b/vendor/github.com/olivere/elastic/suggester_context_geo_test.go
new file mode 100644
index 000000000..b1ab2f495
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester_context_geo_test.go
@@ -0,0 +1,48 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSuggesterGeoMapping(t *testing.T) {
+ q := NewSuggesterGeoMapping("location").
+ Precision("1km", "5m").
+ Neighbors(true).
+ FieldName("pin").
+ DefaultLocations(GeoPointFromLatLon(0.0, 0.0))
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSuggesterGeoQuery(t *testing.T) {
+ q := NewSuggesterGeoQuery("location", GeoPointFromLatLon(11.5, 62.71)).Precision("1km")
+ src, err := q.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"location":{"precision":"1km","value":{"lat":11.5,"lon":62.71}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/suggester_context_test.go b/vendor/github.com/olivere/elastic/suggester_context_test.go
new file mode 100644
index 000000000..045ccb2f4
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester_context_test.go
@@ -0,0 +1,55 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestContextSuggesterSource(t *testing.T) {
+ s := NewContextSuggester("place_suggestion").
+ Prefix("tim").
+ Field("suggest")
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"place_suggestion":{"prefix":"tim","completion":{"field":"suggest"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestContextSuggesterSourceWithMultipleContexts(t *testing.T) {
+ s := NewContextSuggester("place_suggestion").
+ Prefix("tim").
+ Field("suggest").
+ ContextQueries(
+ NewSuggesterCategoryQuery("place_type", "cafe", "restaurants"),
+ )
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ // Due to the randomized iteration order of Go maps, there are two different valid expected outcomes.
+ expected := `{"place_suggestion":{"prefix":"tim","completion":{"contexts":{"place_type":[{"context":"cafe"},{"context":"restaurants"}]},"field":"suggest"}}}`
+ if got != expected {
+ expected := `{"place_suggestion":{"prefix":"tim","completion":{"contexts":{"place_type":[{"context":"restaurants"},{"context":"cafe"}]},"field":"suggest"}}}`
+ if got != expected {
+ t.Errorf("expected %s\n,got:\n%s", expected, got)
+ }
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/suggester_phrase.go b/vendor/github.com/olivere/elastic/suggester_phrase.go
new file mode 100644
index 000000000..2f6b6a326
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester_phrase.go
@@ -0,0 +1,546 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PhraseSuggester provides an API to access word alternatives
+// on a per-token basis within a certain string distance.
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html.
+type PhraseSuggester struct {
+ Suggester
+ name string
+ text string
+ field string
+ analyzer string
+ size *int
+ shardSize *int
+ contextQueries []SuggesterContextQuery
+
+ // fields specific to a phrase suggester
+ maxErrors *float64
+ separator *string
+ realWordErrorLikelihood *float64
+ confidence *float64
+ generators map[string][]CandidateGenerator
+ gramSize *int
+ smoothingModel SmoothingModel
+ forceUnigrams *bool
+ tokenLimit *int
+ preTag, postTag *string
+ collateQuery *string
+ collatePreference *string
+ collateParams map[string]interface{}
+ collatePrune *bool
+}
+
+// NewPhraseSuggester creates a new PhraseSuggester.
+func NewPhraseSuggester(name string) *PhraseSuggester {
+ return &PhraseSuggester{
+ name: name,
+ collateParams: make(map[string]interface{}),
+ }
+}
+
+func (q *PhraseSuggester) Name() string {
+ return q.name
+}
+
+func (q *PhraseSuggester) Text(text string) *PhraseSuggester {
+ q.text = text
+ return q
+}
+
+func (q *PhraseSuggester) Field(field string) *PhraseSuggester {
+ q.field = field
+ return q
+}
+
+func (q *PhraseSuggester) Analyzer(analyzer string) *PhraseSuggester {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q *PhraseSuggester) Size(size int) *PhraseSuggester {
+ q.size = &size
+ return q
+}
+
+func (q *PhraseSuggester) ShardSize(shardSize int) *PhraseSuggester {
+ q.shardSize = &shardSize
+ return q
+}
+
+func (q *PhraseSuggester) ContextQuery(query SuggesterContextQuery) *PhraseSuggester {
+ q.contextQueries = append(q.contextQueries, query)
+ return q
+}
+
+func (q *PhraseSuggester) ContextQueries(queries ...SuggesterContextQuery) *PhraseSuggester {
+ q.contextQueries = append(q.contextQueries, queries...)
+ return q
+}
+
+func (q *PhraseSuggester) GramSize(gramSize int) *PhraseSuggester {
+ if gramSize >= 1 {
+ q.gramSize = &gramSize
+ }
+ return q
+}
+
+func (q *PhraseSuggester) MaxErrors(maxErrors float64) *PhraseSuggester {
+ q.maxErrors = &maxErrors
+ return q
+}
+
+func (q *PhraseSuggester) Separator(separator string) *PhraseSuggester {
+ q.separator = &separator
+ return q
+}
+
+func (q *PhraseSuggester) RealWordErrorLikelihood(realWordErrorLikelihood float64) *PhraseSuggester {
+ q.realWordErrorLikelihood = &realWordErrorLikelihood
+ return q
+}
+
+func (q *PhraseSuggester) Confidence(confidence float64) *PhraseSuggester {
+ q.confidence = &confidence
+ return q
+}
+
+func (q *PhraseSuggester) CandidateGenerator(generator CandidateGenerator) *PhraseSuggester {
+ if q.generators == nil {
+ q.generators = make(map[string][]CandidateGenerator)
+ }
+ typ := generator.Type()
+ if _, found := q.generators[typ]; !found {
+ q.generators[typ] = make([]CandidateGenerator, 0)
+ }
+ q.generators[typ] = append(q.generators[typ], generator)
+ return q
+}
+
+func (q *PhraseSuggester) CandidateGenerators(generators ...CandidateGenerator) *PhraseSuggester {
+ for _, g := range generators {
+ q = q.CandidateGenerator(g)
+ }
+ return q
+}
+
+func (q *PhraseSuggester) ClearCandidateGenerator() *PhraseSuggester {
+ q.generators = nil
+ return q
+}
+
+func (q *PhraseSuggester) ForceUnigrams(forceUnigrams bool) *PhraseSuggester {
+ q.forceUnigrams = &forceUnigrams
+ return q
+}
+
+func (q *PhraseSuggester) SmoothingModel(smoothingModel SmoothingModel) *PhraseSuggester {
+ q.smoothingModel = smoothingModel
+ return q
+}
+
+func (q *PhraseSuggester) TokenLimit(tokenLimit int) *PhraseSuggester {
+ q.tokenLimit = &tokenLimit
+ return q
+}
+
+func (q *PhraseSuggester) Highlight(preTag, postTag string) *PhraseSuggester {
+ q.preTag = &preTag
+ q.postTag = &postTag
+ return q
+}
+
+func (q *PhraseSuggester) CollateQuery(collateQuery string) *PhraseSuggester {
+ q.collateQuery = &collateQuery
+ return q
+}
+
+func (q *PhraseSuggester) CollatePreference(collatePreference string) *PhraseSuggester {
+ q.collatePreference = &collatePreference
+ return q
+}
+
+func (q *PhraseSuggester) CollateParams(collateParams map[string]interface{}) *PhraseSuggester {
+ q.collateParams = collateParams
+ return q
+}
+
+func (q *PhraseSuggester) CollatePrune(collatePrune bool) *PhraseSuggester {
+ q.collatePrune = &collatePrune
+ return q
+}
+
+// phraseSuggesterRequest is necessary because the order in which
+// the JSON elements are serialized for Elasticsearch is relevant.
+// We got into trouble when using plain maps because the text element
+// needs to go before the phrase element.
+type phraseSuggesterRequest struct {
+ Text string `json:"text"`
+ Phrase interface{} `json:"phrase"`
+}
+
+// Source generates the source for the phrase suggester.
+func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) {
+ ps := &phraseSuggesterRequest{}
+
+ if q.text != "" {
+ ps.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ ps.Phrase = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ src, err := q.contextQueries[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ suggester["contexts"] = src
+ default:
+ var ctxq []interface{}
+ for _, query := range q.contextQueries {
+ src, err := query.Source()
+ if err != nil {
+ return nil, err
+ }
+ ctxq = append(ctxq, src)
+ }
+ suggester["contexts"] = ctxq
+ }
+
+ // Phrase-specific parameters
+ if q.realWordErrorLikelihood != nil {
+ suggester["real_word_error_likelihood"] = *q.realWordErrorLikelihood
+ }
+ if q.confidence != nil {
+ suggester["confidence"] = *q.confidence
+ }
+ if q.separator != nil {
+ suggester["separator"] = *q.separator
+ }
+ if q.maxErrors != nil {
+ suggester["max_errors"] = *q.maxErrors
+ }
+ if q.gramSize != nil {
+ suggester["gram_size"] = *q.gramSize
+ }
+ if q.forceUnigrams != nil {
+ suggester["force_unigrams"] = *q.forceUnigrams
+ }
+ if q.tokenLimit != nil {
+ suggester["token_limit"] = *q.tokenLimit
+ }
+ if len(q.generators) > 0 {
+ for typ, generators := range q.generators {
+ var arr []interface{}
+ for _, g := range generators {
+ src, err := g.Source()
+ if err != nil {
+ return nil, err
+ }
+ arr = append(arr, src)
+ }
+ suggester[typ] = arr
+ }
+ }
+ if q.smoothingModel != nil {
+ src, err := q.smoothingModel.Source()
+ if err != nil {
+ return nil, err
+ }
+ x := make(map[string]interface{})
+ x[q.smoothingModel.Type()] = src
+ suggester["smoothing"] = x
+ }
+ if q.preTag != nil {
+ hl := make(map[string]string)
+ hl["pre_tag"] = *q.preTag
+ if q.postTag != nil {
+ hl["post_tag"] = *q.postTag
+ }
+ suggester["highlight"] = hl
+ }
+ if q.collateQuery != nil {
+ collate := make(map[string]interface{})
+ suggester["collate"] = collate
+ if q.collateQuery != nil {
+ collate["query"] = *q.collateQuery
+ }
+ if q.collatePreference != nil {
+ collate["preference"] = *q.collatePreference
+ }
+ if len(q.collateParams) > 0 {
+ collate["params"] = q.collateParams
+ }
+ if q.collatePrune != nil {
+ collate["prune"] = *q.collatePrune
+ }
+ }
+
+ if !includeName {
+ return ps, nil
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = ps
+ return source, nil
+}
+
+// -- Smoothing models --
+
+type SmoothingModel interface {
+ Type() string
+ Source() (interface{}, error)
+}
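+
+// Editorial note: PhraseSuggester.Source wraps a model under its Type() key,
+// i.e. {"smoothing":{<type>:<source>}}. A Laplace model with alpha 0.7, for
+// example, serializes as {"smoothing":{"laplace":{"alpha":0.7}}}.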
+
+// StupidBackoffSmoothingModel implements a stupid backoff smoothing model.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type StupidBackoffSmoothingModel struct {
+ discount float64
+}
+
+func NewStupidBackoffSmoothingModel(discount float64) *StupidBackoffSmoothingModel {
+ return &StupidBackoffSmoothingModel{
+ discount: discount,
+ }
+}
+
+func (sm *StupidBackoffSmoothingModel) Type() string {
+ return "stupid_backoff"
+}
+
+func (sm *StupidBackoffSmoothingModel) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ source["discount"] = sm.discount
+ return source, nil
+}
+
+// --
+
+// LaplaceSmoothingModel implements a laplace smoothing model.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type LaplaceSmoothingModel struct {
+ alpha float64
+}
+
+func NewLaplaceSmoothingModel(alpha float64) *LaplaceSmoothingModel {
+ return &LaplaceSmoothingModel{
+ alpha: alpha,
+ }
+}
+
+func (sm *LaplaceSmoothingModel) Type() string {
+ return "laplace"
+}
+
+func (sm *LaplaceSmoothingModel) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ source["alpha"] = sm.alpha
+ return source, nil
+}
+
+// --
+
+// LinearInterpolationSmoothingModel implements a linear interpolation
+// smoothing model.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type LinearInterpolationSmoothingModel struct {
+ trigramLambda float64
+ bigramLambda float64
+ unigramLambda float64
+}
+
+func NewLinearInterpolationSmoothingModel(trigramLambda, bigramLambda, unigramLambda float64) *LinearInterpolationSmoothingModel {
+ return &LinearInterpolationSmoothingModel{
+  trigramLambda: trigramLambda,
+ bigramLambda: bigramLambda,
+ unigramLambda: unigramLambda,
+ }
+}
+
+func (sm *LinearInterpolationSmoothingModel) Type() string {
+ return "linear_interpolation"
+}
+
+func (sm *LinearInterpolationSmoothingModel) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ source["trigram_lambda"] = sm.trigramLamda
+ source["bigram_lambda"] = sm.bigramLambda
+ source["unigram_lambda"] = sm.unigramLambda
+ return source, nil
+}
+
+// -- CandidateGenerator --
+
+type CandidateGenerator interface {
+ Type() string
+ Source() (interface{}, error)
+}
+
+// DirectCandidateGenerator implements a direct candidate generator.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type DirectCandidateGenerator struct {
+ field string
+ preFilter *string
+ postFilter *string
+ suggestMode *string
+ accuracy *float64
+ size *int
+ sort *string
+ stringDistance *string
+ maxEdits *int
+ maxInspections *int
+ maxTermFreq *float64
+ prefixLength *int
+ minWordLength *int
+ minDocFreq *float64
+}
+
+func NewDirectCandidateGenerator(field string) *DirectCandidateGenerator {
+ return &DirectCandidateGenerator{
+ field: field,
+ }
+}
+
+func (g *DirectCandidateGenerator) Type() string {
+ return "direct_generator"
+}
+
+func (g *DirectCandidateGenerator) Field(field string) *DirectCandidateGenerator {
+ g.field = field
+ return g
+}
+
+func (g *DirectCandidateGenerator) PreFilter(preFilter string) *DirectCandidateGenerator {
+ g.preFilter = &preFilter
+ return g
+}
+
+func (g *DirectCandidateGenerator) PostFilter(postFilter string) *DirectCandidateGenerator {
+ g.postFilter = &postFilter
+ return g
+}
+
+func (g *DirectCandidateGenerator) SuggestMode(suggestMode string) *DirectCandidateGenerator {
+ g.suggestMode = &suggestMode
+ return g
+}
+
+func (g *DirectCandidateGenerator) Accuracy(accuracy float64) *DirectCandidateGenerator {
+ g.accuracy = &accuracy
+ return g
+}
+
+func (g *DirectCandidateGenerator) Size(size int) *DirectCandidateGenerator {
+ g.size = &size
+ return g
+}
+
+func (g *DirectCandidateGenerator) Sort(sort string) *DirectCandidateGenerator {
+ g.sort = &sort
+ return g
+}
+
+func (g *DirectCandidateGenerator) StringDistance(stringDistance string) *DirectCandidateGenerator {
+ g.stringDistance = &stringDistance
+ return g
+}
+
+func (g *DirectCandidateGenerator) MaxEdits(maxEdits int) *DirectCandidateGenerator {
+ g.maxEdits = &maxEdits
+ return g
+}
+
+func (g *DirectCandidateGenerator) MaxInspections(maxInspections int) *DirectCandidateGenerator {
+ g.maxInspections = &maxInspections
+ return g
+}
+
+func (g *DirectCandidateGenerator) MaxTermFreq(maxTermFreq float64) *DirectCandidateGenerator {
+ g.maxTermFreq = &maxTermFreq
+ return g
+}
+
+func (g *DirectCandidateGenerator) PrefixLength(prefixLength int) *DirectCandidateGenerator {
+ g.prefixLength = &prefixLength
+ return g
+}
+
+func (g *DirectCandidateGenerator) MinWordLength(minWordLength int) *DirectCandidateGenerator {
+ g.minWordLength = &minWordLength
+ return g
+}
+
+func (g *DirectCandidateGenerator) MinDocFreq(minDocFreq float64) *DirectCandidateGenerator {
+ g.minDocFreq = &minDocFreq
+ return g
+}
+
+func (g *DirectCandidateGenerator) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if g.field != "" {
+ source["field"] = g.field
+ }
+ if g.suggestMode != nil {
+ source["suggest_mode"] = *g.suggestMode
+ }
+ if g.accuracy != nil {
+ source["accuracy"] = *g.accuracy
+ }
+ if g.size != nil {
+ source["size"] = *g.size
+ }
+ if g.sort != nil {
+ source["sort"] = *g.sort
+ }
+ if g.stringDistance != nil {
+ source["string_distance"] = *g.stringDistance
+ }
+ if g.maxEdits != nil {
+ source["max_edits"] = *g.maxEdits
+ }
+ if g.maxInspections != nil {
+ source["max_inspections"] = *g.maxInspections
+ }
+ if g.maxTermFreq != nil {
+ source["max_term_freq"] = *g.maxTermFreq
+ }
+ if g.prefixLength != nil {
+ source["prefix_length"] = *g.prefixLength
+ }
+ if g.minWordLength != nil {
+ source["min_word_length"] = *g.minWordLength
+ }
+ if g.minDocFreq != nil {
+ source["min_doc_freq"] = *g.minDocFreq
+ }
+ if g.preFilter != nil {
+ source["pre_filter"] = *g.preFilter
+ }
+ if g.postFilter != nil {
+ source["post_filter"] = *g.postFilter
+ }
+ return source, nil
+}
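+
+// Editorial note: PhraseSuggester groups generators by Type(), so several
+// direct generators serialize as a single "direct_generator" array, e.g.
+// {"direct_generator":[{"field":"body",...},{"field":"reverse",...}]}.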
diff --git a/vendor/github.com/olivere/elastic/suggester_phrase_test.go b/vendor/github.com/olivere/elastic/suggester_phrase_test.go
new file mode 100644
index 000000000..63dde686e
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester_phrase_test.go
@@ -0,0 +1,169 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestPhraseSuggesterSource(t *testing.T) {
+ s := NewPhraseSuggester("name").
+ Text("Xor the Got-Jewel").
+ Analyzer("body").
+ Field("bigram").
+ Size(1).
+ RealWordErrorLikelihood(0.95).
+ MaxErrors(0.5).
+ GramSize(2).
+ Highlight("<em>", "</em>")
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPhraseSuggesterSourceWithContextQuery(t *testing.T) {
+ geomapQ := NewSuggesterGeoMapping("location").
+ Precision("1km", "5m").
+ Neighbors(true).
+ FieldName("pin").
+ DefaultLocations(GeoPointFromLatLon(0.0, 0.0))
+
+ s := NewPhraseSuggester("name").
+ Text("Xor the Got-Jewel").
+ Analyzer("body").
+ Field("bigram").
+ Size(1).
+ RealWordErrorLikelihood(0.95).
+ MaxErrors(0.5).
+ GramSize(2).
+ Highlight("<em>", "</em>").
+ ContextQuery(geomapQ)
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","contexts":{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}},"field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPhraseSuggesterComplexSource(t *testing.T) {
+ g1 := NewDirectCandidateGenerator("body").
+ SuggestMode("always").
+ MinWordLength(1)
+
+ g2 := NewDirectCandidateGenerator("reverse").
+ SuggestMode("always").
+ MinWordLength(1).
+ PreFilter("reverse").
+ PostFilter("reverse")
+
+ s := NewPhraseSuggester("simple_phrase").
+ Text("Xor the Got-Jewel").
+ Analyzer("body").
+ Field("bigram").
+ Size(4).
+ RealWordErrorLikelihood(0.95).
+ Confidence(2.0).
+ GramSize(2).
+ CandidateGenerators(g1, g2).
+ CollateQuery(`"match":{"{{field_name}}" : "{{suggestion}}"}`).
+ CollateParams(map[string]interface{}{"field_name": "title"}).
+ CollatePreference("_primary").
+ CollatePrune(true)
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"simple_phrase":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","collate":{"params":{"field_name":"title"},"preference":"_primary","prune":true,"query":"\"match\":{\"{{field_name}}\" : \"{{suggestion}}\"}"},"confidence":2,"direct_generator":[{"field":"body","min_word_length":1,"suggest_mode":"always"},{"field":"reverse","min_word_length":1,"post_filter":"reverse","pre_filter":"reverse","suggest_mode":"always"}],"field":"bigram","gram_size":2,"real_word_error_likelihood":0.95,"size":4}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestPhraseStupidBackoffSmoothingModel(t *testing.T) {
+ s := NewStupidBackoffSmoothingModel(0.42)
+ src, err := s.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ // The source does NOT include the smoothing model type!
+ expected := `{"discount":0.42}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+ if s.Type() != "stupid_backoff" {
+ t.Errorf("expected %q, got: %q", "stupid_backoff", s.Type())
+ }
+}
+
+func TestPhraseLaplaceSmoothingModel(t *testing.T) {
+ s := NewLaplaceSmoothingModel(0.63)
+ src, err := s.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ // The source does NOT include the smoothing model type!
+ expected := `{"alpha":0.63}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+ if s.Type() != "laplace" {
+ t.Errorf("expected %q, got: %q", "laplace", s.Type())
+ }
+}
+
+func TestLinearInterpolationSmoothingModel(t *testing.T) {
+ s := NewLinearInterpolationSmoothingModel(0.3, 0.2, 0.05)
+ src, err := s.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ // The source does NOT include the smoothing model type!
+ expected := `{"bigram_lambda":0.2,"trigram_lambda":0.3,"unigram_lambda":0.05}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+ if s.Type() != "linear_interpolation" {
+ t.Errorf("expected %q, got: %q", "linear_interpolation", s.Type())
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/suggester_term.go b/vendor/github.com/olivere/elastic/suggester_term.go
new file mode 100644
index 000000000..69e1531f6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/suggester_term.go
@@ -0,0 +1,233 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermSuggester suggests terms based on edit distance.
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-term.html.
+type TermSuggester struct {
+ Suggester
+ name string
+ text string
+ field string
+ analyzer string
+ size *int
+ shardSize *int
+ contextQueries []SuggesterContextQuery
+
+ // fields specific to term suggester
+ suggestMode string
+ accuracy *float64
+ sort string
+ stringDistance string
+ maxEdits *int
+ maxInspections *int
+ maxTermFreq *float64
+ prefixLength *int
+ minWordLength *int
+ minDocFreq *float64
+}
+
+// NewTermSuggester creates a new TermSuggester.
+func NewTermSuggester(name string) *TermSuggester {
+ return &TermSuggester{
+ name: name,
+ }
+}
+
+func (q *TermSuggester) Name() string {
+ return q.name
+}
+
+func (q *TermSuggester) Text(text string) *TermSuggester {
+ q.text = text
+ return q
+}
+
+func (q *TermSuggester) Field(field string) *TermSuggester {
+ q.field = field
+ return q
+}
+
+func (q *TermSuggester) Analyzer(analyzer string) *TermSuggester {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q *TermSuggester) Size(size int) *TermSuggester {
+ q.size = &size
+ return q
+}
+
+func (q *TermSuggester) ShardSize(shardSize int) *TermSuggester {
+ q.shardSize = &shardSize
+ return q
+}
+
+func (q *TermSuggester) ContextQuery(query SuggesterContextQuery) *TermSuggester {
+ q.contextQueries = append(q.contextQueries, query)
+ return q
+}
+
+func (q *TermSuggester) ContextQueries(queries ...SuggesterContextQuery) *TermSuggester {
+ q.contextQueries = append(q.contextQueries, queries...)
+ return q
+}
+
+func (q *TermSuggester) SuggestMode(suggestMode string) *TermSuggester {
+ q.suggestMode = suggestMode
+ return q
+}
+
+func (q *TermSuggester) Accuracy(accuracy float64) *TermSuggester {
+ q.accuracy = &accuracy
+ return q
+}
+
+func (q *TermSuggester) Sort(sort string) *TermSuggester {
+ q.sort = sort
+ return q
+}
+
+func (q *TermSuggester) StringDistance(stringDistance string) *TermSuggester {
+ q.stringDistance = stringDistance
+ return q
+}
+
+func (q *TermSuggester) MaxEdits(maxEdits int) *TermSuggester {
+ q.maxEdits = &maxEdits
+ return q
+}
+
+func (q *TermSuggester) MaxInspections(maxInspections int) *TermSuggester {
+ q.maxInspections = &maxInspections
+ return q
+}
+
+func (q *TermSuggester) MaxTermFreq(maxTermFreq float64) *TermSuggester {
+ q.maxTermFreq = &maxTermFreq
+ return q
+}
+
+func (q *TermSuggester) PrefixLength(prefixLength int) *TermSuggester {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q *TermSuggester) MinWordLength(minWordLength int) *TermSuggester {
+ q.minWordLength = &minWordLength
+ return q
+}
+
+func (q *TermSuggester) MinDocFreq(minDocFreq float64) *TermSuggester {
+ q.minDocFreq = &minDocFreq
+ return q
+}
+
+// termSuggesterRequest is necessary because the order in which
+// the JSON elements are serialized for Elasticsearch is relevant.
+// We got into trouble when using plain maps because the text element
+// needs to go before the term element.
+type termSuggesterRequest struct {
+ Text string `json:"text"`
+ Term interface{} `json:"term"`
+}
+
+// Source generates the source for the term suggester.
+func (q *TermSuggester) Source(includeName bool) (interface{}, error) {
+ // "suggest" : {
+ // "my-suggest-1" : {
+ // "text" : "the amsterdma meetpu",
+ // "term" : {
+ // "field" : "body"
+ // }
+ // },
+ // "my-suggest-2" : {
+ // "text" : "the rottredam meetpu",
+ // "term" : {
+ //    "field" : "title"
+ // }
+ // }
+ // }
+ ts := &termSuggesterRequest{}
+ if q.text != "" {
+ ts.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ ts.Term = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ src, err := q.contextQueries[0].Source()
+ if err != nil {
+ return nil, err
+ }
+ suggester["contexts"] = src
+ default:
+ ctxq := make([]interface{}, len(q.contextQueries))
+ for i, query := range q.contextQueries {
+ src, err := query.Source()
+ if err != nil {
+ return nil, err
+ }
+ ctxq[i] = src
+ }
+ suggester["contexts"] = ctxq
+ }
+
+ // Specific to term suggester
+ if q.suggestMode != "" {
+ suggester["suggest_mode"] = q.suggestMode
+ }
+ if q.accuracy != nil {
+ suggester["accuracy"] = *q.accuracy
+ }
+ if q.sort != "" {
+ suggester["sort"] = q.sort
+ }
+ if q.stringDistance != "" {
+ suggester["string_distance"] = q.stringDistance
+ }
+ if q.maxEdits != nil {
+ suggester["max_edits"] = *q.maxEdits
+ }
+ if q.maxInspections != nil {
+ suggester["max_inspections"] = *q.maxInspections
+ }
+ if q.maxTermFreq != nil {
+ suggester["max_term_freq"] = *q.maxTermFreq
+ }
+ if q.prefixLength != nil {
+ suggester["prefix_length"] = *q.prefixLength
+ }
+ if q.minWordLength != nil {
+ suggester["min_word_len"] = *q.minWordLength
+ }
+ if q.minDocFreq != nil {
+ suggester["min_doc_freq"] = *q.minDocFreq
+ }
+
+ if !includeName {
+ return ts, nil
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = ts
+ return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy_test.go b/vendor/github.com/olivere/elastic/suggester_term_test.go
index aae1db11b..d3250f69a 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy_test.go
+++ b/vendor/github.com/olivere/elastic/suggester_term_test.go
@@ -9,11 +9,10 @@ import (
"testing"
)
-func TestFuzzyCompletionSuggesterSource(t *testing.T) {
- s := NewFuzzyCompletionSuggester("song-suggest").
+func TestTermSuggesterSource(t *testing.T) {
+ s := NewTermSuggester("name").
Text("n").
- Field("suggest").
- Fuzziness(2)
+ Field("suggest")
src, err := s.Source(true)
if err != nil {
t.Fatal(err)
@@ -23,17 +22,17 @@ func TestFuzzyCompletionSuggesterSource(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}}}`
+ expected := `{"name":{"text":"n","term":{"field":"suggest"}}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
}
-func TestFuzzyCompletionSuggesterWithStringFuzzinessSource(t *testing.T) {
- s := NewFuzzyCompletionSuggester("song-suggest").
+func TestTermSuggesterWithPrefixLengthSource(t *testing.T) {
+ s := NewTermSuggester("name").
Text("n").
Field("suggest").
- Fuzziness("1..4")
+ PrefixLength(0)
src, err := s.Source(true)
if err != nil {
t.Fatal(err)
@@ -43,7 +42,7 @@ func TestFuzzyCompletionSuggesterWithStringFuzzinessSource(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"song-suggest":{"text":"n","completion":{"field":"suggest","fuzzy":{"fuzziness":"1..4"}}}}`
+ expected := `{"name":{"text":"n","term":{"field":"suggest","prefix_length":0}}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
diff --git a/vendor/github.com/olivere/elastic/tasks_cancel.go b/vendor/github.com/olivere/elastic/tasks_cancel.go
new file mode 100644
index 000000000..84f8aec35
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/tasks_cancel.go
@@ -0,0 +1,149 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// TasksCancelService can cancel long-running tasks.
+// It is supported as of Elasticsearch 2.3.0.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/tasks-cancel.html
+// for details.
+type TasksCancelService struct {
+ client *Client
+ pretty bool
+ taskId *int64
+ actions []string
+ nodeId []string
+ parentNode string
+ parentTask *int64
+}
+
+// NewTasksCancelService creates a new TasksCancelService.
+func NewTasksCancelService(client *Client) *TasksCancelService {
+ return &TasksCancelService{
+ client: client,
+ actions: make([]string, 0),
+ nodeId: make([]string, 0),
+ }
+}
+
+// TaskId specifies the task to cancel. Set to -1 to cancel all tasks.
+func (s *TasksCancelService) TaskId(taskId int64) *TasksCancelService {
+ s.taskId = &taskId
+ return s
+}
+
+// Actions is a list of actions that should be cancelled. Leave empty to cancel all.
+func (s *TasksCancelService) Actions(actions []string) *TasksCancelService {
+ s.actions = actions
+ return s
+}
+
+// NodeId is a list of node IDs or names to limit the returned information;
+// use `_local` to return information from the node you're connecting to,
+// leave empty to get information from all nodes.
+func (s *TasksCancelService) NodeId(nodeId []string) *TasksCancelService {
+ s.nodeId = nodeId
+ return s
+}
+
+// ParentNode restricts cancellation to tasks with the specified parent node.
+func (s *TasksCancelService) ParentNode(parentNode string) *TasksCancelService {
+ s.parentNode = parentNode
+ return s
+}
+
+// ParentTask restricts cancellation to tasks with the specified parent
+// task id. Set to -1 to cancel all.
+func (s *TasksCancelService) ParentTask(parentTask int64) *TasksCancelService {
+ s.parentTask = &parentTask
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *TasksCancelService) Pretty(pretty bool) *TasksCancelService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *TasksCancelService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if s.taskId != nil {
+ path, err = uritemplates.Expand("/_tasks/{task_id}/_cancel", map[string]string{
+ "task_id": fmt.Sprintf("%d", *s.taskId),
+ })
+ } else {
+ path = "/_tasks/_cancel"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if len(s.actions) > 0 {
+ params.Set("actions", strings.Join(s.actions, ","))
+ }
+ if len(s.nodeId) > 0 {
+ params.Set("node_id", strings.Join(s.nodeId, ","))
+ }
+ if s.parentNode != "" {
+ params.Set("parent_node", s.parentNode)
+ }
+ if s.parentTask != nil {
+ params.Set("parent_task", fmt.Sprintf("%v", *s.parentTask))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *TasksCancelService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *TasksCancelService) Do(ctx context.Context) (*TasksListResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(TasksListResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
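+
+// Usage sketch (editorial; assumes a configured *Client and an illustrative
+// task id):
+//
+//	res, err := client.TasksCancel().TaskId(42).Do(context.Background())
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res // *TasksListResponse describing the cancelled task(s)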
diff --git a/vendor/github.com/olivere/elastic/tasks_cancel_test.go b/vendor/github.com/olivere/elastic/tasks_cancel_test.go
new file mode 100644
index 000000000..c9d863394
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/tasks_cancel_test.go
@@ -0,0 +1,51 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestTasksCancelBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ // Cancel all
+ got, _, err := client.TasksCancel().buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "/_tasks/_cancel"
+ if got != want {
+ t.Errorf("want %q; got %q", want, got)
+ }
+
+ // Cancel specific task
+ got, _, err = client.TasksCancel().TaskId(42).buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want = "/_tasks/42/_cancel"
+ if got != want {
+ t.Errorf("want %q; got %q", want, got)
+ }
+}
+
+/*
+func TestTasksCancel(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if esversion < "2.3.0" {
+ t.Skipf("Elasticsearch %v does not support Tasks Management API yet", esversion)
+ }
+ res, err := client.TasksCancel("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("response is nil")
+ }
+}
+*/
diff --git a/vendor/github.com/olivere/elastic/tasks_get_task.go b/vendor/github.com/olivere/elastic/tasks_get_task.go
new file mode 100644
index 000000000..5f63726e4
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/tasks_get_task.go
@@ -0,0 +1,108 @@
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// TasksGetTaskService retrieves the state of a task in the cluster. It is part of the Task Management API
+// documented at http://www.elastic.co/guide/en/elasticsearch/reference/5.2/tasks-list.html.
+//
+// It is supported as of Elasticsearch 2.3.0.
+type TasksGetTaskService struct {
+ client *Client
+ pretty bool
+ taskId string
+ waitForCompletion *bool
+}
+
+// NewTasksGetTaskService creates a new TasksGetTaskService.
+func NewTasksGetTaskService(client *Client) *TasksGetTaskService {
+ return &TasksGetTaskService{
+ client: client,
+ }
+}
+
+// TaskId specifies the id of the task to return.
+func (s *TasksGetTaskService) TaskId(taskId string) *TasksGetTaskService {
+ s.taskId = taskId
+ return s
+}
+
+// WaitForCompletion indicates whether to wait for the matching tasks
+// to complete (default: false).
+func (s *TasksGetTaskService) WaitForCompletion(waitForCompletion bool) *TasksGetTaskService {
+ s.waitForCompletion = &waitForCompletion
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *TasksGetTaskService) Pretty(pretty bool) *TasksGetTaskService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *TasksGetTaskService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_tasks/{task_id}", map[string]string{
+ "task_id": s.taskId,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.waitForCompletion != nil {
+ params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *TasksGetTaskService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *TasksGetTaskService) Do(ctx context.Context) (*TasksGetTaskResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(TasksGetTaskResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
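+// TasksGetTaskResponse is the response of TasksGetTaskService.Do.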
+type TasksGetTaskResponse struct {
+ Completed bool `json:"completed"`
+ Task *TaskInfo `json:"task,omitempty"`
+}
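+
+// exampleGetTask is a minimal usage sketch, not part of the upstream API: it
+// fetches a single task and waits for it to complete. The task id below is a
+// hypothetical placeholder in the usual "node_id:task_number" form.
+func exampleGetTask(ctx context.Context, client *Client) (*TaskInfo, error) {
+ res, err := NewTasksGetTaskService(client).
+ TaskId("oTUltX4IQMOUUVeiohTt8A:12345"). // hypothetical task id
+ WaitForCompletion(true).
+ Do(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // res.Completed reports whether the task finished; res.Task carries the
+ // decoded TaskInfo (declared in tasks_list.go).
+ return res.Task, nil
+}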
diff --git a/vendor/github.com/olivere/elastic/tasks_get_task_test.go b/vendor/github.com/olivere/elastic/tasks_get_task_test.go
new file mode 100644
index 000000000..a4da49c74
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/tasks_get_task_test.go
@@ -0,0 +1,43 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "testing"
+)
+
+func TestTasksGetTaskBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ // Get specific task
+ got, _, err := client.TasksGetTask().TaskId("123").buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "/_tasks/123"
+ if got != want {
+ t.Errorf("want %q; got %q", want, got)
+ }
+}
+
+/*
+func TestTasksGetTask(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if esversion < "2.3.0" {
+ t.Skipf("Elasticsearch %v does not support Tasks Management API yet", esversion)
+ }
+ res, err := client.TasksGetTask().TaskId("123").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("response is nil")
+ }
+}
+*/
diff --git a/vendor/github.com/olivere/elastic/tasks_list.go b/vendor/github.com/olivere/elastic/tasks_list.go
new file mode 100644
index 000000000..54299d961
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/tasks_list.go
@@ -0,0 +1,231 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// TasksListService retrieves the list of currently executing tasks
+// on one or more nodes in the cluster. It is part of the Task Management API
+// documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/tasks.html.
+//
+// It is supported as of Elasticsearch 2.3.0.
+type TasksListService struct {
+ client *Client
+ pretty bool
+ taskId []string
+ actions []string
+ detailed *bool
+ nodeId []string
+ parentNode string
+ parentTaskId *string
+ waitForCompletion *bool
+ groupBy string
+}
+
+// NewTasksListService creates a new TasksListService.
+func NewTasksListService(client *Client) *TasksListService {
+ return &TasksListService{
+ client: client,
+ }
+}
+
+// TaskId limits the result to the task(s) with the specified id(s).
+func (s *TasksListService) TaskId(taskId ...string) *TasksListService {
+ s.taskId = append(s.taskId, taskId...)
+ return s
+}
+
+// Actions is a list of actions that should be returned. Leave empty to return all.
+func (s *TasksListService) Actions(actions ...string) *TasksListService {
+ s.actions = append(s.actions, actions...)
+ return s
+}
+
+// Detailed indicates whether to return detailed task information (default: false).
+func (s *TasksListService) Detailed(detailed bool) *TasksListService {
+ s.detailed = &detailed
+ return s
+}
+
+// NodeId is a list of node IDs or names to limit the returned information;
+// use `_local` to return information from the node you're connecting to,
+// leave empty to get information from all nodes.
+func (s *TasksListService) NodeId(nodeId ...string) *TasksListService {
+ s.nodeId = append(s.nodeId, nodeId...)
+ return s
+}
+
+// ParentNode limits the results to tasks with the specified parent node.
+func (s *TasksListService) ParentNode(parentNode string) *TasksListService {
+ s.parentNode = parentNode
+ return s
+}
+
+// ParentTaskId limits the results to tasks with the specified parent task id (node_id:task_number). Set to -1 to return all.
+func (s *TasksListService) ParentTaskId(parentTaskId string) *TasksListService {
+ s.parentTaskId = &parentTaskId
+ return s
+}
+
+// WaitForCompletion indicates whether to wait for the matching tasks
+// to complete (default: false).
+func (s *TasksListService) WaitForCompletion(waitForCompletion bool) *TasksListService {
+ s.waitForCompletion = &waitForCompletion
+ return s
+}
+
+// GroupBy groups tasks by nodes or parent/child relationships.
+// As of now, it can either be "nodes" (default) or "parents".
+func (s *TasksListService) GroupBy(groupBy string) *TasksListService {
+ s.groupBy = groupBy
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *TasksListService) Pretty(pretty bool) *TasksListService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *TasksListService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.taskId) > 0 {
+ path, err = uritemplates.Expand("/_tasks/{task_id}", map[string]string{
+ "task_id": strings.Join(s.taskId, ","),
+ })
+ } else {
+ path = "/_tasks"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if len(s.actions) > 0 {
+ params.Set("actions", strings.Join(s.actions, ","))
+ }
+ if s.detailed != nil {
+ params.Set("detailed", fmt.Sprintf("%v", *s.detailed))
+ }
+ if len(s.nodeId) > 0 {
+ params.Set("node_id", strings.Join(s.nodeId, ","))
+ }
+ if s.parentNode != "" {
+ params.Set("parent_node", s.parentNode)
+ }
+ if s.parentTaskId != nil {
+ params.Set("parent_task_id", *s.parentTaskId)
+ }
+ if s.waitForCompletion != nil {
+ params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
+ }
+ if s.groupBy != "" {
+ params.Set("group_by", s.groupBy)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *TasksListService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *TasksListService) Do(ctx context.Context) (*TasksListResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(TasksListResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// TasksListResponse is the response of TasksListService.Do.
+type TasksListResponse struct {
+ TaskFailures []*TaskOperationFailure `json:"task_failures"`
+ NodeFailures []*FailedNodeException `json:"node_failures"`
+ // Nodes returns the tasks per node. The key is the node id.
+ Nodes map[string]*DiscoveryNode `json:"nodes"`
+}
+
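+// TaskOperationFailure represents a failed task reported under task_failures.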
+type TaskOperationFailure struct {
+ TaskId int64 `json:"task_id"` // this is a long in the Java source
+ NodeId string `json:"node_id"`
+ Status string `json:"status"`
+ Reason *ErrorDetails `json:"reason"`
+}
+
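+// FailedNodeException represents a node-level failure reported under node_failures.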
+type FailedNodeException struct {
+ *ErrorDetails
+ NodeId string `json:"node_id"`
+}
+
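+// DiscoveryNode represents a cluster node, including the tasks running on it.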
+type DiscoveryNode struct {
+ Name string `json:"name"`
+ TransportAddress string `json:"transport_address"`
+ Host string `json:"host"`
+ IP string `json:"ip"`
+ Roles []string `json:"roles"` // "master", "data", or "ingest"
+ Attributes map[string]interface{} `json:"attributes"`
+ // Tasks returns the tasks by its id (as a string).
+ Tasks map[string]*TaskInfo `json:"tasks"`
+}
+
+// TaskInfo represents information about a currently running task.
+type TaskInfo struct {
+ Node string `json:"node"`
+ Id int64 `json:"id"` // the task id (yes, this is a long in the Java source)
+ Type string `json:"type"`
+ Action string `json:"action"`
+ Status interface{} `json:"status"` // has separate implementations of Task.Status in Java for reindexing, replication, and "RawTaskStatus"
+ Description interface{} `json:"description"` // same as Status
+ StartTime string `json:"start_time"`
+ StartTimeInMillis int64 `json:"start_time_in_millis"`
+ RunningTime string `json:"running_time"`
+ RunningTimeInNanos int64 `json:"running_time_in_nanos"`
+ Cancellable bool `json:"cancellable"`
+ ParentTaskId string `json:"parent_task_id"` // like "YxJnVYjwSBm_AUbzddTajQ:12356"
+}
+
+// StartTaskResult is used in cases where a task gets started asynchronously and
+// the operation simply returns a TaskID to watch for via the Task Management API.
+type StartTaskResult struct {
+ TaskId string `json:"task"`
+}
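+
+// exampleListTasks is an illustrative sketch, not upstream code: it requests
+// detailed task information from all nodes and flattens the per-node map
+// into a single slice of TaskInfo values.
+func exampleListTasks(ctx context.Context, client *Client) ([]*TaskInfo, error) {
+ res, err := NewTasksListService(client).Detailed(true).Do(ctx)
+ if err != nil {
+ return nil, err
+ }
+ var tasks []*TaskInfo
+ for _, node := range res.Nodes {
+ for _, task := range node.Tasks {
+ tasks = append(tasks, task)
+ }
+ }
+ return tasks, nil
+}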
diff --git a/vendor/github.com/olivere/elastic/tasks_list_test.go b/vendor/github.com/olivere/elastic/tasks_list_test.go
new file mode 100644
index 000000000..9ecabcd68
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/tasks_list_test.go
@@ -0,0 +1,65 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestTasksListBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ TaskId []string
+ Expected string
+ }{
+ {
+ []string{},
+ "/_tasks",
+ },
+ {
+ []string{"42"},
+ "/_tasks/42",
+ },
+ {
+ []string{"42", "37"},
+ "/_tasks/42%2C37",
+ },
+ }
+
+ for i, test := range tests {
+ path, _, err := client.TasksList().TaskId(test.TaskId...).buildURL()
+ if err != nil {
+ t.Errorf("case #%d: %v", i+1, err)
+ continue
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+}
+
+func TestTasksList(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if esversion < "2.3.0" {
+ t.Skipf("Elasticsearch %v does not support Tasks Management API yet", esversion)
+ }
+
+ res, err := client.TasksList().Pretty(true).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("response is nil")
+ }
+ if len(res.Nodes) == 0 {
+ t.Fatalf("expected at least 1 node; got: %d", len(res.Nodes))
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/termvectors.go b/vendor/github.com/olivere/elastic/termvectors.go
new file mode 100644
index 000000000..5943ad14f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/termvectors.go
@@ -0,0 +1,464 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// TermvectorsService returns information and statistics on terms in the
+// fields of a particular document. The document could be stored in the
+// index or artificially provided by the user.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-termvectors.html
+// for documentation.
+type TermvectorsService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ dfs *bool
+ doc interface{}
+ fieldStatistics *bool
+ fields []string
+ filter *TermvectorsFilterSettings
+ perFieldAnalyzer map[string]string
+ offsets *bool
+ parent string
+ payloads *bool
+ positions *bool
+ preference string
+ realtime *bool
+ routing string
+ termStatistics *bool
+ version interface{}
+ versionType string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewTermvectorsService creates a new TermvectorsService.
+func NewTermvectorsService(client *Client) *TermvectorsService {
+ return &TermvectorsService{
+ client: client,
+ }
+}
+
+// Index in which the document resides.
+func (s *TermvectorsService) Index(index string) *TermvectorsService {
+ s.index = index
+ return s
+}
+
+// Type of the document.
+func (s *TermvectorsService) Type(typ string) *TermvectorsService {
+ s.typ = typ
+ return s
+}
+
+// Id of the document.
+func (s *TermvectorsService) Id(id string) *TermvectorsService {
+ s.id = id
+ return s
+}
+
+// Dfs specifies if distributed frequencies should be returned instead
+// of shard frequencies.
+func (s *TermvectorsService) Dfs(dfs bool) *TermvectorsService {
+ s.dfs = &dfs
+ return s
+}
+
+// Doc is the document to analyze.
+func (s *TermvectorsService) Doc(doc interface{}) *TermvectorsService {
+ s.doc = doc
+ return s
+}
+
+// FieldStatistics specifies if document count, sum of document frequencies
+// and sum of total term frequencies should be returned.
+func (s *TermvectorsService) FieldStatistics(fieldStatistics bool) *TermvectorsService {
+ s.fieldStatistics = &fieldStatistics
+ return s
+}
+
+// Fields is a list of fields to return.
+func (s *TermvectorsService) Fields(fields ...string) *TermvectorsService {
+ if s.fields == nil {
+ s.fields = make([]string, 0)
+ }
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// Filter adds terms filter settings.
+func (s *TermvectorsService) Filter(filter *TermvectorsFilterSettings) *TermvectorsService {
+ s.filter = filter
+ return s
+}
+
+// PerFieldAnalyzer allows specifying a different analyzer than the one
+// defined for the field.
+func (s *TermvectorsService) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *TermvectorsService {
+ s.perFieldAnalyzer = perFieldAnalyzer
+ return s
+}
+
+// Offsets specifies if term offsets should be returned.
+func (s *TermvectorsService) Offsets(offsets bool) *TermvectorsService {
+ s.offsets = &offsets
+ return s
+}
+
+// Parent id of documents.
+func (s *TermvectorsService) Parent(parent string) *TermvectorsService {
+ s.parent = parent
+ return s
+}
+
+// Payloads specifies if term payloads should be returned.
+func (s *TermvectorsService) Payloads(payloads bool) *TermvectorsService {
+ s.payloads = &payloads
+ return s
+}
+
+// Positions specifies if term positions should be returned.
+func (s *TermvectorsService) Positions(positions bool) *TermvectorsService {
+ s.positions = &positions
+ return s
+}
+
+// Preference specifies the node or shard the operation
+// should be performed on (default: random).
+func (s *TermvectorsService) Preference(preference string) *TermvectorsService {
+ s.preference = preference
+ return s
+}
+
+// Realtime specifies if request is real-time as opposed to
+// near-real-time (default: true).
+func (s *TermvectorsService) Realtime(realtime bool) *TermvectorsService {
+ s.realtime = &realtime
+ return s
+}
+
+// Routing is a specific routing value.
+func (s *TermvectorsService) Routing(routing string) *TermvectorsService {
+ s.routing = routing
+ return s
+}
+
+// TermStatistics specifies if total term frequency and document frequency
+// should be returned.
+func (s *TermvectorsService) TermStatistics(termStatistics bool) *TermvectorsService {
+ s.termStatistics = &termStatistics
+ return s
+}
+
+// Version sets an explicit version number for concurrency control.
+func (s *TermvectorsService) Version(version interface{}) *TermvectorsService {
+ s.version = version
+ return s
+}
+
+// VersionType specifies a version type ("internal", "external", or "external_gte").
+func (s *TermvectorsService) VersionType(versionType string) *TermvectorsService {
+ s.versionType = versionType
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *TermvectorsService) Pretty(pretty bool) *TermvectorsService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson defines the body parameters. See documentation.
+func (s *TermvectorsService) BodyJson(body interface{}) *TermvectorsService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString defines the body parameters as a string. See documentation.
+func (s *TermvectorsService) BodyString(body string) *TermvectorsService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *TermvectorsService) buildURL() (string, url.Values, error) {
+ var pathParam = map[string]string{
+ "index": s.index,
+ "type": s.typ,
+ }
+ var path string
+ var err error
+
+ // Build URL
+ if s.id != "" {
+ pathParam["id"] = s.id
+ path, err = uritemplates.Expand("/{index}/{type}/{id}/_termvectors", pathParam)
+ } else {
+ path, err = uritemplates.Expand("/{index}/{type}/_termvectors", pathParam)
+ }
+
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.dfs != nil {
+ params.Set("dfs", fmt.Sprintf("%v", *s.dfs))
+ }
+ if s.fieldStatistics != nil {
+ params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics))
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ if s.offsets != nil {
+ params.Set("offsets", fmt.Sprintf("%v", *s.offsets))
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.payloads != nil {
+ params.Set("payloads", fmt.Sprintf("%v", *s.payloads))
+ }
+ if s.positions != nil {
+ params.Set("positions", fmt.Sprintf("%v", *s.positions))
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.realtime != nil {
+ params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.termStatistics != nil {
+ params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics))
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *TermvectorsService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *TermvectorsService) Do(ctx context.Context) (*TermvectorsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else if s.bodyString != "" {
+ body = s.bodyString
+ } else {
+ data := make(map[string]interface{})
+ if s.doc != nil {
+ data["doc"] = s.doc
+ }
+ if len(s.perFieldAnalyzer) > 0 {
+ data["per_field_analyzer"] = s.perFieldAnalyzer
+ }
+ if s.filter != nil {
+ src, err := s.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ data["filter"] = src
+ }
+ if len(data) > 0 {
+ body = data
+ }
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(TermvectorsResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Filter settings --
+
+// TermvectorsFilterSettings adds additional filters to a Termvectors request.
+// It allows filtering terms based on their tf-idf scores.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-termvectors.html#_terms_filtering
+// for more information.
+type TermvectorsFilterSettings struct {
+ maxNumTerms *int64
+ minTermFreq *int64
+ maxTermFreq *int64
+ minDocFreq *int64
+ maxDocFreq *int64
+ minWordLength *int64
+ maxWordLength *int64
+}
+
+// NewTermvectorsFilterSettings creates and initializes a new TermvectorsFilterSettings struct.
+func NewTermvectorsFilterSettings() *TermvectorsFilterSettings {
+ return &TermvectorsFilterSettings{}
+}
+
+// MaxNumTerms specifies the maximum number of terms that must be returned per field.
+func (fs *TermvectorsFilterSettings) MaxNumTerms(value int64) *TermvectorsFilterSettings {
+ fs.maxNumTerms = &value
+ return fs
+}
+
+// MinTermFreq ignores words with less than this frequency in the source doc.
+func (fs *TermvectorsFilterSettings) MinTermFreq(value int64) *TermvectorsFilterSettings {
+ fs.minTermFreq = &value
+ return fs
+}
+
+// MaxTermFreq ignores words with more than this frequency in the source doc.
+func (fs *TermvectorsFilterSettings) MaxTermFreq(value int64) *TermvectorsFilterSettings {
+ fs.maxTermFreq = &value
+ return fs
+}
+
+// MinDocFreq ignores terms which do not occur in at least this many docs.
+func (fs *TermvectorsFilterSettings) MinDocFreq(value int64) *TermvectorsFilterSettings {
+ fs.minDocFreq = &value
+ return fs
+}
+
+// MaxDocFreq ignores terms which occur in more than this many docs.
+func (fs *TermvectorsFilterSettings) MaxDocFreq(value int64) *TermvectorsFilterSettings {
+ fs.maxDocFreq = &value
+ return fs
+}
+
+// MinWordLength specifies the minimum word length below which words will be ignored.
+func (fs *TermvectorsFilterSettings) MinWordLength(value int64) *TermvectorsFilterSettings {
+ fs.minWordLength = &value
+ return fs
+}
+
+// MaxWordLength specifies the maximum word length above which words will be ignored.
+func (fs *TermvectorsFilterSettings) MaxWordLength(value int64) *TermvectorsFilterSettings {
+ fs.maxWordLength = &value
+ return fs
+}
+
+// Source returns JSON for the query.
+func (fs *TermvectorsFilterSettings) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+ if fs.maxNumTerms != nil {
+ source["max_num_terms"] = *fs.maxNumTerms
+ }
+ if fs.minTermFreq != nil {
+ source["min_term_freq"] = *fs.minTermFreq
+ }
+ if fs.maxTermFreq != nil {
+ source["max_term_freq"] = *fs.maxTermFreq
+ }
+ if fs.minDocFreq != nil {
+ source["min_doc_freq"] = *fs.minDocFreq
+ }
+ if fs.maxDocFreq != nil {
+ source["max_doc_freq"] = *fs.maxDocFreq
+ }
+ if fs.minWordLength != nil {
+ source["min_word_length"] = *fs.minWordLength
+ }
+ if fs.maxWordLength != nil {
+ source["max_word_length"] = *fs.maxWordLength
+ }
+ return source, nil
+}
+
+// -- Response types --
+
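+// TokenInfo describes a single token of a term, including its offsets, position, and payload.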
+type TokenInfo struct {
+ StartOffset int64 `json:"start_offset"`
+ EndOffset int64 `json:"end_offset"`
+ Position int64 `json:"position"`
+ Payload string `json:"payload"`
+}
+
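+// TermsInfo holds the statistics and tokens of a single term.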
+type TermsInfo struct {
+ DocFreq int64 `json:"doc_freq"`
+ Score float64 `json:"score"`
+ TermFreq int64 `json:"term_freq"`
+ Ttf int64 `json:"ttf"`
+ Tokens []TokenInfo `json:"tokens"`
+}
+
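+// FieldStatistics contains aggregated document and term frequency statistics for a field.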
+type FieldStatistics struct {
+ DocCount int64 `json:"doc_count"`
+ SumDocFreq int64 `json:"sum_doc_freq"`
+ SumTtf int64 `json:"sum_ttf"`
+}
+
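+// TermVectorsFieldInfo combines the field statistics and per-term details of a single field.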
+type TermVectorsFieldInfo struct {
+ FieldStatistics FieldStatistics `json:"field_statistics"`
+ Terms map[string]TermsInfo `json:"terms"`
+}
+
+// TermvectorsResponse is the response of TermvectorsService.Do.
+type TermvectorsResponse struct {
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id,omitempty"`
+ Version int `json:"_version"`
+ Found bool `json:"found"`
+ Took int64 `json:"took"`
+ TermVectors map[string]TermVectorsFieldInfo `json:"term_vectors"`
+}
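+
+// exampleTermvectors is an illustrative sketch, not upstream code: it asks
+// for term vectors of an artificial document, restricted by a terms filter.
+// The index name "twitter" and type "doc" are placeholders.
+func exampleTermvectors(ctx context.Context, client *Client) (*TermvectorsResponse, error) {
+ doc := map[string]interface{}{"text": "twitter test test test"}
+ return NewTermvectorsService(client).
+ Index("twitter"). // hypothetical index
+ Type("doc").
+ Doc(doc).
+ TermStatistics(true).
+ Filter(NewTermvectorsFilterSettings().MinTermFreq(2).MaxNumTerms(10)).
+ Do(ctx)
+}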
diff --git a/vendor/github.com/olivere/elastic/termvectors_test.go b/vendor/github.com/olivere/elastic/termvectors_test.go
new file mode 100644
index 000000000..0391f2b0a
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/termvectors_test.go
@@ -0,0 +1,157 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+ "time"
+)
+
+func TestTermVectorsBuildURL(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Index string
+ Type string
+ Id string
+ Expected string
+ }{
+ {
+ "twitter",
+ "doc",
+ "",
+ "/twitter/doc/_termvectors",
+ },
+ {
+ "twitter",
+ "doc",
+ "1",
+ "/twitter/doc/1/_termvectors",
+ },
+ }
+
+ for _, test := range tests {
+ builder := client.TermVectors(test.Index, test.Type)
+ if test.Id != "" {
+ builder = builder.Id(test.Id)
+ }
+ path, _, err := builder.buildURL()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+ }
+ }
+}
+
+func TestTermVectorsWithId(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+ // Add a document
+ indexResult, err := client.Index().
+ Index(testIndexName).
+ Type("doc").
+ Id("1").
+ BodyJson(&tweet1).
+ Refresh("true").
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if indexResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", indexResult)
+ }
+
+ // TermVectors by specifying ID
+ field := "Message"
+ result, err := client.TermVectors(testIndexName, "doc").
+ Id("1").
+ Fields(field).
+ FieldStatistics(true).
+ TermStatistics(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if result == nil {
+ t.Fatal("expected to return information and statistics")
+ }
+ if !result.Found {
+ t.Errorf("expected found to be %v; got: %v", true, result.Found)
+ }
+}
+
+func TestTermVectorsWithDoc(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Travis lags sometimes
+ if isTravis() {
+ time.Sleep(2 * time.Second)
+ }
+
+ // TermVectors by specifying Doc
+ var doc = map[string]interface{}{
+ "fullname": "John Doe",
+ "text": "twitter test test test",
+ }
+ var perFieldAnalyzer = map[string]string{
+ "fullname": "keyword",
+ }
+
+ result, err := client.TermVectors(testIndexName, "doc").
+ Doc(doc).
+ PerFieldAnalyzer(perFieldAnalyzer).
+ FieldStatistics(true).
+ TermStatistics(true).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if result == nil {
+ t.Fatal("expected to return information and statistics")
+ }
+ if !result.Found {
+ t.Errorf("expected found to be %v; got: %v", true, result.Found)
+ }
+}
+
+func TestTermVectorsWithFilter(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ // Travis lags sometimes
+ if isTravis() {
+ time.Sleep(2 * time.Second)
+ }
+
+ // TermVectors by specifying Doc
+ var doc = map[string]interface{}{
+ "fullname": "John Doe",
+ "text": "twitter test test test",
+ }
+ var perFieldAnalyzer = map[string]string{
+ "fullname": "keyword",
+ }
+
+ result, err := client.TermVectors(testIndexName, "doc").
+ Doc(doc).
+ PerFieldAnalyzer(perFieldAnalyzer).
+ FieldStatistics(true).
+ TermStatistics(true).
+ Filter(NewTermvectorsFilterSettings().MinTermFreq(1)).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if result == nil {
+ t.Fatal("expected to return information and statistics")
+ }
+ if !result.Found {
+ t.Errorf("expected found to be %v; got: %v", true, result.Found)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/update.go b/vendor/github.com/olivere/elastic/update.go
new file mode 100644
index 000000000..5507fae4c
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/update.go
@@ -0,0 +1,327 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// UpdateService updates a document in Elasticsearch.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-update.html
+// for details.
+type UpdateService struct {
+ client *Client
+ index string
+ typ string
+ id string
+ routing string
+ parent string
+ script *Script
+ fields []string
+ fsc *FetchSourceContext
+ version *int64
+ versionType string
+ retryOnConflict *int
+ refresh string
+ waitForActiveShards string
+ upsert interface{}
+ scriptedUpsert *bool
+ docAsUpsert *bool
+ detectNoop *bool
+ doc interface{}
+ timeout string
+ pretty bool
+}
+
+// NewUpdateService creates the service to update documents in Elasticsearch.
+func NewUpdateService(client *Client) *UpdateService {
+ builder := &UpdateService{
+ client: client,
+ fields: make([]string, 0),
+ }
+ return builder
+}
+
+// Index is the name of the Elasticsearch index (required).
+func (b *UpdateService) Index(name string) *UpdateService {
+ b.index = name
+ return b
+}
+
+// Type is the type of the document (required).
+func (b *UpdateService) Type(typ string) *UpdateService {
+ b.typ = typ
+ return b
+}
+
+// Id is the identifier of the document to update (required).
+func (b *UpdateService) Id(id string) *UpdateService {
+ b.id = id
+ return b
+}
+
+// Routing specifies a specific routing value.
+func (b *UpdateService) Routing(routing string) *UpdateService {
+ b.routing = routing
+ return b
+}
+
+// Parent sets the id of the parent document.
+func (b *UpdateService) Parent(parent string) *UpdateService {
+ b.parent = parent
+ return b
+}
+
+// Script is the script definition.
+func (b *UpdateService) Script(script *Script) *UpdateService {
+ b.script = script
+ return b
+}
+
+// RetryOnConflict specifies how many times the operation should be retried
+// when a conflict occurs (default: 0).
+func (b *UpdateService) RetryOnConflict(retryOnConflict int) *UpdateService {
+ b.retryOnConflict = &retryOnConflict
+ return b
+}
+
+// Fields is a list of fields to return in the response.
+func (b *UpdateService) Fields(fields ...string) *UpdateService {
+ b.fields = make([]string, 0, len(fields))
+ b.fields = append(b.fields, fields...)
+ return b
+}
+
+// Version defines the explicit version number for concurrency control.
+func (b *UpdateService) Version(version int64) *UpdateService {
+ b.version = &version
+ return b
+}
+
+// VersionType specifies the version type, e.g. "internal".
+func (b *UpdateService) VersionType(versionType string) *UpdateService {
+ b.versionType = versionType
+ return b
+}
+
+// Refresh the index after performing the update.
+func (b *UpdateService) Refresh(refresh string) *UpdateService {
+ b.refresh = refresh
+ return b
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active before
+// proceeding with the update operation. Defaults to 1, meaning the primary shard only.
+// Set to `all` for all shard copies, otherwise set to any non-negative value less than
+// or equal to the total number of copies for the shard (number of replicas + 1).
+func (b *UpdateService) WaitForActiveShards(waitForActiveShards string) *UpdateService {
+ b.waitForActiveShards = waitForActiveShards
+ return b
+}
+
+// Doc allows for updating a partial document.
+func (b *UpdateService) Doc(doc interface{}) *UpdateService {
+ b.doc = doc
+ return b
+}
+
+// Upsert can be used to index the document when it doesn't exist yet.
+// Use this e.g. to initialize a document with a default value.
+func (b *UpdateService) Upsert(doc interface{}) *UpdateService {
+ b.upsert = doc
+ return b
+}
+
+// DocAsUpsert can be used to insert the document if it doesn't already exist.
+func (b *UpdateService) DocAsUpsert(docAsUpsert bool) *UpdateService {
+ b.docAsUpsert = &docAsUpsert
+ return b
+}
+
+// DetectNoop will instruct Elasticsearch to check if changes will occur
+// when updating via Doc. If there aren't any changes, the request will
+// turn into a no-op.
+func (b *UpdateService) DetectNoop(detectNoop bool) *UpdateService {
+ b.detectNoop = &detectNoop
+ return b
+}
+
+// ScriptedUpsert should be set to true if the referenced script
+// (defined in Script or ScriptId) should be called to perform an insert.
+// The default is false.
+func (b *UpdateService) ScriptedUpsert(scriptedUpsert bool) *UpdateService {
+ b.scriptedUpsert = &scriptedUpsert
+ return b
+}
+
+// Timeout is an explicit timeout for the operation, e.g. "1000", "1s" or "500ms".
+func (b *UpdateService) Timeout(timeout string) *UpdateService {
+ b.timeout = timeout
+ return b
+}
+
+// Pretty instructs to return human readable, prettified JSON.
+func (b *UpdateService) Pretty(pretty bool) *UpdateService {
+ b.pretty = pretty
+ return b
+}
+
+// FetchSource asks Elasticsearch to return the updated _source in the response.
+func (s *UpdateService) FetchSource(fetchSource bool) *UpdateService {
+ if s.fsc == nil {
+ s.fsc = NewFetchSourceContext(fetchSource)
+ } else {
+ s.fsc.SetFetchSource(fetchSource)
+ }
+ return s
+}
+
+// FetchSourceContext indicates that _source should be returned in the response,
+// allowing wildcard patterns to be defined via FetchSourceContext.
+func (s *UpdateService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *UpdateService {
+ s.fsc = fetchSourceContext
+ return s
+}
+
+// url returns the URL part of the document request.
+func (b *UpdateService) url() (string, url.Values, error) {
+ // Build url
+ path := "/{index}/{type}/{id}/_update"
+ path, err := uritemplates.Expand(path, map[string]string{
+ "index": b.index,
+ "type": b.typ,
+ "id": b.id,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Parameters
+ params := make(url.Values)
+ if b.pretty {
+ params.Set("pretty", "true")
+ }
+ if b.routing != "" {
+ params.Set("routing", b.routing)
+ }
+ if b.parent != "" {
+ params.Set("parent", b.parent)
+ }
+ if b.timeout != "" {
+ params.Set("timeout", b.timeout)
+ }
+ if b.refresh != "" {
+ params.Set("refresh", b.refresh)
+ }
+ if b.waitForActiveShards != "" {
+ params.Set("wait_for_active_shards", b.waitForActiveShards)
+ }
+ if len(b.fields) > 0 {
+ params.Set("fields", strings.Join(b.fields, ","))
+ }
+ if b.version != nil {
+ params.Set("version", fmt.Sprintf("%d", *b.version))
+ }
+ if b.versionType != "" {
+ params.Set("version_type", b.versionType)
+ }
+ if b.retryOnConflict != nil {
+ params.Set("retry_on_conflict", fmt.Sprintf("%v", *b.retryOnConflict))
+ }
+
+ return path, params, nil
+}
+
+// body returns the body part of the document request.
+func (b *UpdateService) body() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ if b.script != nil {
+ src, err := b.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["script"] = src
+ }
+
+ if b.scriptedUpsert != nil {
+ source["scripted_upsert"] = *b.scriptedUpsert
+ }
+
+ if b.upsert != nil {
+ source["upsert"] = b.upsert
+ }
+
+ if b.doc != nil {
+ source["doc"] = b.doc
+ }
+ if b.docAsUpsert != nil {
+ source["doc_as_upsert"] = *b.docAsUpsert
+ }
+ if b.detectNoop != nil {
+ source["detect_noop"] = *b.detectNoop
+ }
+ if b.fsc != nil {
+ src, err := b.fsc.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["_source"] = src
+ }
+
+ return source, nil
+}
+
+// Do executes the update operation.
+func (b *UpdateService) Do(ctx context.Context) (*UpdateResponse, error) {
+ path, params, err := b.url()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get body of the request
+ body, err := b.body()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(UpdateResponse)
+ if err := b.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// UpdateResponse is the result of updating a document in Elasticsearch.
+type UpdateResponse struct {
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Version int64 `json:"_version,omitempty"`
+ Result string `json:"result,omitempty"`
+ Shards *shardsInfo `json:"_shards,omitempty"`
+ SeqNo int64 `json:"_seq_no,omitempty"`
+ PrimaryTerm int64 `json:"_primary_term,omitempty"`
+ Status int `json:"status,omitempty"`
+ ForcedRefresh bool `json:"forced_refresh,omitempty"`
+ GetResult *GetResult `json:"get,omitempty"`
+}
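+
+// exampleUpdate is an illustrative sketch, not upstream code: it performs a
+// partial-document update with doc-as-upsert semantics and asks for the
+// updated _source back. Index, type, and id values are placeholders.
+func exampleUpdate(ctx context.Context, client *Client) (*UpdateResponse, error) {
+ return NewUpdateService(client).
+ Index("twitter"). // hypothetical index
+ Type("doc").
+ Id("1").
+ Doc(map[string]interface{}{"retweets": 0}).
+ DocAsUpsert(true).
+ FetchSource(true).
+ Do(ctx)
+}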
diff --git a/vendor/github.com/olivere/elastic/update_by_query.go b/vendor/github.com/olivere/elastic/update_by_query.go
new file mode 100644
index 000000000..953d67388
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/update_by_query.go
@@ -0,0 +1,655 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// UpdateByQueryService is documented at https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html.
+type UpdateByQueryService struct {
+ client *Client
+ pretty bool
+ index []string
+ typ []string
+ script *Script
+ query Query
+ body interface{}
+ xSource []string
+ xSourceExclude []string
+ xSourceInclude []string
+ allowNoIndices *bool
+ analyzeWildcard *bool
+ analyzer string
+ conflicts string
+ defaultOperator string
+ docvalueFields []string
+ df string
+ expandWildcards string
+ explain *bool
+ fielddataFields []string
+ from *int
+ ignoreUnavailable *bool
+ lenient *bool
+ lowercaseExpandedTerms *bool
+ pipeline string
+ preference string
+ q string
+ refresh string
+ requestCache *bool
+ requestsPerSecond *int
+ routing []string
+ scroll string
+ scrollSize *int
+ searchTimeout string
+ searchType string
+ size *int
+ sort []string
+ stats []string
+ storedFields []string
+ suggestField string
+ suggestMode string
+ suggestSize *int
+ suggestText string
+ terminateAfter *int
+ timeout string
+ trackScores *bool
+ version *bool
+ versionType *bool
+ waitForActiveShards string
+ waitForCompletion *bool
+}
+
+// NewUpdateByQueryService creates a new UpdateByQueryService.
+func NewUpdateByQueryService(client *Client) *UpdateByQueryService {
+ return &UpdateByQueryService{
+ client: client,
+ }
+}
+
+// Index is a list of index names to search; use `_all` or empty string to
+// perform the operation on all indices.
+func (s *UpdateByQueryService) Index(index ...string) *UpdateByQueryService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Type is a list of document types to search; leave empty to perform
+// the operation on all types.
+func (s *UpdateByQueryService) Type(typ ...string) *UpdateByQueryService {
+ s.typ = append(s.typ, typ...)
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *UpdateByQueryService) Pretty(pretty bool) *UpdateByQueryService {
+ s.pretty = pretty
+ return s
+}
+
+// Script sets an update script.
+func (s *UpdateByQueryService) Script(script *Script) *UpdateByQueryService {
+ s.script = script
+ return s
+}
+
+// Body specifies the body of the request. It overrides data being specified via
+// SearchService or Script.
+func (s *UpdateByQueryService) Body(body string) *UpdateByQueryService {
+ s.body = body
+ return s
+}
+
+// XSource is true or false to return the _source field or not,
+// or a list of fields to return.
+func (s *UpdateByQueryService) XSource(xSource ...string) *UpdateByQueryService {
+ s.xSource = append(s.xSource, xSource...)
+ return s
+}
+
+// XSourceExclude represents a list of fields to exclude from the returned _source field.
+func (s *UpdateByQueryService) XSourceExclude(xSourceExclude ...string) *UpdateByQueryService {
+ s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
+ return s
+}
+
+// XSourceInclude represents a list of fields to extract and return from the _source field.
+func (s *UpdateByQueryService) XSourceInclude(xSourceInclude ...string) *UpdateByQueryService {
+ s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices expression
+// resolves into no concrete indices. (This includes `_all` string or when
+// no indices have been specified).
+func (s *UpdateByQueryService) AllowNoIndices(allowNoIndices bool) *UpdateByQueryService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// AnalyzeWildcard specifies whether wildcard and prefix queries should be
+// analyzed (default: false).
+func (s *UpdateByQueryService) AnalyzeWildcard(analyzeWildcard bool) *UpdateByQueryService {
+ s.analyzeWildcard = &analyzeWildcard
+ return s
+}
+
+// Analyzer specifies the analyzer to use for the query string.
+func (s *UpdateByQueryService) Analyzer(analyzer string) *UpdateByQueryService {
+ s.analyzer = analyzer
+ return s
+}
+
+// Conflicts indicates what to do when the process detects version conflicts.
+// Possible values are "proceed" and "abort".
+func (s *UpdateByQueryService) Conflicts(conflicts string) *UpdateByQueryService {
+ s.conflicts = conflicts
+ return s
+}
+
+// AbortOnVersionConflict aborts the request on version conflicts.
+// It is an alias to setting Conflicts("abort").
+func (s *UpdateByQueryService) AbortOnVersionConflict() *UpdateByQueryService {
+ s.conflicts = "abort"
+ return s
+}
+
+// ProceedOnVersionConflict proceeds with the request on version conflicts.
+// It is an alias to setting Conflicts("proceed").
+func (s *UpdateByQueryService) ProceedOnVersionConflict() *UpdateByQueryService {
+ s.conflicts = "proceed"
+ return s
+}
+
+// DefaultOperator is the default operator for query string query (AND or OR).
+func (s *UpdateByQueryService) DefaultOperator(defaultOperator string) *UpdateByQueryService {
+ s.defaultOperator = defaultOperator
+ return s
+}
+
+// DF specifies the field to use as default where no field prefix is given in the query string.
+func (s *UpdateByQueryService) DF(df string) *UpdateByQueryService {
+ s.df = df
+ return s
+}
+
+// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit.
+func (s *UpdateByQueryService) DocvalueFields(docvalueFields ...string) *UpdateByQueryService {
+ s.docvalueFields = docvalueFields
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *UpdateByQueryService) ExpandWildcards(expandWildcards string) *UpdateByQueryService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Explain specifies whether to return detailed information about score
+// computation as part of a hit.
+func (s *UpdateByQueryService) Explain(explain bool) *UpdateByQueryService {
+ s.explain = &explain
+ return s
+}
+
+// FielddataFields is a list of fields to return as the field data
+// representation of a field for each hit.
+func (s *UpdateByQueryService) FielddataFields(fielddataFields ...string) *UpdateByQueryService {
+ s.fielddataFields = append(s.fielddataFields, fielddataFields...)
+ return s
+}
+
+// From is the starting offset (default: 0).
+func (s *UpdateByQueryService) From(from int) *UpdateByQueryService {
+ s.from = &from
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *UpdateByQueryService) IgnoreUnavailable(ignoreUnavailable bool) *UpdateByQueryService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Lenient specifies whether format-based query failures
+// (such as providing text to a numeric field) should be ignored.
+func (s *UpdateByQueryService) Lenient(lenient bool) *UpdateByQueryService {
+ s.lenient = &lenient
+ return s
+}
+
+// LowercaseExpandedTerms specifies whether query terms should be lowercased.
+func (s *UpdateByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *UpdateByQueryService {
+ s.lowercaseExpandedTerms = &lowercaseExpandedTerms
+ return s
+}
+
+// Pipeline specifies the ingest pipeline to set on index requests made by this action (default: none).
+func (s *UpdateByQueryService) Pipeline(pipeline string) *UpdateByQueryService {
+ s.pipeline = pipeline
+ return s
+}
+
+// Preference specifies the node or shard the operation should be performed on
+// (default: random).
+func (s *UpdateByQueryService) Preference(preference string) *UpdateByQueryService {
+ s.preference = preference
+ return s
+}
+
+// Q specifies the query in the Lucene query string syntax.
+func (s *UpdateByQueryService) Q(q string) *UpdateByQueryService {
+ s.q = q
+ return s
+}
+
+// Query sets a query definition using the Query DSL.
+func (s *UpdateByQueryService) Query(query Query) *UpdateByQueryService {
+ s.query = query
+ return s
+}
+
+// Refresh indicates whether the affected indexes should be refreshed.
+func (s *UpdateByQueryService) Refresh(refresh string) *UpdateByQueryService {
+ s.refresh = refresh
+ return s
+}
+
+// RequestCache specifies if request cache should be used for this request
+// or not, defaults to index level setting.
+func (s *UpdateByQueryService) RequestCache(requestCache bool) *UpdateByQueryService {
+ s.requestCache = &requestCache
+ return s
+}
+
+// RequestsPerSecond sets the throttle on this request in sub-requests per second.
+// -1 means no throttle, as does "unlimited", which is the only non-float this accepts.
+func (s *UpdateByQueryService) RequestsPerSecond(requestsPerSecond int) *UpdateByQueryService {
+ s.requestsPerSecond = &requestsPerSecond
+ return s
+}
+
+// Routing is a list of specific routing values.
+func (s *UpdateByQueryService) Routing(routing ...string) *UpdateByQueryService {
+ s.routing = append(s.routing, routing...)
+ return s
+}
+
+// Scroll specifies how long a consistent view of the index should be maintained
+// for scrolled search.
+func (s *UpdateByQueryService) Scroll(scroll string) *UpdateByQueryService {
+ s.scroll = scroll
+ return s
+}
+
+// ScrollSize is the size on the scroll request powering the update_by_query.
+func (s *UpdateByQueryService) ScrollSize(scrollSize int) *UpdateByQueryService {
+ s.scrollSize = &scrollSize
+ return s
+}
+
+// SearchTimeout defines an explicit timeout for each search request.
+// Defaults to no timeout.
+func (s *UpdateByQueryService) SearchTimeout(searchTimeout string) *UpdateByQueryService {
+ s.searchTimeout = searchTimeout
+ return s
+}
+
+// SearchType is the search operation type. Possible values are
+// "query_then_fetch" and "dfs_query_then_fetch".
+func (s *UpdateByQueryService) SearchType(searchType string) *UpdateByQueryService {
+ s.searchType = searchType
+ return s
+}
+
+// Size represents the number of hits to return (default: 10).
+func (s *UpdateByQueryService) Size(size int) *UpdateByQueryService {
+ s.size = &size
+ return s
+}
+
+// Sort is a list of <field>:<direction> pairs.
+func (s *UpdateByQueryService) Sort(sort ...string) *UpdateByQueryService {
+ s.sort = append(s.sort, sort...)
+ return s
+}
+
+// SortByField adds a sort order.
+func (s *UpdateByQueryService) SortByField(field string, ascending bool) *UpdateByQueryService {
+ if ascending {
+ s.sort = append(s.sort, fmt.Sprintf("%s:asc", field))
+ } else {
+ s.sort = append(s.sort, fmt.Sprintf("%s:desc", field))
+ }
+ return s
+}
+
+// Stats specifies specific tag(s) of the request for logging and statistical purposes.
+func (s *UpdateByQueryService) Stats(stats ...string) *UpdateByQueryService {
+ s.stats = append(s.stats, stats...)
+ return s
+}
+
+// StoredFields specifies the list of stored fields to return as part of a hit.
+func (s *UpdateByQueryService) StoredFields(storedFields ...string) *UpdateByQueryService {
+ s.storedFields = storedFields
+ return s
+}
+
+// SuggestField specifies which field to use for suggestions.
+func (s *UpdateByQueryService) SuggestField(suggestField string) *UpdateByQueryService {
+ s.suggestField = suggestField
+ return s
+}
+
+// SuggestMode specifies the suggest mode. Possible values are
+// "missing", "popular", and "always".
+func (s *UpdateByQueryService) SuggestMode(suggestMode string) *UpdateByQueryService {
+ s.suggestMode = suggestMode
+ return s
+}
+
+// SuggestSize specifies how many suggestions to return in response.
+func (s *UpdateByQueryService) SuggestSize(suggestSize int) *UpdateByQueryService {
+ s.suggestSize = &suggestSize
+ return s
+}
+
+// SuggestText specifies the source text for which the suggestions should be returned.
+func (s *UpdateByQueryService) SuggestText(suggestText string) *UpdateByQueryService {
+ s.suggestText = suggestText
+ return s
+}
+
+// TerminateAfter indicates the maximum number of documents to collect
+// for each shard, upon reaching which the query execution will terminate early.
+func (s *UpdateByQueryService) TerminateAfter(terminateAfter int) *UpdateByQueryService {
+ s.terminateAfter = &terminateAfter
+ return s
+}
+
+// Timeout is the time each individual bulk request should wait for shards
+// that are unavailable.
+func (s *UpdateByQueryService) Timeout(timeout string) *UpdateByQueryService {
+ s.timeout = timeout
+ return s
+}
+
+// TimeoutInMillis sets the timeout in milliseconds.
+func (s *UpdateByQueryService) TimeoutInMillis(timeoutInMillis int) *UpdateByQueryService {
+ s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
+ return s
+}
+
+// TrackScores indicates whether to calculate and return scores even if
+// they are not used for sorting.
+func (s *UpdateByQueryService) TrackScores(trackScores bool) *UpdateByQueryService {
+ s.trackScores = &trackScores
+ return s
+}
+
+// Version specifies whether to return document version as part of a hit.
+func (s *UpdateByQueryService) Version(version bool) *UpdateByQueryService {
+ s.version = &version
+ return s
+}
+
+// VersionType indicates if the document should increment the version number (internal)
+// on hit or not (reindex).
+func (s *UpdateByQueryService) VersionType(versionType bool) *UpdateByQueryService {
+ s.versionType = &versionType
+ return s
+}
+
+// WaitForActiveShards sets the number of shard copies that must be active before proceeding
+// with the update by query operation. Defaults to 1, meaning the primary shard only.
+// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal
+// to the total number of copies for the shard (number of replicas + 1).
+func (s *UpdateByQueryService) WaitForActiveShards(waitForActiveShards string) *UpdateByQueryService {
+ s.waitForActiveShards = waitForActiveShards
+ return s
+}
+
+// WaitForCompletion indicates if the request should block until the update-by-query is complete.
+func (s *UpdateByQueryService) WaitForCompletion(waitForCompletion bool) *UpdateByQueryService {
+ s.waitForCompletion = &waitForCompletion
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *UpdateByQueryService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.typ) > 0 {
+ path, err = uritemplates.Expand("/{index}/{type}/_update_by_query", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "type": strings.Join(s.typ, ","),
+ })
+ } else {
+ path, err = uritemplates.Expand("/{index}/_update_by_query", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if len(s.xSource) > 0 {
+ params.Set("_source", strings.Join(s.xSource, ","))
+ }
+ if len(s.xSourceExclude) > 0 {
+ params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
+ }
+ if len(s.xSourceInclude) > 0 {
+ params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.analyzer != "" {
+ params.Set("analyzer", s.analyzer)
+ }
+ if s.analyzeWildcard != nil {
+ params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
+ }
+ if s.conflicts != "" {
+ params.Set("conflicts", s.conflicts)
+ }
+ if s.defaultOperator != "" {
+ params.Set("default_operator", s.defaultOperator)
+ }
+ if s.df != "" {
+ params.Set("df", s.df)
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.explain != nil {
+ params.Set("explain", fmt.Sprintf("%v", *s.explain))
+ }
+ if len(s.storedFields) > 0 {
+ params.Set("stored_fields", strings.Join(s.storedFields, ","))
+ }
+ if len(s.docvalueFields) > 0 {
+ params.Set("docvalue_fields", strings.Join(s.docvalueFields, ","))
+ }
+ if len(s.fielddataFields) > 0 {
+ params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
+ }
+ if s.from != nil {
+ params.Set("from", fmt.Sprintf("%d", *s.from))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.lenient != nil {
+ params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
+ }
+ if s.lowercaseExpandedTerms != nil {
+ params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
+ }
+ if s.pipeline != "" {
+ params.Set("pipeline", s.pipeline)
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.q != "" {
+ params.Set("q", s.q)
+ }
+ if s.refresh != "" {
+ params.Set("refresh", s.refresh)
+ }
+ if s.requestCache != nil {
+ params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache))
+ }
+ if len(s.routing) > 0 {
+ params.Set("routing", strings.Join(s.routing, ","))
+ }
+ if s.scroll != "" {
+ params.Set("scroll", s.scroll)
+ }
+ if s.scrollSize != nil {
+ params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize))
+ }
+ if s.searchTimeout != "" {
+ params.Set("search_timeout", s.searchTimeout)
+ }
+ if s.searchType != "" {
+ params.Set("search_type", s.searchType)
+ }
+ if s.size != nil {
+ params.Set("size", fmt.Sprintf("%d", *s.size))
+ }
+ if len(s.sort) > 0 {
+ params.Set("sort", strings.Join(s.sort, ","))
+ }
+ if len(s.stats) > 0 {
+ params.Set("stats", strings.Join(s.stats, ","))
+ }
+ if s.suggestField != "" {
+ params.Set("suggest_field", s.suggestField)
+ }
+ if s.suggestMode != "" {
+ params.Set("suggest_mode", s.suggestMode)
+ }
+ if s.suggestSize != nil {
+ params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize))
+ }
+ if s.suggestText != "" {
+ params.Set("suggest_text", s.suggestText)
+ }
+ if s.terminateAfter != nil {
+ params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter))
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.trackScores != nil {
+ params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores))
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", *s.version))
+ }
+ if s.versionType != nil {
+ params.Set("version_type", fmt.Sprintf("%v", *s.versionType))
+ }
+ if s.waitForActiveShards != "" {
+ params.Set("wait_for_active_shards", s.waitForActiveShards)
+ }
+ if s.waitForCompletion != nil {
+ params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
+ }
+ if s.requestsPerSecond != nil {
+ params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *UpdateByQueryService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// getBody returns the body part of the document request.
+func (s *UpdateByQueryService) getBody() (interface{}, error) {
+ if s.body != nil {
+ return s.body, nil
+ }
+ source := make(map[string]interface{})
+ if s.script != nil {
+ src, err := s.script.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["script"] = src
+ }
+ if s.query != nil {
+ src, err := s.query.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["query"] = src
+ }
+ return source, nil
+}
+
+// Do executes the operation.
+func (s *UpdateByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ body, err := s.getBody()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response (BulkIndexByScrollResponse is defined in DeleteByQuery)
+ ret := new(BulkIndexByScrollResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
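+
+// exampleUpdateByQuery is an illustrative sketch, not upstream code: it
+// increments a counter on every document matching a term query and proceeds
+// on version conflicts. The index name and script are placeholders;
+// NewTermQuery and NewScriptInline come from elsewhere in this package.
+func exampleUpdateByQuery(ctx context.Context, client *Client) (*BulkIndexByScrollResponse, error) {
+ return NewUpdateByQueryService(client).
+ Index("twitter"). // hypothetical index
+ Query(NewTermQuery("user", "olivere")).
+ Script(NewScriptInline("ctx._source.likes++")).
+ ProceedOnVersionConflict().
+ Do(ctx)
+}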
diff --git a/vendor/github.com/olivere/elastic/update_by_query_test.go b/vendor/github.com/olivere/elastic/update_by_query_test.go
new file mode 100644
index 000000000..fde924dd5
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/update_by_query_test.go
@@ -0,0 +1,147 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+func TestUpdateByQueryBuildURL(t *testing.T) {
+ client := setupTestClient(t)
+
+ tests := []struct {
+ Indices []string
+ Types []string
+ Expected string
+ ExpectErr bool
+ }{
+ {
+ []string{},
+ []string{},
+ "",
+ true,
+ },
+ {
+ []string{"index1"},
+ []string{},
+ "/index1/_update_by_query",
+ false,
+ },
+ {
+ []string{"index1", "index2"},
+ []string{},
+ "/index1%2Cindex2/_update_by_query",
+ false,
+ },
+ {
+ []string{},
+ []string{"type1"},
+ "",
+ true,
+ },
+ {
+ []string{"index1"},
+ []string{"type1"},
+ "/index1/type1/_update_by_query",
+ false,
+ },
+ {
+ []string{"index1", "index2"},
+ []string{"type1", "type2"},
+ "/index1%2Cindex2/type1%2Ctype2/_update_by_query",
+ false,
+ },
+ }
+
+ for i, test := range tests {
+ builder := client.UpdateByQuery().Index(test.Indices...).Type(test.Types...)
+ err := builder.Validate()
+ if err != nil {
+ if !test.ExpectErr {
+ t.Errorf("case #%d: %v", i+1, err)
+ continue
+ }
+ } else {
+ // err == nil
+ if test.ExpectErr {
+ t.Errorf("case #%d: expected error", i+1)
+ continue
+ }
+ path, _, _ := builder.buildURL()
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+ }
+}
+
+func TestUpdateByQueryBodyWithQuery(t *testing.T) {
+ client := setupTestClient(t)
+ out, err := client.UpdateByQuery().Query(NewTermQuery("user", "olivere")).getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"query":{"term":{"user":"olivere"}}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestUpdateByQueryBodyWithQueryAndScript(t *testing.T) {
+ client := setupTestClient(t)
+ out, err := client.UpdateByQuery().
+ Query(NewTermQuery("user", "olivere")).
+ Script(NewScriptInline("ctx._source.likes++")).
+ getBody()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := json.Marshal(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := string(b)
+ want := `{"query":{"term":{"user":"olivere"}},"script":{"source":"ctx._source.likes++"}}`
+ if got != want {
+ t.Fatalf("\ngot %s\nwant %s", got, want)
+ }
+}
+
+func TestUpdateByQuery(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if esversion < "2.3.0" {
+ t.Skipf("Elasticsearch %v does not support update-by-query yet", esversion)
+ }
+
+ sourceCount, err := client.Count(testIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if sourceCount <= 0 {
+ t.Fatalf("expected more than %d documents; got: %d", 0, sourceCount)
+ }
+
+ res, err := client.UpdateByQuery(testIndexName).ProceedOnVersionConflict().Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("response is nil")
+ }
+ if res.Updated != sourceCount {
+ t.Fatalf("expected %d; got: %d", sourceCount, res.Updated)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/update_integration_test.go b/vendor/github.com/olivere/elastic/update_integration_test.go
new file mode 100644
index 000000000..f36925298
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/update_integration_test.go
@@ -0,0 +1,58 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+func TestUpdateWithScript(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ // Get original
+ getRes, err := client.Get().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ var original tweet
+ if err := json.Unmarshal(*getRes.Source, &original); err != nil {
+ t.Fatal(err)
+ }
+
+ // Update with script
+ updRes, err := client.Update().Index(testIndexName).Type("doc").Id("1").
+ Script(
+ NewScript(`ctx._source.message = "Updated message text."`).Lang("painless"),
+ ).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if updRes == nil {
+ t.Fatal("response is nil")
+ }
+ if want, have := "updated", updRes.Result; want != have {
+ t.Fatalf("want Result = %q, have %v", want, have)
+ }
+
+ // Get new version
+ getRes, err = client.Get().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ var updated tweet
+ if err := json.Unmarshal(*getRes.Source, &updated); err != nil {
+ t.Fatal(err)
+ }
+
+ if want, have := original.User, updated.User; want != have {
+ t.Fatalf("want User = %q, have %v", want, have)
+ }
+ if want, have := "Updated message text.", updated.Message; want != have {
+ t.Fatalf("want Message = %q, have %v", want, have)
+ }
+}
diff --git a/vendor/github.com/olivere/elastic/update_test.go b/vendor/github.com/olivere/elastic/update_test.go
new file mode 100644
index 000000000..1f04cedd6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/update_test.go
@@ -0,0 +1,262 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+ "testing"
+)
+
+func TestUpdateViaScript(t *testing.T) {
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ update := client.Update().
+ Index("test").Type("type1").Id("1").
+ Script(NewScript("ctx._source.tags += tag").Params(map[string]interface{}{"tag": "blue"}).Lang("groovy"))
+ path, params, err := update.url()
+ if err != nil {
+ t.Fatalf("expected to return URL, got: %v", err)
+ }
+ expectedPath := `/test/type1/1/_update`
+ if expectedPath != path {
+ t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
+ }
+ expectedParams := url.Values{}
+ if expectedParams.Encode() != params.Encode() {
+ t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
+ }
+ body, err := update.body()
+ if err != nil {
+ t.Fatalf("expected to return body, got: %v", err)
+ }
+ data, err := json.Marshal(body)
+ if err != nil {
+ t.Fatalf("expected to marshal body as JSON, got: %v", err)
+ }
+ got := string(data)
+ expected := `{"script":{"lang":"groovy","params":{"tag":"blue"},"source":"ctx._source.tags += tag"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestUpdateViaScriptId(t *testing.T) {
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ scriptParams := map[string]interface{}{
+ "pageViewEvent": map[string]interface{}{
+ "url": "foo.com/bar",
+ "response": 404,
+ "time": "2014-01-01 12:32",
+ },
+ }
+ script := NewScriptStored("my_web_session_summariser").Params(scriptParams)
+
+ update := client.Update().
+ Index("sessions").Type("session").Id("dh3sgudg8gsrgl").
+ Script(script).
+ ScriptedUpsert(true).
+ Upsert(map[string]interface{}{})
+ path, params, err := update.url()
+ if err != nil {
+ t.Fatalf("expected to return URL, got: %v", err)
+ }
+ expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update`
+ if expectedPath != path {
+ t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
+ }
+ expectedParams := url.Values{}
+ if expectedParams.Encode() != params.Encode() {
+ t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
+ }
+ body, err := update.body()
+ if err != nil {
+ t.Fatalf("expected to return body, got: %v", err)
+ }
+ data, err := json.Marshal(body)
+ if err != nil {
+ t.Fatalf("expected to marshal body as JSON, got: %v", err)
+ }
+ got := string(data)
+ expected := `{"script":{"id":"my_web_session_summariser","params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}}},"scripted_upsert":true,"upsert":{}}`
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestUpdateViaScriptAndUpsert(t *testing.T) {
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ update := client.Update().
+ Index("test").Type("type1").Id("1").
+ Script(NewScript("ctx._source.counter += count").Params(map[string]interface{}{"count": 4})).
+ Upsert(map[string]interface{}{"counter": 1})
+ path, params, err := update.url()
+ if err != nil {
+ t.Fatalf("expected to return URL, got: %v", err)
+ }
+ expectedPath := `/test/type1/1/_update`
+ if expectedPath != path {
+ t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
+ }
+ expectedParams := url.Values{}
+ if expectedParams.Encode() != params.Encode() {
+ t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
+ }
+ body, err := update.body()
+ if err != nil {
+ t.Fatalf("expected to return body, got: %v", err)
+ }
+ data, err := json.Marshal(body)
+ if err != nil {
+ t.Fatalf("expected to marshal body as JSON, got: %v", err)
+ }
+ got := string(data)
+ expected := `{"script":{"params":{"count":4},"source":"ctx._source.counter += count"},"upsert":{"counter":1}}`
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestUpdateViaDoc(t *testing.T) {
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ update := client.Update().
+ Index("test").Type("type1").Id("1").
+ Doc(map[string]interface{}{"name": "new_name"}).
+ DetectNoop(true)
+ path, params, err := update.url()
+ if err != nil {
+ t.Fatalf("expected to return URL, got: %v", err)
+ }
+ expectedPath := `/test/type1/1/_update`
+ if expectedPath != path {
+ t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
+ }
+ expectedParams := url.Values{}
+ if expectedParams.Encode() != params.Encode() {
+ t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
+ }
+ body, err := update.body()
+ if err != nil {
+ t.Fatalf("expected to return body, got: %v", err)
+ }
+ data, err := json.Marshal(body)
+ if err != nil {
+ t.Fatalf("expected to marshal body as JSON, got: %v", err)
+ }
+ got := string(data)
+ expected := `{"detect_noop":true,"doc":{"name":"new_name"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestUpdateViaDocAndUpsert(t *testing.T) {
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ update := client.Update().
+ Index("test").Type("type1").Id("1").
+ Doc(map[string]interface{}{"name": "new_name"}).
+ DocAsUpsert(true).
+ Timeout("1s").
+ Refresh("true")
+ path, params, err := update.url()
+ if err != nil {
+ t.Fatalf("expected to return URL, got: %v", err)
+ }
+ expectedPath := `/test/type1/1/_update`
+ if expectedPath != path {
+ t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
+ }
+ expectedParams := url.Values{"refresh": []string{"true"}, "timeout": []string{"1s"}}
+ if expectedParams.Encode() != params.Encode() {
+ t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
+ }
+ body, err := update.body()
+ if err != nil {
+ t.Fatalf("expected to return body, got: %v", err)
+ }
+ data, err := json.Marshal(body)
+ if err != nil {
+ t.Fatalf("expected to marshal body as JSON, got: %v", err)
+ }
+ got := string(data)
+ expected := `{"doc":{"name":"new_name"},"doc_as_upsert":true}`
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestUpdateViaDocAndUpsertAndFetchSource(t *testing.T) {
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ update := client.Update().
+ Index("test").Type("type1").Id("1").
+ Doc(map[string]interface{}{"name": "new_name"}).
+ DocAsUpsert(true).
+ Timeout("1s").
+ Refresh("true").
+ FetchSource(true)
+ path, params, err := update.url()
+ if err != nil {
+ t.Fatalf("expected to return URL, got: %v", err)
+ }
+ expectedPath := `/test/type1/1/_update`
+ if expectedPath != path {
+ t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
+ }
+ expectedParams := url.Values{
+ "refresh": []string{"true"},
+ "timeout": []string{"1s"},
+ }
+ if expectedParams.Encode() != params.Encode() {
+ t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
+ }
+ body, err := update.body()
+ if err != nil {
+ t.Fatalf("expected to return body, got: %v", err)
+ }
+ data, err := json.Marshal(body)
+ if err != nil {
+ t.Fatalf("expected to marshal body as JSON, got: %v", err)
+ }
+ got := string(data)
+ expected := `{"_source":true,"doc":{"name":"new_name"},"doc_as_upsert":true}`
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
+
+func TestUpdateAndFetchSource(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ res, err := client.Update().
+ Index(testIndexName).Type("doc").Id("1").
+ Doc(map[string]interface{}{"user": "sandrae"}).
+ DetectNoop(true).
+ FetchSource(true).
+ Do(context.Background())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res == nil {
+ t.Fatal("expected response != nil")
+ }
+ if res.GetResult == nil {
+ t.Fatal("expected GetResult != nil")
+ }
+ data, err := json.Marshal(res.GetResult.Source)
+ if err != nil {
+ t.Fatalf("expected to marshal body as JSON, got: %v", err)
+ }
+ got := string(data)
+ expected := `{"user":"sandrae","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"}`
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ }
+}
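
The url()/body() assertions above exercise the internals; the same doc-as-upsert call through the public API looks roughly like this (a sketch mirroring TestUpdateViaDocAndUpsertAndFetchSource and TestUpdateAndFetchSource, assuming a configured client and illustrative names):

    res, err := client.Update().
        Index("test").Type("type1").Id("1").
        Doc(map[string]interface{}{"name": "new_name"}).
        DocAsUpsert(true).
        FetchSource(true).
        Do(context.Background())
    if err != nil {
        // handle error
    }
    // With FetchSource(true), res.GetResult.Source carries the post-update document.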
diff --git a/vendor/github.com/olivere/elastic/uritemplates/LICENSE b/vendor/github.com/olivere/elastic/uritemplates/LICENSE
new file mode 100644
index 000000000..de9c88cb6
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/uritemplates/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2013 Joshua Tacoma
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/olivere/elastic/uritemplates/uritemplates.go b/vendor/github.com/olivere/elastic/uritemplates/uritemplates.go
new file mode 100644
index 000000000..8a84813fe
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/uritemplates/uritemplates.go
@@ -0,0 +1,359 @@
+// Copyright 2013 Joshua Tacoma. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uritemplates is a level 4 implementation of RFC 6570 (URI
+// Template, http://tools.ietf.org/html/rfc6570).
+//
+// To use uritemplates, parse a template string and expand it with a value
+// map:
+//
+// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
+// values := make(map[string]interface{})
+// values["user"] = "jtacoma"
+// values["repo"] = "uritemplates"
+// expanded, _ := template.ExpandString(values)
+//	fmt.Println(expanded)
+//
+package uritemplates
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
+ reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
+ validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
+ hex = []byte("0123456789ABCDEF")
+)
+
+func pctEncode(src []byte) []byte {
+ dst := make([]byte, len(src)*3)
+ for i, b := range src {
+ buf := dst[i*3 : i*3+3]
+ buf[0] = 0x25
+ buf[1] = hex[b/16]
+ buf[2] = hex[b%16]
+ }
+ return dst
+}
+
+func escape(s string, allowReserved bool) (escaped string) {
+ if allowReserved {
+ escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
+ } else {
+ escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
+ }
+ return escaped
+}
+
+// A UriTemplate is a parsed representation of a URI template.
+type UriTemplate struct {
+ raw string
+ parts []templatePart
+}
+
+// Parse parses a URI template string into a UriTemplate object.
+func Parse(rawtemplate string) (template *UriTemplate, err error) {
+ template = new(UriTemplate)
+ template.raw = rawtemplate
+ split := strings.Split(rawtemplate, "{")
+ template.parts = make([]templatePart, len(split)*2-1)
+ for i, s := range split {
+ if i == 0 {
+ if strings.Contains(s, "}") {
+ err = errors.New("unexpected }")
+ break
+ }
+ template.parts[i].raw = s
+ } else {
+ subsplit := strings.Split(s, "}")
+ if len(subsplit) != 2 {
+ err = errors.New("malformed template")
+ break
+ }
+ expression := subsplit[0]
+ template.parts[i*2-1], err = parseExpression(expression)
+ if err != nil {
+ break
+ }
+ template.parts[i*2].raw = subsplit[1]
+ }
+ }
+ if err != nil {
+ template = nil
+ }
+ return template, err
+}
+
+type templatePart struct {
+ raw string
+ terms []templateTerm
+ first string
+ sep string
+ named bool
+ ifemp string
+ allowReserved bool
+}
+
+type templateTerm struct {
+ name string
+ explode bool
+ truncate int
+}
+
+func parseExpression(expression string) (result templatePart, err error) {
+ switch expression[0] {
+ case '+':
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ case '.':
+ result.first = "."
+ result.sep = "."
+ expression = expression[1:]
+ case '/':
+ result.first = "/"
+ result.sep = "/"
+ expression = expression[1:]
+ case ';':
+ result.first = ";"
+ result.sep = ";"
+ result.named = true
+ expression = expression[1:]
+ case '?':
+ result.first = "?"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '&':
+ result.first = "&"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '#':
+ result.first = "#"
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ default:
+ result.sep = ","
+ }
+ rawterms := strings.Split(expression, ",")
+ result.terms = make([]templateTerm, len(rawterms))
+ for i, raw := range rawterms {
+ result.terms[i], err = parseTerm(raw)
+ if err != nil {
+ break
+ }
+ }
+ return result, err
+}
+
+func parseTerm(term string) (result templateTerm, err error) {
+ if strings.HasSuffix(term, "*") {
+ result.explode = true
+ term = term[:len(term)-1]
+ }
+ split := strings.Split(term, ":")
+ if len(split) == 1 {
+ result.name = term
+ } else if len(split) == 2 {
+ result.name = split[0]
+ var parsed int64
+ parsed, err = strconv.ParseInt(split[1], 10, 0)
+ result.truncate = int(parsed)
+ } else {
+ err = errors.New("multiple colons in same term")
+ }
+ if !validname.MatchString(result.name) {
+ err = errors.New("not a valid name: " + result.name)
+ }
+ if result.explode && result.truncate > 0 {
+ err = errors.New("both explode and prefix modifers on same term")
+ }
+ return result, err
+}
+
+// Expand expands a URI template with a set of values to produce a string.
+func (self *UriTemplate) Expand(value interface{}) (string, error) {
+ values, ismap := value.(map[string]interface{})
+ if !ismap {
+ if m, ismap := struct2map(value); !ismap {
+ return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
+ } else {
+ return self.Expand(m)
+ }
+ }
+ var buf bytes.Buffer
+ for _, p := range self.parts {
+ err := p.expand(&buf, values)
+ if err != nil {
+ return "", err
+ }
+ }
+ return buf.String(), nil
+}
+
+func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
+ if len(self.raw) > 0 {
+ buf.WriteString(self.raw)
+ return nil
+ }
+ var zeroLen = buf.Len()
+ buf.WriteString(self.first)
+ var firstLen = buf.Len()
+ for _, term := range self.terms {
+ value, exists := values[term.name]
+ if !exists {
+ continue
+ }
+ if buf.Len() != firstLen {
+ buf.WriteString(self.sep)
+ }
+ switch v := value.(type) {
+ case string:
+ self.expandString(buf, term, v)
+ case []interface{}:
+ self.expandArray(buf, term, v)
+ case map[string]interface{}:
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, v)
+ default:
+ if m, ismap := struct2map(value); ismap {
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, m)
+ } else {
+ str := fmt.Sprintf("%v", value)
+ self.expandString(buf, term, str)
+ }
+ }
+ }
+ if buf.Len() == firstLen {
+ original := buf.Bytes()[:zeroLen]
+ buf.Reset()
+ buf.Write(original)
+ }
+ return nil
+}
+
+func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
+ if self.named {
+ buf.WriteString(name)
+ if empty {
+ buf.WriteString(self.ifemp)
+ } else {
+ buf.WriteString("=")
+ }
+ }
+}
+
+func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ self.expandName(buf, t.name, len(s) == 0)
+ buf.WriteString(escape(s, self.allowReserved))
+}
+
+func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
+ if len(a) == 0 {
+ return
+ } else if !t.explode {
+ self.expandName(buf, t.name, false)
+ }
+ for i, value := range a {
+ if t.explode && i > 0 {
+ buf.WriteString(self.sep)
+ } else if i > 0 {
+ buf.WriteString(",")
+ }
+ var s string
+ switch v := value.(type) {
+ case string:
+ s = v
+ default:
+ s = fmt.Sprintf("%v", v)
+ }
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ if self.named && t.explode {
+ self.expandName(buf, t.name, len(s) == 0)
+ }
+ buf.WriteString(escape(s, self.allowReserved))
+ }
+}
+
+func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
+ if len(m) == 0 {
+ return
+ }
+ if !t.explode {
+ self.expandName(buf, t.name, len(m) == 0)
+ }
+ var firstLen = buf.Len()
+ for k, value := range m {
+ if firstLen != buf.Len() {
+ if t.explode {
+ buf.WriteString(self.sep)
+ } else {
+ buf.WriteString(",")
+ }
+ }
+ var s string
+ switch v := value.(type) {
+ case string:
+ s = v
+ default:
+ s = fmt.Sprintf("%v", v)
+ }
+ if t.explode {
+ buf.WriteString(escape(k, self.allowReserved))
+ buf.WriteRune('=')
+ buf.WriteString(escape(s, self.allowReserved))
+ } else {
+ buf.WriteString(escape(k, self.allowReserved))
+ buf.WriteRune(',')
+ buf.WriteString(escape(s, self.allowReserved))
+ }
+ }
+}
+
+func struct2map(v interface{}) (map[string]interface{}, bool) {
+ value := reflect.ValueOf(v)
+ switch value.Type().Kind() {
+ case reflect.Ptr:
+ return struct2map(value.Elem().Interface())
+ case reflect.Struct:
+ m := make(map[string]interface{})
+ for i := 0; i < value.NumField(); i++ {
+ tag := value.Type().Field(i).Tag
+ var name string
+ if strings.Contains(string(tag), ":") {
+ name = tag.Get("uri")
+ } else {
+ name = strings.TrimSpace(string(tag))
+ }
+ if len(name) == 0 {
+ name = value.Type().Field(i).Name
+ }
+ m[name] = value.Field(i).Interface()
+ }
+ return m, true
+ }
+ return nil, false
+}
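
The operator cases in parseExpression correspond to the RFC 6570 expression types; a short sketch of the behaviour (expected output follows the RFC, it is not asserted by this diff):

    t, _ := uritemplates.Parse("https://api.github.com{/user,repo}{?q}")
    s, _ := t.Expand(map[string]interface{}{
        "user": "jtacoma",
        "repo": "uritemplates",
        "q":    "go toml",
    })
    // s == "https://api.github.com/jtacoma/uritemplates?q=go%20toml"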
diff --git a/vendor/github.com/olivere/elastic/uritemplates/utils.go b/vendor/github.com/olivere/elastic/uritemplates/utils.go
new file mode 100644
index 000000000..399ef4623
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/uritemplates/utils.go
@@ -0,0 +1,13 @@
+package uritemplates
+
+func Expand(path string, expansions map[string]string) (string, error) {
+ template, err := Parse(path)
+ if err != nil {
+ return "", err
+ }
+ values := make(map[string]interface{})
+ for k, v := range expansions {
+ values[k] = v
+ }
+ return template.Expand(values)
+}
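
This wrapper is the entry point the elastic services above go through when building request paths; a sketch reproducing the multi-index case asserted in update_by_query_test.go:

    path, err := uritemplates.Expand("/{index}/_update_by_query", map[string]string{
        "index": "index1,index2",
    })
    // err == nil; path == "/index1%2Cindex2/_update_by_query"
    // (the comma is percent-encoded because default expansion escapes reserved characters)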
diff --git a/vendor/github.com/olivere/elastic/uritemplates/utils_test.go b/vendor/github.com/olivere/elastic/uritemplates/utils_test.go
new file mode 100644
index 000000000..633949b6f
--- /dev/null
+++ b/vendor/github.com/olivere/elastic/uritemplates/utils_test.go
@@ -0,0 +1,105 @@
+package uritemplates
+
+import (
+ "testing"
+)
+
+type ExpandTest struct {
+ in string
+ expansions map[string]string
+ want string
+}
+
+var expandTests = []ExpandTest{
+ // #0: no expansions
+ {
+ "http://www.golang.org/",
+ map[string]string{},
+ "http://www.golang.org/",
+ },
+ // #1: one expansion, no escaping
+ {
+ "http://www.golang.org/{bucket}/delete",
+ map[string]string{
+ "bucket": "red",
+ },
+ "http://www.golang.org/red/delete",
+ },
+ // #2: one expansion, with hex escapes
+ {
+ "http://www.golang.org/{bucket}/delete",
+ map[string]string{
+ "bucket": "red/blue",
+ },
+ "http://www.golang.org/red%2Fblue/delete",
+ },
+ // #3: one expansion, with space
+ {
+ "http://www.golang.org/{bucket}/delete",
+ map[string]string{
+ "bucket": "red or blue",
+ },
+ "http://www.golang.org/red%20or%20blue/delete",
+ },
+ // #4: expansion not found
+ {
+ "http://www.golang.org/{object}/delete",
+ map[string]string{
+ "bucket": "red or blue",
+ },
+ "http://www.golang.org//delete",
+ },
+ // #5: multiple expansions
+ {
+ "http://www.golang.org/{one}/{two}/{three}/get",
+ map[string]string{
+ "one": "ONE",
+ "two": "TWO",
+ "three": "THREE",
+ },
+ "http://www.golang.org/ONE/TWO/THREE/get",
+ },
+ // #6: utf-8 characters
+ {
+ "http://www.golang.org/{bucket}/get",
+ map[string]string{
+ "bucket": "£100",
+ },
+ "http://www.golang.org/%C2%A3100/get",
+ },
+ // #7: punctuations
+ {
+ "http://www.golang.org/{bucket}/get",
+ map[string]string{
+ "bucket": `/\@:,.*~`,
+ },
+ "http://www.golang.org/%2F%5C%40%3A%2C.%2A~/get",
+ },
+ // #8: mis-matched brackets
+ {
+ "http://www.golang.org/{bucket/get",
+ map[string]string{
+ "bucket": "red",
+ },
+ "",
+ },
+ // #9: "+" prefix for suppressing escape
+ // See also: http://tools.ietf.org/html/rfc6570#section-3.2.3
+ {
+ "http://www.golang.org/{+topic}",
+ map[string]string{
+ "topic": "/topics/myproject/mytopic",
+ },
+ // The double slashes here look weird, but it's intentional
+ "http://www.golang.org//topics/myproject/mytopic",
+ },
+}
+
+func TestExpand(t *testing.T) {
+ for i, test := range expandTests {
+ got, _ := Expand(test.in, test.expansions)
+ if got != test.want {
+ t.Errorf("got %q expected %q in test %d", got, test.want, i)
+ }
+ }
+}
diff --git a/vendor/github.com/pelletier/go-toml/.travis.yml b/vendor/github.com/pelletier/go-toml/.travis.yml
index 6e644fdfd..ab2775d7d 100644
--- a/vendor/github.com/pelletier/go-toml/.travis.yml
+++ b/vendor/github.com/pelletier/go-toml/.travis.yml
@@ -1,8 +1,8 @@
sudo: false
language: go
go:
- - 1.8.4
- - 1.9.1
+ - 1.8.5
+ - 1.9.2
- tip
matrix:
allow_failures:
diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go
index 0da938b03..284db6467 100644
--- a/vendor/github.com/pelletier/go-toml/keysparsing.go
+++ b/vendor/github.com/pelletier/go-toml/keysparsing.go
@@ -6,36 +6,16 @@ import (
"bytes"
"errors"
"fmt"
- "strconv"
"unicode"
)
-var escapeSequenceMap = map[rune]rune{
- 'b': '\b',
- 't': '\t',
- 'n': '\n',
- 'f': '\f',
- 'r': '\r',
- '"': '"',
- '\\': '\\',
-}
-
-type parseKeyState int
-
-const (
- bare parseKeyState = iota
- basic
- literal
- esc
- unicode4
- unicode8
-)
-
+// parseKey converts the bare key group string to an array of key parts.
+// Double-quoted segments may contain "." inside a key name, but escape
+// sequences are not interpreted here; the lexer must unescape them beforehand.
func parseKey(key string) ([]string, error) {
groups := []string{}
var buffer bytes.Buffer
- var hex bytes.Buffer
- state := bare
+ inQuotes := false
wasInQuotes := false
ignoreSpace := true
expectDot := false
@@ -47,67 +27,17 @@ func parseKey(key string) ([]string, error) {
}
ignoreSpace = false
}
-
- if state == esc {
- if char == 'u' {
- state = unicode4
- hex.Reset()
- } else if char == 'U' {
- state = unicode8
- hex.Reset()
- } else if newChar, ok := escapeSequenceMap[char]; ok {
- buffer.WriteRune(newChar)
- state = basic
- } else {
- return nil, fmt.Errorf(`invalid escape sequence \%c`, char)
- }
- continue
- }
-
- if state == unicode4 || state == unicode8 {
- if isHexDigit(char) {
- hex.WriteRune(char)
- }
- if (state == unicode4 && hex.Len() == 4) || (state == unicode8 && hex.Len() == 8) {
- if value, err := strconv.ParseInt(hex.String(), 16, 32); err == nil {
- buffer.WriteRune(rune(value))
- } else {
- return nil, err
- }
- state = basic
- }
- continue
- }
-
switch char {
- case '\\':
- if state == basic {
- state = esc
- } else if state == literal {
- buffer.WriteRune(char)
- }
- case '\'':
- if state == bare {
- state = literal
- } else if state == literal {
- groups = append(groups, buffer.String())
- buffer.Reset()
- wasInQuotes = true
- state = bare
- }
- expectDot = false
case '"':
- if state == bare {
- state = basic
- } else if state == basic {
+ if inQuotes {
groups = append(groups, buffer.String())
buffer.Reset()
- state = bare
wasInQuotes = true
}
+ inQuotes = !inQuotes
expectDot = false
case '.':
- if state != bare {
+ if inQuotes {
buffer.WriteRune(char)
} else {
if !wasInQuotes {
@@ -122,31 +52,25 @@ func parseKey(key string) ([]string, error) {
wasInQuotes = false
}
case ' ':
- if state == basic {
+ if inQuotes {
buffer.WriteRune(char)
} else {
expectDot = true
}
default:
- if state == bare {
- if !isValidBareChar(char) {
- return nil, fmt.Errorf("invalid bare character: %c", char)
- } else if expectDot {
- return nil, errors.New("what?")
- }
+ if !inQuotes && !isValidBareChar(char) {
+ return nil, fmt.Errorf("invalid bare character: %c", char)
+ }
+ if !inQuotes && expectDot {
+ return nil, errors.New("what?")
}
buffer.WriteRune(char)
expectDot = false
}
}
-
- // state must be bare at the end
- if state == esc {
- return nil, errors.New("unfinished escape sequence")
- } else if state != bare {
+ if inQuotes {
return nil, errors.New("mismatched quotes")
}
-
if buffer.Len() > 0 {
groups = append(groups, buffer.String())
}
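
With escaping delegated to the lexer, parseKey reduces to splitting on unquoted dots. A sketch of the groupings it should now produce, mirroring the tests below (parseKey is unexported, so this only runs inside the package):

    groups, err := parseKey(`foo."ba.r".baz`)
    // err == nil; groups == []string{"foo", "ba.r", "baz"}

    groups, err = parseKey(`"hello\tworld"`)
    // escape sequences pass through verbatim: groups == []string{`hello\tworld`}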
diff --git a/vendor/github.com/pelletier/go-toml/keysparsing_test.go b/vendor/github.com/pelletier/go-toml/keysparsing_test.go
index 7aa4cd64a..84cb82604 100644
--- a/vendor/github.com/pelletier/go-toml/keysparsing_test.go
+++ b/vendor/github.com/pelletier/go-toml/keysparsing_test.go
@@ -50,17 +50,10 @@ func TestBaseKeyPound(t *testing.T) {
func TestQuotedKeys(t *testing.T) {
testResult(t, `hello."foo".bar`, []string{"hello", "foo", "bar"})
testResult(t, `"hello!"`, []string{"hello!"})
- testResult(t, `"hello\tworld"`, []string{"hello\tworld"})
- testResult(t, `"\U0001F914"`, []string{"\U0001F914"})
- testResult(t, `"\u2764"`, []string{"\u2764"})
+ testResult(t, `foo."ba.r".baz`, []string{"foo", "ba.r", "baz"})
- testResult(t, `hello.'foo'.bar`, []string{"hello", "foo", "bar"})
- testResult(t, `'hello!'`, []string{"hello!"})
- testResult(t, `'hello\tworld'`, []string{`hello\tworld`})
-
- testError(t, `"\w"`, `invalid escape sequence \w`)
- testError(t, `"\`, `unfinished escape sequence`)
- testError(t, `"\t`, `mismatched quotes`)
+ // escape sequences must not be converted
+ testResult(t, `"hello\tworld"`, []string{`hello\tworld`})
}
func TestEmptyKey(t *testing.T) {
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
index 209665676..d11de4285 100644
--- a/vendor/github.com/pelletier/go-toml/lexer.go
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -204,6 +204,14 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
return l.lexFalse
}
+ if l.follow("inf") {
+ return l.lexInf
+ }
+
+ if l.follow("nan") {
+ return l.lexNan
+ }
+
if isSpace(next) {
l.skip()
continue
@@ -265,6 +273,18 @@ func (l *tomlLexer) lexFalse() tomlLexStateFn {
return l.lexRvalue
}
+func (l *tomlLexer) lexInf() tomlLexStateFn {
+ l.fastForward(3)
+ l.emit(tokenInf)
+ return l.lexRvalue
+}
+
+func (l *tomlLexer) lexNan() tomlLexStateFn {
+ l.fastForward(3)
+ l.emit(tokenNan)
+ return l.lexRvalue
+}
+
func (l *tomlLexer) lexEqual() tomlLexStateFn {
l.next()
l.emit(tokenEqual)
@@ -277,6 +297,8 @@ func (l *tomlLexer) lexComma() tomlLexStateFn {
return l.lexRvalue
}
+// Parses the key and emits its value without escape sequences.
+// Bare keys, basic string keys, and literal string keys are supported.
func (l *tomlLexer) lexKey() tomlLexStateFn {
growingString := ""
@@ -287,7 +309,16 @@ func (l *tomlLexer) lexKey() tomlLexStateFn {
if err != nil {
return l.errorf(err.Error())
}
- growingString += `"` + str + `"`
+ growingString += str
+ l.next()
+ continue
+ } else if r == '\'' {
+ l.next()
+ str, err := l.lexLiteralStringAsString(`'`, false)
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+ growingString += str
l.next()
continue
} else if r == '\n' {
@@ -527,6 +558,7 @@ func (l *tomlLexer) lexTableKey() tomlLexStateFn {
return l.lexInsideTableKey
}
+// Parses the key until "]]"; only bare keys are supported.
func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
for r := l.peek(); r != eof; r = l.peek() {
switch r {
@@ -550,6 +582,7 @@ func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
return l.errorf("unclosed table array key")
}
+// Parses the key until "]"; only bare keys are supported.
func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
for r := l.peek(); r != eof; r = l.peek() {
switch r {
@@ -638,7 +671,14 @@ func (l *tomlLexer) lexNumber() tomlLexStateFn {
if r == '+' || r == '-' {
l.next()
+ if l.follow("inf") {
+ return l.lexInf
+ }
+ if l.follow("nan") {
+ return l.lexNan
+ }
}
+
pointSeen := false
expSeen := false
digitSeen := false
diff --git a/vendor/github.com/pelletier/go-toml/lexer_test.go b/vendor/github.com/pelletier/go-toml/lexer_test.go
index 313b83c5d..cb4913031 100644
--- a/vendor/github.com/pelletier/go-toml/lexer_test.go
+++ b/vendor/github.com/pelletier/go-toml/lexer_test.go
@@ -690,7 +690,7 @@ func TestKeyGroupArray(t *testing.T) {
func TestQuotedKey(t *testing.T) {
testFlow(t, "\"a b\" = 42", []token{
- {Position{1, 1}, tokenKey, "\"a b\""},
+ {Position{1, 1}, tokenKey, "a b"},
{Position{1, 7}, tokenEqual, "="},
{Position{1, 9}, tokenInteger, "42"},
{Position{1, 11}, tokenEOF, ""},
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
index 6280225e9..b5a241505 100644
--- a/vendor/github.com/pelletier/go-toml/marshal.go
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -230,7 +230,7 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er
if err != nil {
return nil, err
}
- tval.Set(opts.name, opts.comment, opts.commented, val)
+ tval.SetWithComment(opts.name, opts.comment, opts.commented, val)
}
}
case reflect.Map:
@@ -245,9 +245,9 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er
if err != nil {
return nil, err
}
- tval.SetPath([]string{keyStr}, "", false, val)
+ tval.SetPath([]string{keyStr}, val)
} else {
- tval.Set(key.String(), "", false, val)
+ tval.Set(key.String(), val)
}
}
}
@@ -486,96 +486,55 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.V
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval)
default:
switch mtype.Kind() {
- case reflect.Bool:
- val, ok := tval.(bool)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to bool", tval, tval)
- }
- return reflect.ValueOf(val), nil
- case reflect.Int:
- val, ok := tval.(int64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
- }
- return reflect.ValueOf(int(val)), nil
- case reflect.Int8:
- val, ok := tval.(int64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
- }
- return reflect.ValueOf(int8(val)), nil
- case reflect.Int16:
- val, ok := tval.(int64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
- }
- return reflect.ValueOf(int16(val)), nil
- case reflect.Int32:
- val, ok := tval.(int64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+ case reflect.Bool, reflect.Struct:
+ val := reflect.ValueOf(tval)
+ // when mtype is reflect.Struct, a tval that passes this check is a time.Time
+ if !val.Type().ConvertibleTo(mtype) {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
}
- return reflect.ValueOf(int32(val)), nil
- case reflect.Int64:
- val, ok := tval.(int64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
- }
- return reflect.ValueOf(val), nil
- case reflect.Uint:
- val, ok := tval.(int64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
- }
- return reflect.ValueOf(uint(val)), nil
- case reflect.Uint8:
- val, ok := tval.(int64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+
+ return val.Convert(mtype), nil
+ case reflect.String:
+ val := reflect.ValueOf(tval)
+ // stupidly, int64 is convertible to string. So special case this.
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
}
- return reflect.ValueOf(uint8(val)), nil
- case reflect.Uint16:
- val, ok := tval.(int64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+
+ return val.Convert(mtype), nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ val := reflect.ValueOf(tval)
+ if !val.Type().ConvertibleTo(mtype) {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
}
- return reflect.ValueOf(uint16(val)), nil
- case reflect.Uint32:
- val, ok := tval.(int64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+ if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
}
- return reflect.ValueOf(uint32(val)), nil
- case reflect.Uint64:
- val, ok := tval.(int64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+
+ return val.Convert(mtype), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ val := reflect.ValueOf(tval)
+ if !val.Type().ConvertibleTo(mtype) {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
}
- return reflect.ValueOf(uint64(val)), nil
- case reflect.Float32:
- val, ok := tval.(float64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
+ if val.Int() < 0 {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String())
}
- return reflect.ValueOf(float32(val)), nil
- case reflect.Float64:
- val, ok := tval.(float64)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
+ if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
}
- return reflect.ValueOf(val), nil
- case reflect.String:
- val, ok := tval.(string)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to string", tval, tval)
+
+ return val.Convert(mtype), nil
+ case reflect.Float32, reflect.Float64:
+ val := reflect.ValueOf(tval)
+ if !val.Type().ConvertibleTo(mtype) {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
}
- return reflect.ValueOf(val), nil
- case reflect.Struct:
- val, ok := tval.(time.Time)
- if !ok {
- return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to time", tval, tval)
+ if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) {
+ return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
}
- return reflect.ValueOf(val), nil
+
+ return val.Convert(mtype), nil
default:
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind())
}
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
index 0f2ab7a33..2d27599a9 100644
--- a/vendor/github.com/pelletier/go-toml/parser.go
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -5,6 +5,7 @@ package toml
import (
"errors"
"fmt"
+ "math"
"reflect"
"regexp"
"strconv"
@@ -110,7 +111,7 @@ func (p *tomlParser) parseGroupArray() tomlParserStateFn {
newTree := newTree()
newTree.position = startToken.Position
array = append(array, newTree)
- p.tree.SetPath(p.currentTable, "", false, array)
+ p.tree.SetPath(p.currentTable, array)
// remove all keys that were children of this table array
prefix := key.val + "."
@@ -185,10 +186,7 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
}
// assign value to the found table
- keyVals, err := parseKey(key.val)
- if err != nil {
- p.raiseError(key, "%s", err)
- }
+ keyVals := []string{key.val}
if len(keyVals) != 1 {
p.raiseError(key, "Invalid key")
}
@@ -246,6 +244,13 @@ func (p *tomlParser) parseRvalue() interface{} {
return true
case tokenFalse:
return false
+ case tokenInf:
+ if tok.val[0] == '-' {
+ return math.Inf(-1)
+ }
+ return math.Inf(1)
+ case tokenNan:
+ return math.NaN()
case tokenInteger:
cleanedVal := cleanupNumberToken(tok.val)
var err error
@@ -340,7 +345,7 @@ Loop:
key := p.getToken()
p.assume(tokenEqual)
value := p.parseRvalue()
- tree.Set(key.val, "", false, value)
+ tree.Set(key.val, value)
case tokenComma:
if previous == nil {
p.raiseError(follow, "inline table cannot start with a comma")
@@ -350,7 +355,7 @@ Loop:
}
p.getToken()
default:
- p.raiseError(follow, "unexpected token type in inline table: %s", follow.typ.String())
+ p.raiseError(follow, "unexpected token type in inline table: %s", follow.String())
}
previous = follow
}
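
With tokenInf and tokenNan handled in parseRvalue, TOML 0.5 special floats surface as the corresponding Go values. A sketch via the public Load entry point, matching TestSpecialFloats below:

    tree, err := toml.Load("a = inf\nb = -inf\nc = nan")
    if err != nil {
        // handle error
    }
    a := tree.Get("a").(float64) // math.IsInf(a, 1) is true
    b := tree.Get("b").(float64) // math.IsInf(b, -1) is true
    c := tree.Get("c").(float64) // math.IsNaN(c) is true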
diff --git a/vendor/github.com/pelletier/go-toml/parser_test.go b/vendor/github.com/pelletier/go-toml/parser_test.go
index 6c8eec6a3..ca29c442e 100644
--- a/vendor/github.com/pelletier/go-toml/parser_test.go
+++ b/vendor/github.com/pelletier/go-toml/parser_test.go
@@ -2,6 +2,7 @@ package toml
import (
"fmt"
+ "math"
"reflect"
"testing"
"time"
@@ -46,7 +47,7 @@ func assertTree(t *testing.T, tree *Tree, err error, ref map[string]interface{})
func TestCreateSubTree(t *testing.T) {
tree := newTree()
tree.createSubTree([]string{"a", "b", "c"}, Position{})
- tree.Set("a.b.c", "", false, 42)
+ tree.Set("a.b.c", 42)
if tree.Get("a.b.c") != 42 {
t.Fail()
}
@@ -72,6 +73,17 @@ func TestNumberInKey(t *testing.T) {
})
}
+func TestIncorrectKeyExtraSquareBracket(t *testing.T) {
+ _, err := Load(`[a]b]
+zyx = 42`)
+ if err == nil {
+ t.Error("Error should have been returned.")
+ }
+ if err.Error() != "(1, 4): unexpected token" {
+ t.Error("Bad error message:", err.Error())
+ }
+}
+
func TestSimpleNumbers(t *testing.T) {
tree, err := Load("a = +42\nb = -21\nc = +4.2\nd = -2.1")
assertTree(t, tree, err, map[string]interface{}{
@@ -82,6 +94,25 @@ func TestSimpleNumbers(t *testing.T) {
})
}
+func TestSpecialFloats(t *testing.T) {
+ tree, err := Load(`
+normalinf = inf
+plusinf = +inf
+minusinf = -inf
+normalnan = nan
+plusnan = +nan
+minusnan = -nan
+`)
+ assertTree(t, tree, err, map[string]interface{}{
+ "normalinf": math.Inf(1),
+ "plusinf": math.Inf(1),
+ "minusinf": math.Inf(-1),
+ "normalnan": math.NaN(),
+ "plusnan": math.NaN(),
+ "minusnan": math.NaN(),
+ })
+}
+
func TestHexIntegers(t *testing.T) {
tree, err := Load(`a = 0xDEADBEEF`)
assertTree(t, tree, err, map[string]interface{}{"a": int64(3735928559)})
@@ -208,6 +239,36 @@ func TestSpaceKey(t *testing.T) {
})
}
+func TestDoubleQuotedKey(t *testing.T) {
+ tree, err := Load(`
+ "key" = "a"
+ "\t" = "b"
+ "\U0001F914" = "c"
+ "\u2764" = "d"
+ `)
+ assertTree(t, tree, err, map[string]interface{}{
+ "key": "a",
+ "\t": "b",
+ "\U0001F914": "c",
+ "\u2764": "d",
+ })
+}
+
+func TestSingleQuotedKey(t *testing.T) {
+ tree, err := Load(`
+ 'key' = "a"
+ '\t' = "b"
+ '\U0001F914' = "c"
+ '\u2764' = "d"
+ `)
+ assertTree(t, tree, err, map[string]interface{}{
+ `key`: "a",
+ `\t`: "b",
+ `\U0001F914`: "c",
+ `\u2764`: "d",
+ })
+}
+
func TestStringEscapables(t *testing.T) {
tree, err := Load("a = \"a \\n b\"")
assertTree(t, tree, err, map[string]interface{}{
diff --git a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh
index 91a889670..a70a8b022 100755
--- a/vendor/github.com/pelletier/go-toml/test.sh
+++ b/vendor/github.com/pelletier/go-toml/test.sh
@@ -1,6 +1,7 @@
#!/bin/bash
# fail out of the script if anything here fails
set -e
+set -o pipefail
# set the path to the present working directory
export GOPATH=`pwd`
diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go
index 5581fe0bc..1a9081346 100644
--- a/vendor/github.com/pelletier/go-toml/token.go
+++ b/vendor/github.com/pelletier/go-toml/token.go
@@ -23,6 +23,8 @@ const (
tokenTrue
tokenFalse
tokenFloat
+ tokenInf
+ tokenNan
tokenEqual
tokenLeftBracket
tokenRightBracket
@@ -55,6 +57,8 @@ var tokenTypeNames = []string{
"True",
"False",
"Float",
+ "Inf",
+ "NaN",
"=",
"[",
"]",
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
index c3e324374..05493a444 100644
--- a/vendor/github.com/pelletier/go-toml/toml.go
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -71,18 +71,15 @@ func (t *Tree) Keys() []string {
}
// Get the value at key in the Tree.
-// Key is a dot-separated path (e.g. a.b.c).
+// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
+// If you need to retrieve non-bare keys, use GetPath.
// Returns nil if the path does not exist in the tree.
// If keys is of length zero, the current tree is returned.
func (t *Tree) Get(key string) interface{} {
if key == "" {
return t
}
- comps, err := parseKey(key)
- if err != nil {
- return nil
- }
- return t.GetPath(comps)
+ return t.GetPath(strings.Split(key, "."))
}
// GetPath returns the element in the tree indicated by 'keys'.
@@ -181,14 +178,26 @@ func (t *Tree) GetDefault(key string, def interface{}) interface{} {
// Set an element in the tree.
// Key is a dot-separated path (e.g. a.b.c).
// Creates all necessary intermediate trees, if needed.
-func (t *Tree) Set(key string, comment string, commented bool, value interface{}) {
- t.SetPath(strings.Split(key, "."), comment, commented, value)
+func (t *Tree) Set(key string, value interface{}) {
+ t.SetWithComment(key, "", false, value)
+}
+
+// SetWithComment is the same as Set, but allows you to provide comment
+// information to the key, that will be reused by Marshal().
+func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) {
+ t.SetPathWithComment(strings.Split(key, "."), comment, commented, value)
}
// SetPath sets an element in the tree.
// Keys is an array of path elements (e.g. {"a","b","c"}).
// Creates all necessary intermediate trees, if needed.
-func (t *Tree) SetPath(keys []string, comment string, commented bool, value interface{}) {
+func (t *Tree) SetPath(keys []string, value interface{}) {
+ t.SetPathWithComment(keys, "", false, value)
+}
+
+// SetPathWithComment is the same as SetPath, but allows you to provide comment
+// information to the key, that will be reused by Marshal().
+func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) {
subtree := t
for _, intermediateKey := range keys[:len(keys)-1] {
nextTree, exists := subtree.values[intermediateKey]
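
The net effect of this file's changes: Get now splits only on bare dots (quoted components need GetPath), and the comment arguments move off Set/SetPath onto the new *WithComment variants. A migration sketch:

    // before this change: tree.Set("a.b.c", "", false, 42)
    tree.Set("a.b.c", 42)
    tree.SetWithComment("a.b.c", "a comment kept by Marshal", false, 42)

    // keys containing "." can no longer go through Get; use GetPath instead
    v := tree.GetPath([]string{"a", "b.c"})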
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go
index f5ef124f0..d322a9764 100644
--- a/vendor/github.com/pelletier/go-toml/tomltree_write.go
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -54,9 +54,9 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
// Ensure a round float does contain a decimal point. Otherwise feeding
// the output back to the parser would convert to an integer.
if math.Trunc(value) == value {
- return strconv.FormatFloat(value, 'f', 1, 32), nil
+ return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil
}
- return strconv.FormatFloat(value, 'f', -1, 32), nil
+ return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil
case string:
return "\"" + encodeTomlString(value) + "\"", nil
case []byte:
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write_test.go b/vendor/github.com/pelletier/go-toml/tomltree_write_test.go
index 5ea59bc1a..206203b88 100644
--- a/vendor/github.com/pelletier/go-toml/tomltree_write_test.go
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write_test.go
@@ -309,6 +309,24 @@ func TestTreeWriteToFloat(t *testing.T) {
}
}
+func TestTreeWriteToSpecialFloat(t *testing.T) {
+ expected := `a = +inf
+b = -inf
+c = nan`
+
+ tree, err := Load(expected)
+ if err != nil {
+ t.Fatal(err)
+ }
+ str, err := tree.ToTomlString()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if strings.TrimSpace(str) != strings.TrimSpace(expected) {
+ t.Fatalf("Expected:\n%s\nGot:\n%s", expected, str)
+ }
+}
+
func BenchmarkTreeToTomlString(b *testing.B) {
toml, err := Load(sampleHard)
if err != nil {
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
index 7ca408d1b..588ceca18 100644
--- a/vendor/github.com/pkg/errors/.travis.yml
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -1,12 +1,10 @@
language: go
go_import_path: github.com/pkg/errors
go:
- - 1.4.x
- - 1.5.x
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
+ - 1.4.3
+ - 1.5.4
+ - 1.6.2
+ - 1.7.1
- tip
script:
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
index 6483ba2af..273db3c98 100644
--- a/vendor/github.com/pkg/errors/README.md
+++ b/vendor/github.com/pkg/errors/README.md
@@ -1,4 +1,4 @@
-# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
Package errors provides simple error handling primitives.
@@ -47,6 +47,6 @@ We welcome pull requests, bug fixes and issue reports. With that said, the bar f
Before proposing a change, please discuss your change by raising an issue.
-## License
+## Licence
BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/bench_test.go b/vendor/github.com/pkg/errors/bench_test.go
index 903b5f2d4..0416a3cbb 100644
--- a/vendor/github.com/pkg/errors/bench_test.go
+++ b/vendor/github.com/pkg/errors/bench_test.go
@@ -15,7 +15,6 @@ func noErrors(at, depth int) error {
}
return noErrors(at+1, depth)
}
-
func yesErrors(at, depth int) error {
if at >= depth {
return New("ye error")
@@ -23,11 +22,8 @@ func yesErrors(at, depth int) error {
return yesErrors(at+1, depth)
}
-// GlobalE is an exported global to store the result of benchmark results,
-// preventing the compiler from optimising the benchmark functions away.
-var GlobalE error
-
func BenchmarkErrors(b *testing.B) {
+ var toperr error
type run struct {
stack int
std bool
@@ -57,7 +53,7 @@ func BenchmarkErrors(b *testing.B) {
err = f(0, r.stack)
}
b.StopTimer()
- GlobalE = err
+ toperr = err
})
}
}
diff --git a/vendor/github.com/pkg/errors/errors_test.go b/vendor/github.com/pkg/errors/errors_test.go
index c4e6eef64..1d8c63558 100644
--- a/vendor/github.com/pkg/errors/errors_test.go
+++ b/vendor/github.com/pkg/errors/errors_test.go
@@ -196,6 +196,7 @@ func TestWithMessage(t *testing.T) {
t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want)
}
}
+
}
// errors.New, etc values are not expected to be compared by value
diff --git a/vendor/github.com/pkg/errors/format_test.go b/vendor/github.com/pkg/errors/format_test.go
index c2eef5f04..15fd7d89d 100644
--- a/vendor/github.com/pkg/errors/format_test.go
+++ b/vendor/github.com/pkg/errors/format_test.go
@@ -491,7 +491,7 @@ type wrapper struct {
want []string
}
-func prettyBlocks(blocks []string) string {
+func prettyBlocks(blocks []string, prefix ...string) string {
var out []string
for _, b := range blocks {
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
index b485761a7..6b1f2891a 100644
--- a/vendor/github.com/pkg/errors/stack.go
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -46,8 +46,7 @@ func (f Frame) line() int {
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
-// %+s function name and path of source file relative to the compile time
-// GOPATH separated by \n\t (<funcname>\n\t<path>)
+// %+s path of source file relative to the compile time GOPATH
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
@@ -80,14 +79,6 @@ func (f Frame) Format(s fmt.State, verb rune) {
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
-// Format formats the stack of Frames according to the fmt.Formatter interface.
-//
-// %s lists source files for each Frame in the stack
-// %v lists the source file and line number for each Frame in the stack
-//
-// Format accepts flags that alter the printing of some verbs, as follows:
-//
-// %+v Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
index 64cc40fe1..003e99fad 100644
--- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -559,10 +559,14 @@ type UnifiedDiff struct {
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
- w := func(format string, args ...interface{}) error {
+ wf := func(format string, args ...interface{}) error {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
return err
}
+ ws := func(s string) error {
+ _, err := buf.WriteString(s)
+ return err
+ }
if len(diff.Eol) == 0 {
diff.Eol = "\n"
@@ -581,26 +585,28 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
- err := w("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
- if err != nil {
- return err
- }
- err = w("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
- if err != nil {
- return err
+ if diff.FromFile != "" || diff.ToFile != "" {
+ err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if err != nil {
+ return err
+ }
}
}
first, last := g[0], g[len(g)-1]
range1 := formatRangeUnified(first.I1, last.I2)
range2 := formatRangeUnified(first.J1, last.J2)
- if err := w("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+ if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
return err
}
for _, c := range g {
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
if c.Tag == 'e' {
for _, line := range diff.A[i1:i2] {
- if err := w(" " + line); err != nil {
+ if err := ws(" " + line); err != nil {
return err
}
}
@@ -608,14 +614,14 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
}
if c.Tag == 'r' || c.Tag == 'd' {
for _, line := range diff.A[i1:i2] {
- if err := w("-" + line); err != nil {
+ if err := ws("-" + line); err != nil {
return err
}
}
}
if c.Tag == 'r' || c.Tag == 'i' {
for _, line := range diff.B[j1:j2] {
- if err := w("+" + line); err != nil {
+ if err := ws("+" + line); err != nil {
return err
}
}
@@ -669,12 +675,18 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
var diffErr error
- w := func(format string, args ...interface{}) {
+ wf := func(format string, args ...interface{}) {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
if diffErr == nil && err != nil {
diffErr = err
}
}
+ ws := func(s string) {
+ _, err := buf.WriteString(s)
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
if len(diff.Eol) == 0 {
diff.Eol = "\n"
@@ -700,15 +712,17 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
- w("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
- w("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if diff.FromFile != "" || diff.ToFile != "" {
+ wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+ }
}
first, last := g[0], g[len(g)-1]
- w("***************" + diff.Eol)
+ ws("***************" + diff.Eol)
range1 := formatRangeContext(first.I1, last.I2)
- w("*** %s ****%s", range1, diff.Eol)
+ wf("*** %s ****%s", range1, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'd' {
for _, cc := range g {
@@ -716,7 +730,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
continue
}
for _, line := range diff.A[cc.I1:cc.I2] {
- w(prefix[cc.Tag] + line)
+ ws(prefix[cc.Tag] + line)
}
}
break
@@ -724,7 +738,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
}
range2 := formatRangeContext(first.J1, last.J2)
- w("--- %s ----%s", range2, diff.Eol)
+ wf("--- %s ----%s", range2, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'i' {
for _, cc := range g {
@@ -732,7 +746,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
continue
}
for _, line := range diff.B[cc.J1:cc.J2] {
- w(prefix[cc.Tag] + line)
+ ws(prefix[cc.Tag] + line)
}
}
break
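The w-to-wf/ws split above matters because diff content is user data: passed through Sprintf as a format string, any '%' in a line is parsed as a verb. A minimal sketch of the failure mode (assumed, not taken from the patch; note the test below adds a line containing %s and %T for exactly this reason):

package main

import "fmt"

func main() {
	line := `fmt.Printf("%s,%T", a, b)` // a line of diff content containing % verbs
	fmt.Printf(" " + line)              // broken: %s and %T are parsed as verbs
	fmt.Println()
	fmt.Print(" " + line) // correct: the line is written verbatim
}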
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go
index 94670bea3..d72511962 100644
--- a/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go
@@ -102,11 +102,12 @@ group
}
}
-func ExampleGetUnifiedDiffString() {
+func ExampleGetUnifiedDiffCode() {
a := `one
two
three
-four`
+four
+fmt.Printf("%s,%T",a,b)`
b := `zero
one
three
@@ -121,16 +122,54 @@ four`
Context: 3,
}
result, _ := GetUnifiedDiffString(diff)
- fmt.Printf(strings.Replace(result, "\t", " ", -1))
+ fmt.Println(strings.Replace(result, "\t", " ", -1))
// Output:
// --- Original 2005-01-26 23:30:50
// +++ Current 2010-04-02 10:20:52
- // @@ -1,4 +1,4 @@
+ // @@ -1,5 +1,4 @@
// +zero
// one
// -two
// three
// four
+ // -fmt.Printf("%s,%T",a,b)
+}
+
+func ExampleGetContextDiffCode() {
+ a := `one
+two
+three
+four
+fmt.Printf("%s,%T",a,b)`
+ b := `zero
+one
+tree
+four`
+ diff := ContextDiff{
+ A: SplitLines(a),
+ B: SplitLines(b),
+ FromFile: "Original",
+ ToFile: "Current",
+ Context: 3,
+ Eol: "\n",
+ }
+ result, _ := GetContextDiffString(diff)
+ fmt.Print(strings.Replace(result, "\t", " ", -1))
+ // Output:
+ // *** Original
+ // --- Current
+ // ***************
+ // *** 1,5 ****
+ // one
+ // ! two
+ // ! three
+ // four
+ // - fmt.Printf("%s,%T",a,b)
+ // --- 1,4 ----
+ // + zero
+ // one
+ // ! tree
+ // four
}
func ExampleGetContextDiffString() {
@@ -318,6 +357,41 @@ func TestOutputFormatNoTrailingTabOnEmptyFiledate(t *testing.T) {
assertEqual(t, SplitLines(cd)[:2], []string{"*** Original\n", "--- Current\n"})
}
+func TestOmitFilenames(t *testing.T) {
+ diff := UnifiedDiff{
+ A: SplitLines("o\nn\ne\n"),
+ B: SplitLines("t\nw\no\n"),
+ Eol: "\n",
+ }
+ ud, err := GetUnifiedDiffString(diff)
+ assertEqual(t, err, nil)
+ assertEqual(t, SplitLines(ud), []string{
+ "@@ -0,0 +1,2 @@\n",
+ "+t\n",
+ "+w\n",
+ "@@ -2,2 +3,0 @@\n",
+ "-n\n",
+ "-e\n",
+ "\n",
+ })
+
+ cd, err := GetContextDiffString(ContextDiff(diff))
+ assertEqual(t, err, nil)
+ assertEqual(t, SplitLines(cd), []string{
+ "***************\n",
+ "*** 0 ****\n",
+ "--- 1,2 ----\n",
+ "+ t\n",
+ "+ w\n",
+ "***************\n",
+ "*** 2,3 ****\n",
+ "- n\n",
+ "- e\n",
+ "--- 3 ----\n",
+ "\n",
+ })
+}
+
func TestSplitLines(t *testing.T) {
allTests := []struct {
input string
diff --git a/vendor/github.com/prometheus/client_golang/.gitignore b/vendor/github.com/prometheus/client_golang/.gitignore
index f6fc2e8eb..5725b80fd 100644
--- a/vendor/github.com/prometheus/client_golang/.gitignore
+++ b/vendor/github.com/prometheus/client_golang/.gitignore
@@ -7,6 +7,10 @@
_obj
_test
+# Examples
+/examples/simple/simple
+/examples/random/random
+
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
diff --git a/vendor/github.com/prometheus/client_golang/.travis.yml b/vendor/github.com/prometheus/client_golang/.travis.yml
index d83f31a59..e9bca4ec7 100644
--- a/vendor/github.com/prometheus/client_golang/.travis.yml
+++ b/vendor/github.com/prometheus/client_golang/.travis.yml
@@ -2,8 +2,9 @@ sudo: false
language: go
go:
- - 1.5.4
- - 1.6.2
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
script:
- - go test -short ./...
+ - go test -short ./...
diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md
deleted file mode 100644
index c5275d5ab..000000000
--- a/vendor/github.com/prometheus/client_golang/AUTHORS.md
+++ /dev/null
@@ -1,18 +0,0 @@
-The Prometheus project was started by Matt T. Proud (emeritus) and
-Julius Volz in 2012.
-
-Maintainers of this repository:
-
-* Björn Rabenstein <beorn@soundcloud.com>
-
-The following individuals have contributed code to this repository
-(listed in alphabetical order):
-
-* Bernerd Schaefer <bj.schaefer@gmail.com>
-* Björn Rabenstein <beorn@soundcloud.com>
-* Daniel Bornkessel <daniel@soundcloud.com>
-* Jeff Younker <jeff@drinktomi.com>
-* Julius Volz <julius.volz@gmail.com>
-* Matt T. Proud <matt.proud@gmail.com>
-* Tobias Schmidt <ts@soundcloud.com>
-
diff --git a/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md
index 5705f0fbe..40503edbf 100644
--- a/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md
+++ b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md
@@ -2,9 +2,9 @@
Prometheus uses GitHub to manage reviews of pull requests.
-* If you have a trivial fix or improvement, go ahead and create a pull
- request, addressing (with `@...`) one or more of the maintainers
- (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+* If you have a trivial fix or improvement, go ahead and create a pull request,
+ addressing (with `@...`) the maintainer of this repository (see
+ [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
* If you plan to do something more involved, first discuss your ideas
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
diff --git a/vendor/github.com/prometheus/client_golang/MAINTAINERS.md b/vendor/github.com/prometheus/client_golang/MAINTAINERS.md
new file mode 100644
index 000000000..3ede55fe1
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/MAINTAINERS.md
@@ -0,0 +1 @@
+* Björn Rabenstein <beorn@soundcloud.com>
diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md
index 557eacf5a..0eb0df1df 100644
--- a/vendor/github.com/prometheus/client_golang/README.md
+++ b/vendor/github.com/prometheus/client_golang/README.md
@@ -1,12 +1,15 @@
# Prometheus Go client library
[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang)
This is the [Go](http://golang.org) client library for
[Prometheus](http://prometheus.io). It has two separate parts, one for
instrumenting application code, and one for creating clients that talk to the
Prometheus HTTP API.
+__This library requires Go1.7 or later.__
+
## Instrumenting applications
[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus)
@@ -29,7 +32,8 @@ The
[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
contains the client for the
[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you
-to write Go applications that query time series data from a Prometheus server.
+to write Go applications that query time series data from a Prometheus
+server. It is still in the alpha stage.
## Where is `model`, `extraction`, and `text`?
diff --git a/vendor/github.com/prometheus/client_golang/api/client.go b/vendor/github.com/prometheus/client_golang/api/client.go
new file mode 100644
index 000000000..bf2672466
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/api/client.go
@@ -0,0 +1,131 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.7
+
+// Package api provides clients for the HTTP APIs.
+package api
+
+import (
+ "context"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "time"
+)
+
+// DefaultRoundTripper is used if no RoundTripper is set in Config.
+var DefaultRoundTripper http.RoundTripper = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+}
+
+// Config defines configuration parameters for a new client.
+type Config struct {
+ // The address of the Prometheus to connect to.
+ Address string
+
+ // RoundTripper is used by the Client to drive HTTP requests. If not
+ // provided, DefaultRoundTripper will be used.
+ RoundTripper http.RoundTripper
+}
+
+func (cfg *Config) roundTripper() http.RoundTripper {
+ if cfg.RoundTripper == nil {
+ return DefaultRoundTripper
+ }
+ return cfg.RoundTripper
+}
+
+// Client is the interface for an API client.
+type Client interface {
+ URL(ep string, args map[string]string) *url.URL
+ Do(context.Context, *http.Request) (*http.Response, []byte, error)
+}
+
+// NewClient returns a new Client.
+//
+// It is safe to use the returned Client from multiple goroutines.
+func NewClient(cfg Config) (Client, error) {
+ u, err := url.Parse(cfg.Address)
+ if err != nil {
+ return nil, err
+ }
+ u.Path = strings.TrimRight(u.Path, "/")
+
+ return &httpClient{
+ endpoint: u,
+ client: http.Client{Transport: cfg.roundTripper()},
+ }, nil
+}
+
+type httpClient struct {
+ endpoint *url.URL
+ client http.Client
+}
+
+func (c *httpClient) URL(ep string, args map[string]string) *url.URL {
+ p := path.Join(c.endpoint.Path, ep)
+
+ for arg, val := range args {
+ arg = ":" + arg
+ p = strings.Replace(p, arg, val, -1)
+ }
+
+ u := *c.endpoint
+ u.Path = p
+
+ return &u
+}
+
+func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+ if ctx != nil {
+ req = req.WithContext(ctx)
+ }
+ resp, err := c.client.Do(req)
+ defer func() {
+ if resp != nil {
+ resp.Body.Close()
+ }
+ }()
+
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var body []byte
+ done := make(chan struct{})
+ go func() {
+ body, err = ioutil.ReadAll(resp.Body)
+ close(done)
+ }()
+
+ select {
+ case <-ctx.Done():
+ err = resp.Body.Close()
+ <-done
+ if err == nil {
+ err = ctx.Err()
+ }
+ case <-done:
+ }
+
+ return resp, body, err
+}
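A minimal usage sketch of the new api.Client (assumed; the endpoint and placeholder values are illustrative only):

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/api"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		panic(err)
	}
	// URL substitutes :name placeholders and joins with the configured address.
	u := client.URL("/api/v1/label/:name/values", map[string]string{"name": "job"})
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		panic(err)
	}
	_, body, err := client.Do(context.Background(), req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}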
diff --git a/vendor/github.com/prometheus/client_golang/api/client_test.go b/vendor/github.com/prometheus/client_golang/api/client_test.go
new file mode 100644
index 000000000..53226d7d2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/api/client_test.go
@@ -0,0 +1,115 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.7
+
+package api
+
+import (
+ "net/http"
+ "net/url"
+ "testing"
+)
+
+func TestConfig(t *testing.T) {
+ c := Config{}
+ if c.roundTripper() != DefaultRoundTripper {
+ t.Fatalf("expected default roundtripper for nil RoundTripper field")
+ }
+}
+
+func TestClientURL(t *testing.T) {
+ tests := []struct {
+ address string
+ endpoint string
+ args map[string]string
+ expected string
+ }{
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test",
+ expected: "http://localhost:9090/test",
+ },
+ {
+ address: "http://localhost",
+ endpoint: "/test",
+ expected: "http://localhost/test",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "test",
+ expected: "http://localhost:9090/test",
+ },
+ {
+ address: "http://localhost:9090/prefix",
+ endpoint: "/test",
+ expected: "http://localhost:9090/prefix/test",
+ },
+ {
+ address: "https://localhost:9090/",
+ endpoint: "/test/",
+ expected: "https://localhost:9090/test",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param",
+ args: map[string]string{
+ "param": "content",
+ },
+ expected: "http://localhost:9090/test/content",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param/more/:param",
+ args: map[string]string{
+ "param": "content",
+ },
+ expected: "http://localhost:9090/test/content/more/content",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param/more/:foo",
+ args: map[string]string{
+ "param": "content",
+ "foo": "bar",
+ },
+ expected: "http://localhost:9090/test/content/more/bar",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param",
+ args: map[string]string{
+ "nonexistant": "content",
+ },
+ expected: "http://localhost:9090/test/:param",
+ },
+ }
+
+ for _, test := range tests {
+ ep, err := url.Parse(test.address)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hclient := &httpClient{
+ endpoint: ep,
+ client: http.Client{Transport: DefaultRoundTripper},
+ }
+
+ u := hclient.URL(test.endpoint, test.args)
+ if u.String() != test.expected {
+ t.Errorf("unexpected result: got %s, want %s", u, test.expected)
+ continue
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go
index 3028d741d..cb07022bc 100644
--- a/vendor/github.com/prometheus/client_golang/api/prometheus/api.go
+++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -11,41 +11,40 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package prometheus provides bindings to the Prometheus HTTP API:
+// +build go1.7
+
+// Package v1 provides bindings to the Prometheus HTTP API v1:
// http://prometheus.io/docs/querying/api/
-package prometheus
+package v1
import (
+ "context"
"encoding/json"
"fmt"
- "io/ioutil"
- "net"
"net/http"
- "net/url"
- "path"
"strconv"
- "strings"
"time"
+ "github.com/prometheus/client_golang/api"
"github.com/prometheus/common/model"
- "golang.org/x/net/context"
- "golang.org/x/net/context/ctxhttp"
)
const (
statusAPIError = 422
- apiPrefix = "/api/v1"
- epQuery = "/query"
- epQueryRange = "/query_range"
- epLabelValues = "/label/:name/values"
- epSeries = "/series"
+ apiPrefix = "/api/v1"
+
+ epQuery = apiPrefix + "/query"
+ epQueryRange = apiPrefix + "/query_range"
+ epLabelValues = apiPrefix + "/label/:name/values"
+ epSeries = apiPrefix + "/series"
)
+// ErrorType models the different API error types.
type ErrorType string
+// Possible values for ErrorType.
const (
- // The different API error types.
ErrBadData ErrorType = "bad_data"
ErrTimeout = "timeout"
ErrCanceled = "canceled"
@@ -63,166 +62,6 @@ func (e *Error) Error() string {
return fmt.Sprintf("%s: %s", e.Type, e.Msg)
}
-// CancelableTransport is like net.Transport but provides
-// per-request cancelation functionality.
-type CancelableTransport interface {
- http.RoundTripper
- CancelRequest(req *http.Request)
-}
-
-var DefaultTransport CancelableTransport = &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 10 * time.Second,
-}
-
-// Config defines configuration parameters for a new client.
-type Config struct {
- // The address of the Prometheus to connect to.
- Address string
-
- // Transport is used by the Client to drive HTTP requests. If not
- // provided, DefaultTransport will be used.
- Transport CancelableTransport
-}
-
-func (cfg *Config) transport() CancelableTransport {
- if cfg.Transport == nil {
- return DefaultTransport
- }
- return cfg.Transport
-}
-
-type Client interface {
- url(ep string, args map[string]string) *url.URL
- do(context.Context, *http.Request) (*http.Response, []byte, error)
-}
-
-// New returns a new Client.
-//
-// It is safe to use the returned Client from multiple goroutines.
-func New(cfg Config) (Client, error) {
- u, err := url.Parse(cfg.Address)
- if err != nil {
- return nil, err
- }
- u.Path = strings.TrimRight(u.Path, "/") + apiPrefix
-
- return &httpClient{
- endpoint: u,
- transport: cfg.transport(),
- }, nil
-}
-
-type httpClient struct {
- endpoint *url.URL
- transport CancelableTransport
-}
-
-func (c *httpClient) url(ep string, args map[string]string) *url.URL {
- p := path.Join(c.endpoint.Path, ep)
-
- for arg, val := range args {
- arg = ":" + arg
- p = strings.Replace(p, arg, val, -1)
- }
-
- u := *c.endpoint
- u.Path = p
-
- return &u
-}
-
-func (c *httpClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
- resp, err := ctxhttp.Do(ctx, &http.Client{Transport: c.transport}, req)
-
- defer func() {
- if resp != nil {
- resp.Body.Close()
- }
- }()
-
- if err != nil {
- return nil, nil, err
- }
-
- var body []byte
- done := make(chan struct{})
- go func() {
- body, err = ioutil.ReadAll(resp.Body)
- close(done)
- }()
-
- select {
- case <-ctx.Done():
- err = resp.Body.Close()
- <-done
- if err == nil {
- err = ctx.Err()
- }
- case <-done:
- }
-
- return resp, body, err
-}
-
-// apiClient wraps a regular client and processes successful API responses.
-// Successful also includes responses that errored at the API level.
-type apiClient struct {
- Client
-}
-
-type apiResponse struct {
- Status string `json:"status"`
- Data json.RawMessage `json:"data"`
- ErrorType ErrorType `json:"errorType"`
- Error string `json:"error"`
-}
-
-func (c apiClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
- resp, body, err := c.Client.do(ctx, req)
- if err != nil {
- return resp, body, err
- }
-
- code := resp.StatusCode
-
- if code/100 != 2 && code != statusAPIError {
- return resp, body, &Error{
- Type: ErrBadResponse,
- Msg: fmt.Sprintf("bad response code %d", resp.StatusCode),
- }
- }
-
- var result apiResponse
-
- if err = json.Unmarshal(body, &result); err != nil {
- return resp, body, &Error{
- Type: ErrBadResponse,
- Msg: err.Error(),
- }
- }
-
- if (code == statusAPIError) != (result.Status == "error") {
- err = &Error{
- Type: ErrBadResponse,
- Msg: "inconsistent body for response code",
- }
- }
-
- if code == statusAPIError && result.Status == "error" {
- err = &Error{
- Type: result.ErrorType,
- Msg: result.Error,
- }
- }
-
- return resp, []byte(result.Data), err
-}
-
// Range represents a sliced time range.
type Range struct {
// The boundaries of the time range.
@@ -231,6 +70,18 @@ type Range struct {
Step time.Duration
}
+// API provides bindings for Prometheus's v1 API.
+type API interface {
+ // Query performs a query for the given time.
+ Query(ctx context.Context, query string, ts time.Time) (model.Value, error)
+ // QueryRange performs a query for the given range.
+ QueryRange(ctx context.Context, query string, r Range) (model.Value, error)
+ // LabelValues performs a query for the values of the given label.
+ LabelValues(ctx context.Context, label string) (model.LabelValues, error)
+ // Series finds series by label matchers.
+ Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error)
+}
+
// queryResult contains result data for a query.
type queryResult struct {
Type model.ValueType `json:"resultType"`
@@ -273,37 +124,34 @@ func (qr *queryResult) UnmarshalJSON(b []byte) error {
return err
}
-// QueryAPI provides bindings the Prometheus's query API.
-type QueryAPI interface {
- // Query performs a query for the given time.
- Query(ctx context.Context, query string, ts time.Time) (model.Value, error)
- // Query performs a query for the given range.
- QueryRange(ctx context.Context, query string, r Range) (model.Value, error)
-}
-
-// NewQueryAPI returns a new QueryAPI for the client.
+// NewAPI returns a new API for the client.
//
-// It is safe to use the returned QueryAPI from multiple goroutines.
-func NewQueryAPI(c Client) QueryAPI {
- return &httpQueryAPI{client: apiClient{c}}
+// It is safe to use the returned API from multiple goroutines.
+func NewAPI(c api.Client) API {
+ return &httpAPI{client: apiClient{c}}
}
-type httpQueryAPI struct {
- client Client
+type httpAPI struct {
+ client api.Client
}
-func (h *httpQueryAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) {
- u := h.client.url(epQuery, nil)
+func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) {
+ u := h.client.URL(epQuery, nil)
q := u.Query()
q.Set("query", query)
- q.Set("time", ts.Format(time.RFC3339Nano))
+ if !ts.IsZero() {
+ q.Set("time", ts.Format(time.RFC3339Nano))
+ }
u.RawQuery = q.Encode()
- req, _ := http.NewRequest("GET", u.String(), nil)
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
- _, body, err := h.client.do(ctx, req)
+ _, body, err := h.client.Do(ctx, req)
if err != nil {
return nil, err
}
@@ -314,8 +162,8 @@ func (h *httpQueryAPI) Query(ctx context.Context, query string, ts time.Time) (m
return model.Value(qres.v), err
}
-func (h *httpQueryAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) {
- u := h.client.url(epQueryRange, nil)
+func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) {
+ u := h.client.URL(epQueryRange, nil)
q := u.Query()
var (
@@ -331,9 +179,12 @@ func (h *httpQueryAPI) QueryRange(ctx context.Context, query string, r Range) (m
u.RawQuery = q.Encode()
- req, _ := http.NewRequest("GET", u.String(), nil)
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
- _, body, err := h.client.do(ctx, req)
+ _, body, err := h.client.Do(ctx, req)
if err != nil {
return nil, err
}
@@ -343,3 +194,100 @@ func (h *httpQueryAPI) QueryRange(ctx context.Context, query string, r Range) (m
return model.Value(qres.v), err
}
+
+func (h *httpAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, error) {
+ u := h.client.URL(epLabelValues, map[string]string{"name": label})
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ _, body, err := h.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+ var labelValues model.LabelValues
+ err = json.Unmarshal(body, &labelValues)
+ return labelValues, err
+}
+
+func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error) {
+ u := h.client.URL(epSeries, nil)
+ q := u.Query()
+
+ for _, m := range matches {
+ q.Add("match[]", m)
+ }
+
+ q.Set("start", startTime.Format(time.RFC3339Nano))
+ q.Set("end", endTime.Format(time.RFC3339Nano))
+
+ u.RawQuery = q.Encode()
+
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ _, body, err := h.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ var mset []model.LabelSet
+ err = json.Unmarshal(body, &mset)
+ return mset, err
+}
+
+// apiClient wraps a regular client and processes successful API responses.
+// Successful also includes responses that errored at the API level.
+type apiClient struct {
+ api.Client
+}
+
+type apiResponse struct {
+ Status string `json:"status"`
+ Data json.RawMessage `json:"data"`
+ ErrorType ErrorType `json:"errorType"`
+ Error string `json:"error"`
+}
+
+func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+ resp, body, err := c.Client.Do(ctx, req)
+ if err != nil {
+ return resp, body, err
+ }
+
+ code := resp.StatusCode
+
+ if code/100 != 2 && code != statusAPIError {
+ return resp, body, &Error{
+ Type: ErrBadResponse,
+ Msg: fmt.Sprintf("bad response code %d", resp.StatusCode),
+ }
+ }
+
+ var result apiResponse
+
+ if err = json.Unmarshal(body, &result); err != nil {
+ return resp, body, &Error{
+ Type: ErrBadResponse,
+ Msg: err.Error(),
+ }
+ }
+
+ if (code == statusAPIError) != (result.Status == "error") {
+ err = &Error{
+ Type: ErrBadResponse,
+ Msg: "inconsistent body for response code",
+ }
+ }
+
+ if code == statusAPIError && result.Status == "error" {
+ err = &Error{
+ Type: result.ErrorType,
+ Msg: result.Error,
+ }
+ }
+
+ return resp, []byte(result.Data), err
+}
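A minimal usage sketch (assumed) of the renamed v1 bindings, wiring an api.Client into NewAPI:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		panic(err)
	}
	papi := v1.NewAPI(client)
	// A zero ts would omit the time parameter entirely (see the Query change above).
	val, err := papi.Query(context.Background(), "up", time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println(val)
}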
diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/api_test.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api_test.go
index 87d3e408e..e557d68f9 100644
--- a/vendor/github.com/prometheus/client_golang/api/prometheus/api_test.go
+++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -11,118 +11,246 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package prometheus
+// +build go1.7
+
+package v1
import (
+ "context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"reflect"
+ "strings"
"testing"
"time"
"github.com/prometheus/common/model"
- "golang.org/x/net/context"
)
-func TestConfig(t *testing.T) {
- c := Config{}
- if c.transport() != DefaultTransport {
- t.Fatalf("expected default transport for nil Transport field")
+type apiTest struct {
+ do func() (interface{}, error)
+ inErr error
+ inRes interface{}
+
+ reqPath string
+ reqParam url.Values
+ reqMethod string
+ res interface{}
+ err error
+}
+
+type apiTestClient struct {
+ *testing.T
+ curTest apiTest
+}
+
+func (c *apiTestClient) URL(ep string, args map[string]string) *url.URL {
+ path := ep
+ for k, v := range args {
+ path = strings.Replace(path, ":"+k, v, -1)
+ }
+ u := &url.URL{
+ Host: "test:9090",
+ Path: path,
}
+ return u
}
-func TestClientURL(t *testing.T) {
- tests := []struct {
- address string
- endpoint string
- args map[string]string
- expected string
- }{
+func (c *apiTestClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+
+ test := c.curTest
+
+ if req.URL.Path != test.reqPath {
+ c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path)
+ }
+ if req.Method != test.reqMethod {
+ c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method)
+ }
+
+ b, err := json.Marshal(test.inRes)
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ resp := &http.Response{}
+ if test.inErr != nil {
+ resp.StatusCode = statusAPIError
+ } else {
+ resp.StatusCode = http.StatusOK
+ }
+
+ return resp, b, test.inErr
+}
+
+func TestAPIs(t *testing.T) {
+
+ testTime := time.Now()
+
+ client := &apiTestClient{T: t}
+
+ queryAPI := &httpAPI{
+ client: client,
+ }
+
+ doQuery := func(q string, ts time.Time) func() (interface{}, error) {
+ return func() (interface{}, error) {
+ return queryAPI.Query(context.Background(), q, ts)
+ }
+ }
+
+ doQueryRange := func(q string, rng Range) func() (interface{}, error) {
+ return func() (interface{}, error) {
+ return queryAPI.QueryRange(context.Background(), q, rng)
+ }
+ }
+
+ doLabelValues := func(label string) func() (interface{}, error) {
+ return func() (interface{}, error) {
+ return queryAPI.LabelValues(context.Background(), label)
+ }
+ }
+
+ doSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, error) {
+ return func() (interface{}, error) {
+ return queryAPI.Series(context.Background(), []string{matcher}, startTime, endTime)
+ }
+ }
+
+ queryTests := []apiTest{
{
- address: "http://localhost:9090",
- endpoint: "/test",
- expected: "http://localhost:9090/test",
+ do: doQuery("2", testTime),
+ inRes: &queryResult{
+ Type: model.ValScalar,
+ Result: &model.Scalar{
+ Value: 2,
+ Timestamp: model.TimeFromUnix(testTime.Unix()),
+ },
+ },
+
+ reqMethod: "GET",
+ reqPath: "/api/v1/query",
+ reqParam: url.Values{
+ "query": []string{"2"},
+ "time": []string{testTime.Format(time.RFC3339Nano)},
+ },
+ res: &model.Scalar{
+ Value: 2,
+ Timestamp: model.TimeFromUnix(testTime.Unix()),
+ },
},
{
- address: "http://localhost",
- endpoint: "/test",
- expected: "http://localhost/test",
+ do: doQuery("2", testTime),
+ inErr: fmt.Errorf("some error"),
+
+ reqMethod: "GET",
+ reqPath: "/api/v1/query",
+ reqParam: url.Values{
+ "query": []string{"2"},
+ "time": []string{testTime.Format(time.RFC3339Nano)},
+ },
+ err: fmt.Errorf("some error"),
},
+
{
- address: "http://localhost:9090",
- endpoint: "test",
- expected: "http://localhost:9090/test",
+ do: doQueryRange("2", Range{
+ Start: testTime.Add(-time.Minute),
+ End: testTime,
+ Step: time.Minute,
+ }),
+ inErr: fmt.Errorf("some error"),
+
+ reqMethod: "GET",
+ reqPath: "/api/v1/query_range",
+ reqParam: url.Values{
+ "query": []string{"2"},
+ "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
+ "end": []string{testTime.Format(time.RFC3339Nano)},
+ "step": []string{time.Minute.String()},
+ },
+ err: fmt.Errorf("some error"),
},
+
{
- address: "http://localhost:9090/prefix",
- endpoint: "/test",
- expected: "http://localhost:9090/prefix/test",
+ do: doLabelValues("mylabel"),
+ inRes: []string{"val1", "val2"},
+ reqMethod: "GET",
+ reqPath: "/api/v1/label/mylabel/values",
+ res: model.LabelValues{"val1", "val2"},
},
+
{
- address: "https://localhost:9090/",
- endpoint: "/test/",
- expected: "https://localhost:9090/test",
+ do: doLabelValues("mylabel"),
+ inErr: fmt.Errorf("some error"),
+ reqMethod: "GET",
+ reqPath: "/api/v1/label/mylabel/values",
+ err: fmt.Errorf("some error"),
},
+
{
- address: "http://localhost:9090",
- endpoint: "/test/:param",
- args: map[string]string{
- "param": "content",
+ do: doSeries("up", testTime.Add(-time.Minute), testTime),
+ inRes: []map[string]string{
+ {
+ "__name__": "up",
+ "job": "prometheus",
+ "instance": "localhost:9090"},
},
- expected: "http://localhost:9090/test/content",
- },
- {
- address: "http://localhost:9090",
- endpoint: "/test/:param/more/:param",
- args: map[string]string{
- "param": "content",
+ reqMethod: "GET",
+ reqPath: "/api/v1/series",
+ reqParam: url.Values{
+ "match": []string{"up"},
+ "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
+ "end": []string{testTime.Format(time.RFC3339Nano)},
},
- expected: "http://localhost:9090/test/content/more/content",
- },
- {
- address: "http://localhost:9090",
- endpoint: "/test/:param/more/:foo",
- args: map[string]string{
- "param": "content",
- "foo": "bar",
+ res: []model.LabelSet{
+ model.LabelSet{
+ "__name__": "up",
+ "job": "prometheus",
+ "instance": "localhost:9090",
+ },
},
- expected: "http://localhost:9090/test/content/more/bar",
},
+
{
- address: "http://localhost:9090",
- endpoint: "/test/:param",
- args: map[string]string{
- "nonexistant": "content",
+ do: doSeries("up", testTime.Add(-time.Minute), testTime),
+ inErr: fmt.Errorf("some error"),
+ reqMethod: "GET",
+ reqPath: "/api/v1/series",
+ reqParam: url.Values{
+ "match": []string{"up"},
+ "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
+ "end": []string{testTime.Format(time.RFC3339Nano)},
},
- expected: "http://localhost:9090/test/:param",
+ err: fmt.Errorf("some error"),
},
}
+ var tests []apiTest
+ tests = append(tests, queryTests...)
+
for _, test := range tests {
- ep, err := url.Parse(test.address)
- if err != nil {
- t.Fatal(err)
- }
+ client.curTest = test
- hclient := &httpClient{
- endpoint: ep,
- transport: DefaultTransport,
- }
+ res, err := test.do()
- u := hclient.url(test.endpoint, test.args)
- if u.String() != test.expected {
- t.Errorf("unexpected result: got %s, want %s", u, test.expected)
+ if test.err != nil {
+ if err == nil {
+ t.Errorf("expected error %q but got none", test.err)
+ continue
+ }
+ if err.Error() != test.err.Error() {
+ t.Errorf("unexpected error: want %s, got %s", test.err, err)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
continue
}
- // The apiClient must return exactly the same result as the httpClient.
- aclient := &apiClient{hclient}
-
- u = aclient.url(test.endpoint, test.args)
- if u.String() != test.expected {
- t.Errorf("unexpected result: got %s, want %s", u, test.expected)
+ if !reflect.DeepEqual(res, test.res) {
+ t.Errorf("unexpected result: want %v, got %v", test.res, res)
}
}
}
@@ -141,11 +269,11 @@ type apiClientTest struct {
err *Error
}
-func (c *testClient) url(ep string, args map[string]string) *url.URL {
+func (c *testClient) URL(ep string, args map[string]string) *url.URL {
return nil
}
-func (c *testClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+func (c *testClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
if ctx == nil {
c.Fatalf("context was not passed down")
}
@@ -271,7 +399,7 @@ func TestAPIClientDo(t *testing.T) {
tc.ch <- test
- _, body, err := client.do(context.Background(), tc.req)
+ _, body, err := client.Do(context.Background(), tc.req)
if test.err != nil {
if err == nil {
@@ -294,160 +422,3 @@ func TestAPIClientDo(t *testing.T) {
}
}
}
-
-type apiTestClient struct {
- *testing.T
- curTest apiTest
-}
-
-type apiTest struct {
- do func() (interface{}, error)
- inErr error
- inRes interface{}
-
- reqPath string
- reqParam url.Values
- reqMethod string
- res interface{}
- err error
-}
-
-func (c *apiTestClient) url(ep string, args map[string]string) *url.URL {
- u := &url.URL{
- Host: "test:9090",
- Path: apiPrefix + ep,
- }
- return u
-}
-
-func (c *apiTestClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
-
- test := c.curTest
-
- if req.URL.Path != test.reqPath {
- c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path)
- }
- if req.Method != test.reqMethod {
- c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method)
- }
-
- b, err := json.Marshal(test.inRes)
- if err != nil {
- c.Fatal(err)
- }
-
- resp := &http.Response{}
- if test.inErr != nil {
- resp.StatusCode = statusAPIError
- } else {
- resp.StatusCode = http.StatusOK
- }
-
- return resp, b, test.inErr
-}
-
-func TestAPIs(t *testing.T) {
-
- testTime := time.Now()
-
- client := &apiTestClient{T: t}
-
- queryApi := &httpQueryAPI{
- client: client,
- }
-
- doQuery := func(q string, ts time.Time) func() (interface{}, error) {
- return func() (interface{}, error) {
- return queryApi.Query(context.Background(), q, ts)
- }
- }
-
- doQueryRange := func(q string, rng Range) func() (interface{}, error) {
- return func() (interface{}, error) {
- return queryApi.QueryRange(context.Background(), q, rng)
- }
- }
-
- queryTests := []apiTest{
- {
- do: doQuery("2", testTime),
- inRes: &queryResult{
- Type: model.ValScalar,
- Result: &model.Scalar{
- Value: 2,
- Timestamp: model.TimeFromUnix(testTime.Unix()),
- },
- },
-
- reqMethod: "GET",
- reqPath: "/api/v1/query",
- reqParam: url.Values{
- "query": []string{"2"},
- "time": []string{testTime.Format(time.RFC3339Nano)},
- },
- res: &model.Scalar{
- Value: 2,
- Timestamp: model.TimeFromUnix(testTime.Unix()),
- },
- },
- {
- do: doQuery("2", testTime),
- inErr: fmt.Errorf("some error"),
-
- reqMethod: "GET",
- reqPath: "/api/v1/query",
- reqParam: url.Values{
- "query": []string{"2"},
- "time": []string{testTime.Format(time.RFC3339Nano)},
- },
- err: fmt.Errorf("some error"),
- },
-
- {
- do: doQueryRange("2", Range{
- Start: testTime.Add(-time.Minute),
- End: testTime,
- Step: time.Minute,
- }),
- inErr: fmt.Errorf("some error"),
-
- reqMethod: "GET",
- reqPath: "/api/v1/query_range",
- reqParam: url.Values{
- "query": []string{"2"},
- "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
- "end": []string{testTime.Format(time.RFC3339Nano)},
- "step": []string{time.Minute.String()},
- },
- err: fmt.Errorf("some error"),
- },
- }
-
- var tests []apiTest
- tests = append(tests, queryTests...)
-
- for _, test := range tests {
- client.curTest = test
-
- res, err := test.do()
-
- if test.err != nil {
- if err == nil {
- t.Errorf("expected error %q but got none", test.err)
- continue
- }
- if err.Error() != test.err.Error() {
- t.Errorf("unexpected error: want %s, got %s", test.err, err)
- }
- continue
- }
- if err != nil {
- t.Errorf("unexpected error: %s", err)
- continue
- }
-
- if !reflect.DeepEqual(res, test.res) {
- t.Errorf("unexpected result: want %v, got %v", test.res, res)
- }
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/examples/random/Dockerfile b/vendor/github.com/prometheus/client_golang/examples/random/Dockerfile
new file mode 100644
index 000000000..32b6846ea
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/examples/random/Dockerfile
@@ -0,0 +1,20 @@
+# This Dockerfile builds an image for a client_golang example.
+#
+# Use as (from the root for the client_golang repository):
+# docker build -f examples/$name/Dockerfile -t prometheus/golang-example-$name .
+
+# Builder image, where we build the example.
+FROM golang:1.9.0 AS builder
+WORKDIR /go/src/github.com/prometheus/client_golang
+COPY . .
+WORKDIR /go/src/github.com/prometheus/client_golang/prometheus
+RUN go get -d
+WORKDIR /go/src/github.com/prometheus/client_golang/examples/random
+RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w'
+
+# Final image.
+FROM scratch
+LABEL maintainer "The Prometheus Authors <prometheus-developers@googlegroups.com>"
+COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/random .
+EXPOSE 8080
+ENTRYPOINT ["/random"]
diff --git a/vendor/github.com/prometheus/client_golang/examples/random/main.go b/vendor/github.com/prometheus/client_golang/examples/random/main.go
index 563957193..eef50d200 100644
--- a/vendor/github.com/prometheus/client_golang/examples/random/main.go
+++ b/vendor/github.com/prometheus/client_golang/examples/random/main.go
@@ -18,19 +18,21 @@ package main
import (
"flag"
+ "log"
"math"
"math/rand"
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
- uniformDomain = flag.Float64("uniform.domain", 200, "The domain for the uniform distribution.")
- normDomain = flag.Float64("normal.domain", 200, "The domain for the normal distribution.")
- normMean = flag.Float64("normal.mean", 10, "The mean for the normal distribution.")
+ uniformDomain = flag.Float64("uniform.domain", 0.0002, "The domain for the uniform distribution.")
+ normDomain = flag.Float64("normal.domain", 0.0002, "The domain for the normal distribution.")
+ normMean = flag.Float64("normal.mean", 0.00001, "The mean for the normal distribution.")
oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.")
)
@@ -40,8 +42,9 @@ var (
// differentiated via a "service" label.
rpcDurations = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
- Name: "rpc_durations_microseconds",
- Help: "RPC latency distributions.",
+ Name: "rpc_durations_seconds",
+ Help: "RPC latency distributions.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"service"},
)
@@ -50,7 +53,7 @@ var (
// normal distribution, with 20 buckets centered on the mean, each
// half-sigma wide.
rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
- Name: "rpc_durations_histogram_microseconds",
+ Name: "rpc_durations_histogram_seconds",
Help: "RPC latency distributions.",
Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20),
})
@@ -91,13 +94,13 @@ func main() {
go func() {
for {
- v := rand.ExpFloat64()
+ v := rand.ExpFloat64() / 1e6
rpcDurations.WithLabelValues("exponential").Observe(v)
time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond)
}
}()
// Expose the registered metrics via HTTP.
- http.Handle("/metrics", prometheus.Handler())
- http.ListenAndServe(*addr, nil)
+ http.Handle("/metrics", promhttp.Handler())
+ log.Fatal(http.ListenAndServe(*addr, nil))
}
diff --git a/vendor/github.com/prometheus/client_golang/examples/simple/Dockerfile b/vendor/github.com/prometheus/client_golang/examples/simple/Dockerfile
new file mode 100644
index 000000000..99b49d781
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/examples/simple/Dockerfile
@@ -0,0 +1,20 @@
+# This Dockerfile builds an image for a client_golang example.
+#
+# Use as (from the root for the client_golang repository):
+# docker build -f examples/$name/Dockerfile -t prometheus/golang-example-$name .
+
+# Builder image, where we build the example.
+FROM golang:1.9.0 AS builder
+WORKDIR /go/src/github.com/prometheus/client_golang
+COPY . .
+WORKDIR /go/src/github.com/prometheus/client_golang/prometheus
+RUN go get -d
+WORKDIR /go/src/github.com/prometheus/client_golang/examples/simple
+RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w'
+
+# Final image.
+FROM scratch
+LABEL maintainer "The Prometheus Authors <prometheus-developers@googlegroups.com>"
+COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/simple .
+EXPOSE 8080
+ENTRYPOINT ["/simple"]
diff --git a/vendor/github.com/prometheus/client_golang/examples/simple/main.go b/vendor/github.com/prometheus/client_golang/examples/simple/main.go
index 19620d2b3..1fc23249a 100644
--- a/vendor/github.com/prometheus/client_golang/examples/simple/main.go
+++ b/vendor/github.com/prometheus/client_golang/examples/simple/main.go
@@ -16,15 +16,16 @@ package main
import (
"flag"
+ "log"
"net/http"
- "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
)
var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
func main() {
flag.Parse()
- http.Handle("/metrics", prometheus.Handler())
- http.ListenAndServe(*addr, nil)
+ http.Handle("/metrics", promhttp.Handler())
+ log.Fatal(http.ListenAndServe(*addr, nil))
}
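Both examples now use promhttp.Handler() in place of the deprecated prometheus.Handler() and no longer discard the ListenAndServe error. For a non-default registry, promhttp also offers HandlerFor; a sketch (HandlerFor is not shown in this diff, so its availability in this client_golang version is an assumption):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_total", // illustrative metric name
		Help: "An example counter.",
	}))
	// Serve only the metrics registered above, not the default registry.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}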
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go
index a3d86698b..4a05721dc 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go
@@ -129,8 +129,9 @@ func BenchmarkGaugeNoLabels(b *testing.B) {
func BenchmarkSummaryWithLabelValues(b *testing.B) {
m := NewSummaryVec(
SummaryOpts{
- Name: "benchmark_summary",
- Help: "A summary to benchmark it.",
+ Name: "benchmark_summary",
+ Help: "A summary to benchmark it.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"one", "two", "three"},
)
@@ -143,8 +144,9 @@ func BenchmarkSummaryWithLabelValues(b *testing.B) {
func BenchmarkSummaryNoLabels(b *testing.B) {
m := NewSummary(SummaryOpts{
- Name: "benchmark_summary",
- Help: "A summary to benchmark it.",
+ Name: "benchmark_summary",
+ Help: "A summary to benchmark it.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
)
b.ReportAllocs()
@@ -181,3 +183,17 @@ func BenchmarkHistogramNoLabels(b *testing.B) {
m.Observe(3.1415)
}
}
+
+func BenchmarkParallelCounter(b *testing.B) {
+ c := NewCounter(CounterOpts{
+ Name: "benchmark_counter",
+ Help: "A Counter to benchmark it.",
+ })
+ b.ReportAllocs()
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ c.Inc()
+ }
+ })
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index ee37949ad..765e4550c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -15,6 +15,10 @@ package prometheus
import (
"errors"
+ "math"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
)
// Counter is a Metric that represents a single numerical value that only ever
@@ -30,16 +34,8 @@ type Counter interface {
Metric
Collector
- // Set is used to set the Counter to an arbitrary value. It is only used
- // if you have to transfer a value from an external counter into this
- // Prometheus metric. Do not use it for regular handling of a
- // Prometheus counter (as it can be used to break the contract of
- // monotonically increasing values).
- //
- // Deprecated: Use NewConstMetric to create a counter for an external
- // value. A Counter should never be set.
- Set(float64)
- // Inc increments the counter by 1.
+ // Inc increments the counter by 1. Use Add to increment it by arbitrary
+ // non-negative values.
Inc()
// Add adds the given value to the counter. It panics if the value is <
// 0.
@@ -50,6 +46,14 @@ type Counter interface {
type CounterOpts Opts
// NewCounter creates a new Counter based on the provided CounterOpts.
+//
+// The returned implementation tracks the counter value in two separate
+// variables, a float64 and a uint64. The latter is used to track calls of the
+// Inc method and calls of the Add method with a value that can be represented
+// as a uint64. This allows atomic increments of the counter with optimal
+// performance. (It is common to have an Inc call in very hot execution paths.)
+// Both internal tracking values are added up in the Write method. This has to
+// be taken into account when it comes to precision and overflow behavior.
func NewCounter(opts CounterOpts) Counter {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -57,20 +61,58 @@ func NewCounter(opts CounterOpts) Counter {
nil,
opts.ConstLabels,
)
- result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
result.init(result) // Init self-collection.
return result
}
type counter struct {
- value
+ // valBits contains the bits of the represented float64 value, while
+ // valInt stores values that are exact integers. Both have to go first
+ // in the struct to guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+ valInt uint64
+
+ selfCollector
+ desc *Desc
+
+ labelPairs []*dto.LabelPair
+}
+
+func (c *counter) Desc() *Desc {
+ return c.desc
}
func (c *counter) Add(v float64) {
if v < 0 {
panic(errors.New("counter cannot decrease in value"))
}
- c.value.Add(v)
+ ival := uint64(v)
+ if float64(ival) == v {
+ atomic.AddUint64(&c.valInt, ival)
+ return
+ }
+
+ for {
+ oldBits := atomic.LoadUint64(&c.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (c *counter) Inc() {
+ atomic.AddUint64(&c.valInt, 1)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+ fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
+ ival := atomic.LoadUint64(&c.valInt)
+ val := fval + float64(ival)
+
+ return populateMetric(CounterValue, val, c.labelPairs, out)
}
// CounterVec is a Collector that bundles a set of Counters that all share the
@@ -78,16 +120,12 @@ func (c *counter) Add(v float64) {
// if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
-//
-// CounterVec embeds MetricVec. See there for a full list of methods with
-// detailed documentation.
type CounterVec struct {
- *MetricVec
+ *metricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -96,34 +134,62 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels,
)
return &CounterVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
- result := &counter{value: value{
- desc: desc,
- valType: CounterValue,
- labelPairs: makeLabelPairs(desc, lvs),
- }}
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ if len(lvs) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
return result
}),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Counter and not a
-// Metric so that no type conversion is required.
-func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Counter), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Counter and not a Metric so that no
-// type conversion is required.
-func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Counter), err
}
@@ -131,18 +197,57 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
-func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
- return m.MetricVec.WithLabelValues(lvs...).(Counter)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+ c, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return c
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *CounterVec) With(labels Labels) Counter {
- return m.MetricVec.With(labels).(Counter)
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+ c, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &CounterVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
}
// CounterFunc is a Counter whose value is determined at collect time by calling a
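[Editor's note] For orientation while reviewing this vendored change: a minimal usage sketch of the currying API added above. The metric and label names are illustrative, not part of the upstream change.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "http_requests_total", Help: "Total HTTP requests."},
		[]string{"method", "code"},
	)
	// Pre-set the "method" label; the curried vector only needs "code".
	getReqs := httpReqs.MustCurryWith(prometheus.Labels{"method": "GET"})
	// Increments the same underlying series as
	// httpReqs.WithLabelValues("GET", "404").
	getReqs.WithLabelValues("404").Inc()
}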
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go
index 67391a23a..5062f51af 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go
@@ -14,6 +14,7 @@
package prometheus
import (
+ "fmt"
"math"
"testing"
@@ -27,13 +28,27 @@ func TestCounterAdd(t *testing.T) {
ConstLabels: Labels{"a": "1", "b": "2"},
}).(*counter)
counter.Inc()
- if expected, got := 1., math.Float64frombits(counter.valBits); expected != got {
+ if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got {
t.Errorf("Expected %f, got %f.", expected, got)
}
+ if expected, got := uint64(1), counter.valInt; expected != got {
+ t.Errorf("Expected %d, got %d.", expected, got)
+ }
counter.Add(42)
- if expected, got := 43., math.Float64frombits(counter.valBits); expected != got {
+ if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got {
t.Errorf("Expected %f, got %f.", expected, got)
}
+ if expected, got := uint64(43), counter.valInt; expected != got {
+ t.Errorf("Expected %d, got %d.", expected, got)
+ }
+
+ counter.Add(24.42)
+ if expected, got := 24.42, math.Float64frombits(counter.valBits); expected != got {
+ t.Errorf("Expected %f, got %f.", expected, got)
+ }
+ if expected, got := uint64(43), counter.valInt; expected != got {
+ t.Errorf("Expected %d, got %d.", expected, got)
+ }
if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got {
t.Errorf("Expected error %q, got %q.", expected, got)
@@ -42,7 +57,7 @@ func TestCounterAdd(t *testing.T) {
m := &dto.Metric{}
counter.Write(m)
- if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:43 > `, m.String(); expected != got {
+ if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:67.42 > `, m.String(); expected != got {
t.Errorf("expected %q, got %q", expected, got)
}
}
@@ -56,3 +71,142 @@ func decreaseCounter(c *counter) (err error) {
c.Add(-1)
return nil
}
+
+func TestCounterVecGetMetricWithInvalidLabelValues(t *testing.T) {
+ testCases := []struct {
+ desc string
+ labels Labels
+ }{
+ {
+ desc: "non utf8 label value",
+ labels: Labels{"a": "\xFF"},
+ },
+ {
+ desc: "not enough label values",
+ labels: Labels{},
+ },
+ {
+ desc: "too many label values",
+ labels: Labels{"a": "1", "b": "2"},
+ },
+ }
+
+ for _, test := range testCases {
+ counterVec := NewCounterVec(CounterOpts{
+ Name: "test",
+ }, []string{"a"})
+
+ labelValues := make([]string, 0, len(test.labels))
+ for _, val := range test.labels {
+ labelValues = append(labelValues, val)
+ }
+
+ expectPanic(t, func() {
+ counterVec.WithLabelValues(labelValues...)
+ }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
+ expectPanic(t, func() {
+ counterVec.With(test.labels)
+ }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
+
+ if _, err := counterVec.GetMetricWithLabelValues(labelValues...); err == nil {
+ t.Errorf("GetMetricWithLabelValues: expected error because: %s", test.desc)
+ }
+ if _, err := counterVec.GetMetricWith(test.labels); err == nil {
+ t.Errorf("GetMetricWith: expected error because: %s", test.desc)
+ }
+ }
+}
+
+func expectPanic(t *testing.T, op func(), errorMsg string) {
+ defer func() {
+ if err := recover(); err == nil {
+ t.Error(errorMsg)
+ }
+ }()
+
+ op()
+}
+
+func TestCounterAddInf(t *testing.T) {
+ counter := NewCounter(CounterOpts{
+ Name: "test",
+ Help: "test help",
+ }).(*counter)
+
+ counter.Inc()
+ if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got {
+ t.Errorf("Expected %f, got %f.", expected, got)
+ }
+ if expected, got := uint64(1), counter.valInt; expected != got {
+ t.Errorf("Expected %d, got %d.", expected, got)
+ }
+
+ counter.Add(math.Inf(1))
+ if expected, got := math.Inf(1), math.Float64frombits(counter.valBits); expected != got {
+ t.Errorf("valBits expected %f, got %f.", expected, got)
+ }
+ if expected, got := uint64(1), counter.valInt; expected != got {
+ t.Errorf("valInts expected %d, got %d.", expected, got)
+ }
+
+ counter.Inc()
+ if expected, got := math.Inf(1), math.Float64frombits(counter.valBits); expected != got {
+ t.Errorf("Expected %f, got %f.", expected, got)
+ }
+ if expected, got := uint64(2), counter.valInt; expected != got {
+ t.Errorf("Expected %d, got %d.", expected, got)
+ }
+
+ m := &dto.Metric{}
+ counter.Write(m)
+
+ if expected, got := `counter:<value:inf > `, m.String(); expected != got {
+ t.Errorf("expected %q, got %q", expected, got)
+ }
+}
+
+func TestCounterAddLarge(t *testing.T) {
+ counter := NewCounter(CounterOpts{
+ Name: "test",
+ Help: "test help",
+ }).(*counter)
+
+ // large overflows the underlying type and should therefore be stored in valBits.
+ large := float64(math.MaxUint64 + 1)
+ counter.Add(large)
+ if expected, got := large, math.Float64frombits(counter.valBits); expected != got {
+ t.Errorf("valBits expected %f, got %f.", expected, got)
+ }
+ if expected, got := uint64(0), counter.valInt; expected != got {
+ t.Errorf("valInts expected %d, got %d.", expected, got)
+ }
+
+ m := &dto.Metric{}
+ counter.Write(m)
+
+ if expected, got := fmt.Sprintf("counter:<value:%0.16e > ", large), m.String(); expected != got {
+ t.Errorf("expected %q, got %q", expected, got)
+ }
+}
+
+func TestCounterAddSmall(t *testing.T) {
+ counter := NewCounter(CounterOpts{
+ Name: "test",
+ Help: "test help",
+ }).(*counter)
+ small := 0.000000000001
+ counter.Add(small)
+ if expected, got := small, math.Float64frombits(counter.valBits); expected != got {
+ t.Errorf("valBits expected %f, got %f.", expected, got)
+ }
+ if expected, got := uint64(0), counter.valInt; expected != got {
+ t.Errorf("valInts expected %d, got %d.", expected, got)
+ }
+
+ m := &dto.Metric{}
+ counter.Write(m)
+
+ if expected, got := fmt.Sprintf("counter:<value:%0.0e > ", small), m.String(); expected != got {
+ t.Errorf("expected %q, got %q", expected, got)
+ }
+}
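[Editor's note] The tests above pin down the counter's new split storage: whole-number Adds accumulate in valInt, fractional Adds in valBits, and Write reports their sum. A simplified, self-contained model of that idea (splitCounter is a hypothetical name, not the vendored type):

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

type splitCounter struct {
	valBits uint64 // float64 bits; takes fractional Adds via a CAS loop
	valInt  uint64 // integer part; takes whole-number Adds atomically
}

func (c *splitCounter) Add(v float64) {
	if ival := uint64(v); float64(ival) == v {
		atomic.AddUint64(&c.valInt, ival) // fast path for whole numbers
		return
	}
	for {
		oldBits := atomic.LoadUint64(&c.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
			return
		}
	}
}

// value reports the sum of both parts, as Write does in the real counter.
func (c *splitCounter) value() float64 {
	return math.Float64frombits(atomic.LoadUint64(&c.valBits)) +
		float64(atomic.LoadUint64(&c.valInt))
}

func main() {
	c := &splitCounter{}
	c.Add(1)               // lands in valInt
	c.Add(24.42)           // lands in valBits
	fmt.Println(c.value()) // 25.42
}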
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 77f4b30e8..4a755b0fa 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -16,33 +16,15 @@ package prometheus
import (
"errors"
"fmt"
- "regexp"
"sort"
"strings"
"github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
-var (
- metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
- labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-)
-
-// reservedLabelPrefix is a prefix which is not legal in user-supplied
-// label names.
-const reservedLabelPrefix = "__"
-
-// Labels represents a collection of label name -> value mappings. This type is
-// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
-// metric vector Collectors, e.g.:
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-//
-// The other use-case is the specification of constant label pairs in Opts or to
-// create a Desc.
-type Labels map[string]string
-
// Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to
@@ -78,7 +60,7 @@ type Desc struct {
// Help string. Each Desc with the same fqName must have the same
// dimHash.
dimHash uint64
- // err is an error that occured during construction. It is reported on
+ // err is an error that occurred during construction. It is reported on
// registration time.
err error
}
@@ -91,8 +73,7 @@ type Desc struct {
// and therefore not part of the Desc. (They are managed within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Opts documentation for the implications of
-// constant labels.
+// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
@@ -103,7 +84,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
d.err = errors.New("empty help string")
return d
}
- if !metricNameRE.MatchString(fqName) {
+ if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
@@ -127,6 +108,12 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
for _, labelName := range labelNames {
labelValues = append(labelValues, constLabels[labelName])
}
+ // Validate the const label values. They can't have a wrong cardinality, so
+ // use len(labelValues) as expectedNumberOfValues.
+ if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+ d.err = err
+ return d
+ }
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
@@ -142,6 +129,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
d.err = errors.New("duplicate label names")
return d
}
+
vh := hashNew()
for _, val := range labelValues {
vh = hashAdd(vh, val)
@@ -198,8 +186,3 @@ func (d *Desc) String() string {
d.variableLabels,
)
}
-
-func checkLabelName(l string) bool {
- return labelNameRE.MatchString(l) &&
- !strings.HasPrefix(l, reservedLabelPrefix)
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc_test.go b/vendor/github.com/prometheus/client_golang/prometheus/desc_test.go
new file mode 100644
index 000000000..2f962652c
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc_test.go
@@ -0,0 +1,17 @@
+package prometheus
+
+import (
+ "testing"
+)
+
+func TestNewDescInvalidLabelValues(t *testing.T) {
+ desc := NewDesc(
+ "sample_label",
+ "sample label",
+ nil,
+ Labels{"a": "\xFF"},
+ )
+ if desc.err == nil {
+ t.Errorf("NewDesc: expected error because: %s", desc.err)
+ }
+}
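[Editor's note] Outside the package, desc.err is not reachable; the validation added above surfaces to users at registration time. A hedged sketch:

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	bad := prometheus.NewCounter(prometheus.CounterOpts{
		Name:        "sample_total",
		Help:        "Sample counter.",
		ConstLabels: prometheus.Labels{"a": "\xFF"}, // not valid UTF-8
	})
	// The Desc's construction error is reported here, at registration.
	if err := prometheus.Register(bad); err != nil {
		log.Println("registration rejected:", err)
	}
}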
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index b15a2d3b9..36ef15567 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -17,7 +17,7 @@
// Pushgateway (package push).
//
// All exported functions and methods are safe to be used concurrently unless
-//specified otherwise.
+// specified otherwise.
//
// A Basic Example
//
@@ -26,6 +26,7 @@
// package main
//
// import (
+// "log"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
@@ -59,7 +60,7 @@
// // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler())
-// http.ListenAndServe(":8080", nil)
+// log.Fatal(http.ListenAndServe(":8080", nil))
// }
//
//
@@ -69,7 +70,7 @@
// Metrics
//
// The number of exported identifiers in this package might appear a bit
-// overwhelming. Hovever, in addition to the basic plumbing shown in the example
+// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage.
//
@@ -95,8 +96,8 @@
// SummaryVec, HistogramVec, and UntypedVec are not.
//
// To create instances of Metrics and their vector versions, you need a suitable
-// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
-// HistogramOpts, or UntypedOpts.
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
+// UntypedOpts.
//
// Custom Collectors and constant Metrics
//
@@ -114,8 +115,8 @@
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
-// instances, representative of the “throw-away” metrics to be created
-// later. NewDesc comes in handy to create those Desc instances.
+// instances, representative of the “throw-away” metrics to be created later.
+// NewDesc comes in handy to create those Desc instances.
//
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
@@ -129,34 +130,34 @@
// Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
-// sometimes you might want to handle the errors the registration might
-// cause. As suggested by the name, MustRegister panics if an error occurs. With
-// the Register function, the error is returned and can be handled.
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
//
// An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for
-// consistency of the collected metrics according to the Prometheus data
-// model. Inconsistencies are ideally detected at registration time, not at
-// collect time. The former will usually be detected at start-up time of a
-// program, while the latter will only happen at scrape time, possibly not even
-// on the first scrape if the inconsistency only becomes relevant later. That is
-// the main reason why a Collector and a Metric have to describe themselves to
-// the registry.
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
//
// So far, everything we did operated on the so-called default registry, as it
-// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
-// Gatherer interfaces yourself. The methods Register and Unregister work in
-// the same way on a custom registry as the global functions Register and
-// Unregister on the default registry.
-//
-// There are a number of uses for custom registries: You can use registries
-// with special properties, see NewPedanticRegistry. You can avoid global state,
-// as it is imposed by the DefaultRegistry. You can use multiple registries at
-// the same time to expose different metrics in different ways. You can use
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
-// Also note that the DefaultRegistry comes registered with a Collector for Go
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
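[Editor's note] A small sketch of the custom-registry pattern the rewritten doc comment describes, assuming the promhttp sub-package mentioned below:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// Opt in explicitly to the collectors the default registry ships with.
	reg.MustRegister(prometheus.NewGoCollector())

	// Serve only the metrics gathered by this registry.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}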
@@ -166,16 +167,20 @@
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
-// above. The tools to expose metrics via HTTP are in the promhttp
-// sub-package. (The top-level functions in the prometheus package are
-// deprecated.)
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+// (The top-level functions in the prometheus package are deprecated.)
//
// Pushing to the Pushgateway
//
// Function for pushing to the Pushgateway can be found in the push sub-package.
//
+// Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
// Other Means of Exposition
//
-// More ways of exposing metrics can easily be added. Sending metrics to
-// Graphite would be an example that will soon be implemented.
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go
new file mode 100644
index 000000000..c5e7de5e5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go
@@ -0,0 +1,71 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ // apiRequestDuration tracks the duration separate for each HTTP status
+ // class (1xx, 2xx, ...). This creates a fair amount of time series on
+ // the Prometheus server. Usually, you would track the duration of
+ // serving HTTP requests without partitioning by outcome. Do something
+ // like this only if needed. Also note how only status classes are
+ // tracked, not every single status code. The latter would create an
+ // even larger amount of time series. Request counters partitioned by
+ // status code are usually OK as each counter only creates one time
+ // series. Histograms are way more expensive, so partition with care and
+ // only where you really need separate latency tracking. Partitioning by
+ // status class is only an example. In concrete cases, other partitions
+ // might make more sense.
+ apiRequestDuration = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "api_request_duration_seconds",
+ Help: "Histogram for the request duration of the public API, partitioned by status class.",
+ Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),
+ },
+ []string{"status_class"},
+ )
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ status := http.StatusOK
+ // The ObserverFunc gets called by the deferred ObserveDuration and
+ // decides which Histogram's Observe method is called.
+ timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
+ switch {
+ case status >= 500: // Server error.
+ apiRequestDuration.WithLabelValues("5xx").Observe(v)
+ case status >= 400: // Client error.
+ apiRequestDuration.WithLabelValues("4xx").Observe(v)
+ case status >= 300: // Redirection.
+ apiRequestDuration.WithLabelValues("3xx").Observe(v)
+ case status >= 200: // Success.
+ apiRequestDuration.WithLabelValues("2xx").Observe(v)
+ default: // Informational.
+ apiRequestDuration.WithLabelValues("1xx").Observe(v)
+ }
+ }))
+ defer timer.ObserveDuration()
+
+ // Handle the request. Set status accordingly.
+ // ...
+}
+
+func ExampleTimer_complex() {
+ http.HandleFunc("/api", handler)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go
new file mode 100644
index 000000000..7184a0d1d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go
@@ -0,0 +1,48 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import (
+ "os"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ // If a function is called rarely (i.e. not more often than scrapes
+ // happen) or ideally only once (like in a batch job), it can make sense
+ // to use a Gauge for timing the function call. For timing a batch job
+ // and pushing the result to a Pushgateway, see also the comprehensive
+ // example in the push package.
+ funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "example_function_duration_seconds",
+ Help: "Duration of the last call of an example function.",
+ })
+)
+
+func run() error {
+ // The Set method of the Gauge is used to observe the duration.
+ timer := prometheus.NewTimer(prometheus.ObserverFunc(funcDuration.Set))
+ defer timer.ObserveDuration()
+
+ // Do something. Return errors as encountered. The use of 'defer' above
+ // makes sure the function is still timed properly.
+ return nil
+}
+
+func ExampleTimer_gauge() {
+ if err := run(); err != nil {
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go
new file mode 100644
index 000000000..bd86bb472
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go
@@ -0,0 +1,40 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import (
+ "math/rand"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: "example_request_duration_seconds",
+ Help: "Histogram for the runtime of a simple example function.",
+ Buckets: prometheus.LinearBuckets(0.01, 0.01, 10),
+ })
+)
+
+func ExampleTimer() {
+ // timer times this example function. It uses a Histogram, but a Summary
+ // would also work, as both implement Observer. Check out
+ // https://prometheus.io/docs/practices/histograms/ for differences.
+ timer := prometheus.NewTimer(requestDuration)
+ defer timer.ObserveDuration()
+
+ // Do something here that takes time.
+ time.Sleep(time.Duration(rand.NormFloat64()*10000+50000) * time.Microsecond)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go
index f87f21a8f..45f60650f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go
@@ -113,7 +113,7 @@ func ExampleCounter() {
pushComplete := make(chan struct{})
// TODO: Start a goroutine that performs repository pushes and reports
// each completion via the channel.
- for _ = range pushComplete {
+ for range pushComplete {
pushCounter.Inc()
}
// Output:
@@ -169,8 +169,8 @@ func ExampleInstrumentHandler() {
func ExampleLabelPairSorter() {
labelPairs := []*dto.LabelPair{
- &dto.LabelPair{Name: proto.String("status"), Value: proto.String("404")},
- &dto.LabelPair{Name: proto.String("method"), Value: proto.String("get")},
+ {Name: proto.String("status"), Value: proto.String("404")},
+ {Name: proto.String("method"), Value: proto.String("get")},
}
sort.Sort(prometheus.LabelPairSorter(labelPairs))
@@ -334,8 +334,9 @@ func ExampleRegister() {
func ExampleSummary() {
temps := prometheus.NewSummary(prometheus.SummaryOpts{
- Name: "pond_temperature_celsius",
- Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
+ Name: "pond_temperature_celsius",
+ Help: "The temperature of the frog pond.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})
// Simulate some observations.
@@ -372,8 +373,9 @@ func ExampleSummary() {
func ExampleSummaryVec() {
temps := prometheus.NewSummaryVec(
prometheus.SummaryOpts{
- Name: "pond_temperature_celsius",
- Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
+ Name: "pond_temperature_celsius",
+ Help: "The temperature of the frog pond.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"species"},
)
@@ -640,6 +642,7 @@ func ExampleAlreadyRegisteredError() {
panic(err)
}
}
+ reqCounter.Inc()
}
func ExampleGatherers() {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go
index 5d3128fae..910dac325 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go
@@ -24,7 +24,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
-func ExampleExpvarCollector() {
+func ExampleNewExpvarCollector() {
expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
"memstats": prometheus.NewDesc(
"expvar_memstats",
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index 8b70e5141..17c72d7eb 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -13,6 +13,14 @@
package prometheus
+import (
+ "math"
+ "sync/atomic"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
@@ -27,29 +35,95 @@ type Gauge interface {
// Set sets the Gauge to an arbitrary value.
Set(float64)
- // Inc increments the Gauge by 1.
+ // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+ // values.
Inc()
- // Dec decrements the Gauge by 1.
+ // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+ // values.
Dec()
- // Add adds the given value to the Gauge. (The value can be
- // negative, resulting in a decrease of the Gauge.)
+ // Add adds the given value to the Gauge. (The value can be negative,
+ // resulting in a decrease of the Gauge.)
Add(float64)
// Sub subtracts the given value from the Gauge. (The value can be
// negative, resulting in an increase of the Gauge.)
Sub(float64)
+
+ // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+ SetToCurrentTime()
}
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
// NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former. For example, the Inc method of the returned Gauge is slower than
+// the Inc method of a Counter returned by NewCounter. This matches the typical
+// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
+// the latter Inc-heavy.
func NewGauge(opts GaugeOpts) Gauge {
- return newValue(NewDesc(
+ desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
- ), GaugeValue, 0)
+ )
+ result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
+ result.init(result) // Init self-collection.
+ return result
+}
+
+type gauge struct {
+ // valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ selfCollector
+
+ desc *Desc
+ labelPairs []*dto.LabelPair
+}
+
+func (g *gauge) Desc() *Desc {
+ return g.desc
+}
+
+func (g *gauge) Set(val float64) {
+ atomic.StoreUint64(&g.valBits, math.Float64bits(val))
+}
+
+func (g *gauge) SetToCurrentTime() {
+ g.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (g *gauge) Inc() {
+ g.Add(1)
+}
+
+func (g *gauge) Dec() {
+ g.Add(-1)
+}
+
+func (g *gauge) Add(val float64) {
+ for {
+ oldBits := atomic.LoadUint64(&g.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (g *gauge) Sub(val float64) {
+ g.Add(val * -1)
+}
+
+func (g *gauge) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
+ return populateMetric(GaugeValue, val, g.labelPairs, out)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -58,12 +132,11 @@ func NewGauge(opts GaugeOpts) Gauge {
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
- *MetricVec
+ *metricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -72,28 +145,62 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels,
)
return &GaugeVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
- return newValue(desc, GaugeValue, 0, lvs...)
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ if len(lvs) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+ result.init(result) // Init self-collection.
+ return result
}),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Gauge and not a
-// Metric so that no type conversion is required.
-func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Gauge is created.
+//
+// It is possible to call this method without using the returned Gauge to only
+// create the new Gauge but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Gauge for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
+// Gauge will still exist, but it will not be exported anymore, even if a
+// Gauge with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Gauge), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Gauge and not a Metric so that no
-// type conversion is required.
-func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Gauge), err
}
@@ -101,18 +208,57 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
-func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
- return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ g, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return g
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *GaugeVec) With(labels Labels) Gauge {
- return m.MetricVec.With(labels).(Gauge)
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+ g, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &GaugeVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
}
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go
index 48cab4636..a2e3c1416 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go
@@ -19,6 +19,7 @@ import (
"sync"
"testing"
"testing/quick"
+ "time"
dto "github.com/prometheus/client_model/go"
)
@@ -82,7 +83,7 @@ func TestGaugeConcurrency(t *testing.T) {
}
start.Done()
- if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 {
+ if expected, got := <-result, math.Float64frombits(gge.(*gauge).valBits); math.Abs(expected-got) > 0.000001 {
t.Fatalf("expected approx. %f, got %f", expected, got)
return false
}
@@ -146,7 +147,7 @@ func TestGaugeVecConcurrency(t *testing.T) {
start.Done()
for i := range sStreams {
- if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 {
+ if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*gauge).valBits); math.Abs(expected-got) > 0.000001 {
t.Fatalf("expected approx. %f, got %f", expected, got)
return false
}
@@ -180,3 +181,22 @@ func TestGaugeFunc(t *testing.T) {
t.Errorf("expected %q, got %q", expected, got)
}
}
+
+func TestGaugeSetCurrentTime(t *testing.T) {
+ g := NewGauge(GaugeOpts{
+ Name: "test_name",
+ Help: "test help",
+ })
+ g.SetToCurrentTime()
+ unixTime := float64(time.Now().Unix())
+
+ m := &dto.Metric{}
+ g.Write(m)
+
+ delta := unixTime - m.GetGauge().GetValue()
+ // This is just a smoke test to make sure SetToCurrentTime is not
+ // totally off. Tests with current time involved are hard...
+ if math.Abs(delta) > 5 {
+ t.Errorf("Gauge set to current time deviates from current time by more than 5s, delta is %f seconds", delta)
+ }
+}
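[Editor's note] A minimal sketch of the new SetToCurrentTime method in application code; the metric name is illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

var lastBackup = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "backup_last_completion_timestamp_seconds",
	Help: "Unix timestamp of the last completed backup.",
})

func backupDone() {
	// Records time.Now() as a float64 Unix timestamp in seconds.
	lastBackup.SetToCurrentTime()
}

func main() { backupDone() }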
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index abc9d4ec4..096454af9 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -8,8 +8,10 @@ import (
)
type goCollector struct {
- goroutines Gauge
- gcDesc *Desc
+ goroutinesDesc *Desc
+ threadsDesc *Desc
+ gcDesc *Desc
+ goInfoDesc *Desc
// metrics to describe and collect
metrics memStatsMetrics
@@ -19,15 +21,22 @@ type goCollector struct {
// go process.
func NewGoCollector() Collector {
return &goCollector{
- goroutines: NewGauge(GaugeOpts{
- Namespace: "go",
- Name: "goroutines",
- Help: "Number of goroutines that currently exist.",
- }),
+ goroutinesDesc: NewDesc(
+ "go_goroutines",
+ "Number of goroutines that currently exist.",
+ nil, nil),
+ threadsDesc: NewDesc(
+ "go_threads",
+ "Number of OS threads created.",
+ nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the GC invocation durations.",
nil, nil),
+ goInfoDesc: NewDesc(
+ "go_info",
+ "Information about the Go environment.",
+ nil, Labels{"version": runtime.Version()}),
metrics: memStatsMetrics{
{
desc: NewDesc(
@@ -48,7 +57,7 @@ func NewGoCollector() Collector {
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
- "Number of bytes obtained by system. Sum of all system allocations.",
+ "Number of bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
@@ -111,12 +120,12 @@ func NewGoCollector() Collector {
valType: GaugeValue,
}, {
desc: NewDesc(
- memstatNamespace("heap_released_bytes_total"),
- "Total number of heap bytes released to OS.",
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
- valType: CounterValue,
+ valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
@@ -213,6 +222,14 @@ func NewGoCollector() Collector {
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
},
},
}
@@ -224,9 +241,10 @@ func memstatNamespace(s string) string {
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
- ch <- c.goroutines.Desc()
+ ch <- c.goroutinesDesc
+ ch <- c.threadsDesc
ch <- c.gcDesc
-
+ ch <- c.goInfoDesc
for _, i := range c.metrics {
ch <- i.desc
}
@@ -234,8 +252,9 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
- c.goroutines.Set(float64(runtime.NumGoroutine()))
- ch <- c.goroutines
+ ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
+ n, _ := runtime.ThreadCreateProfile(nil)
+ ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
var stats debug.GCStats
stats.PauseQuantiles = make([]time.Duration, 5)
@@ -248,6 +267,8 @@ func (c *goCollector) Collect(ch chan<- Metric) {
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+ ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
+
ms := &runtime.MemStats{}
runtime.ReadMemStats(ms)
for _, i := range c.metrics {
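[Editor's note] The change above moves the collector from mutating stored Gauges to emitting constant metrics per scrape. A sketch of that pattern for a custom collector (myCollector and its descriptor are hypothetical):

package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

type myCollector struct {
	goroutinesDesc *prometheus.Desc
}

func (c *myCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.goroutinesDesc
}

func (c *myCollector) Collect(ch chan<- prometheus.Metric) {
	// No stored state: a fresh constant metric is created per scrape.
	ch <- prometheus.MustNewConstMetric(
		c.goroutinesDesc, prometheus.GaugeValue, float64(runtime.NumGoroutine()))
}

func main() {
	c := &myCollector{goroutinesDesc: prometheus.NewDesc(
		"my_goroutines", "Number of goroutines.", nil, nil)}
	prometheus.MustRegister(c)
}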
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go
index 9a8858cbd..72264da9a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go
@@ -29,33 +29,37 @@ func TestGoCollector(t *testing.T) {
for {
select {
- case metric := <-ch:
- switch m := metric.(type) {
- // Attention, this also catches Counter...
- case Gauge:
- pb := &dto.Metric{}
- m.Write(pb)
- if pb.GetGauge() == nil {
- continue
- }
-
- if old == -1 {
- old = int(pb.GetGauge().GetValue())
- close(waitc)
- continue
- }
+ case m := <-ch:
+ // m can be a Gauge or a Counter; for now, only the
+ // go_goroutines Gauge is tested and all other
+ // metrics are ignored.
+ if m.Desc().fqName != "go_goroutines" {
+ continue
+ }
+ pb := &dto.Metric{}
+ m.Write(pb)
+ if pb.GetGauge() == nil {
+ continue
+ }
- if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 {
- // TODO: This is flaky in highly concurrent situations.
- t.Errorf("want 1 new goroutine, got %d", diff)
- }
+ if old == -1 {
+ old = int(pb.GetGauge().GetValue())
+ close(waitc)
+ continue
+ }
- // GoCollector performs two sends per call.
- // On line 27 we need to receive the second send
- // to shut down cleanly.
- <-ch
- return
+ if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 {
+ // TODO: This is flaky in highly concurrent situations.
+ t.Errorf("want 1 new goroutine, got %d", diff)
}
+
+ // GoCollector performs three sends per call.
+ // On line 27 we need to receive three more sends
+ // to shut down cleanly.
+ <-ch
+ <-ch
+ <-ch
+ return
case <-time.After(1 * time.Second):
t.Fatalf("expected collect timed out")
}
@@ -85,37 +89,33 @@ func TestGCCollector(t *testing.T) {
for {
select {
case metric := <-ch:
- switch m := metric.(type) {
- case *constSummary, *value:
- pb := &dto.Metric{}
- m.Write(pb)
- if pb.GetSummary() == nil {
- continue
- }
-
- if len(pb.GetSummary().Quantile) != 5 {
- t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile))
- }
- for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} {
- if *pb.GetSummary().Quantile[idx].Quantile != want {
- t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want)
- }
- }
- if first {
- first = false
- oldGC = *pb.GetSummary().SampleCount
- oldPause = *pb.GetSummary().SampleSum
- close(waitc)
- continue
- }
- if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {
- t.Errorf("want 1 new garbage collection run, got %d", diff)
- }
- if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {
- t.Errorf("want moar pause, got %f", diff)
+ pb := &dto.Metric{}
+ metric.Write(pb)
+ if pb.GetSummary() == nil {
+ continue
+ }
+ if len(pb.GetSummary().Quantile) != 5 {
+ t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile))
+ }
+ for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} {
+ if *pb.GetSummary().Quantile[idx].Quantile != want {
+ t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want)
}
- return
}
+ if first {
+ first = false
+ oldGC = *pb.GetSummary().SampleCount
+ oldPause = *pb.GetSummary().SampleSum
+ close(waitc)
+ continue
+ }
+ if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {
+ t.Errorf("want 1 new garbage collection run, got %d", diff)
+ }
+ if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {
+ t.Errorf("want moar pause, got %f", diff)
+ }
+ return
case <-time.After(1 * time.Second):
t.Fatalf("expected collect timed out")
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge.go b/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge.go
new file mode 100644
index 000000000..11533374b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge.go
@@ -0,0 +1,280 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package graphite provides a bridge to push Prometheus metrics to a Graphite
+// server.
+package graphite
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sort"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+ "github.com/prometheus/common/model"
+ "golang.org/x/net/context"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ defaultInterval = 15 * time.Second
+ millisecondsPerSecond = 1000
+)
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+ // Ignore errors and try to push as many metrics to Graphite as possible.
+ ContinueOnError HandlerErrorHandling = iota
+
+ // Abort the push to Graphite upon the first error encountered.
+ AbortOnError
+)
+
+// Config defines the Graphite bridge config.
+type Config struct {
+ // The host:port address of the Graphite server to push data to. Required.
+ URL string
+
+ // The prefix for the pushed Graphite metrics. Defaults to empty string.
+ Prefix string
+
+ // The interval to use for pushing data to Graphite. Defaults to 15 seconds.
+ Interval time.Duration
+
+ // The timeout for pushing metrics to Graphite. Defaults to 15 seconds.
+ Timeout time.Duration
+
+ // The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer.
+ Gatherer prometheus.Gatherer
+
+ // The logger that messages are written to. Defaults to no logging.
+ Logger Logger
+
+ // ErrorHandling defines how errors are handled. Note that errors are
+ // logged regardless of the configured ErrorHandling, provided Logger
+ // is not nil.
+ ErrorHandling HandlerErrorHandling
+}
+
+// Bridge pushes metrics to the configured Graphite server.
+type Bridge struct {
+ url string
+ prefix string
+ interval time.Duration
+ timeout time.Duration
+
+ errorHandling HandlerErrorHandling
+ logger Logger
+
+ g prometheus.Gatherer
+}
+
+// Logger is the minimal interface Bridge needs for logging. Note that
+// log.Logger from the standard library implements this interface, and custom
+// loggers can easily implement it if they don't already.
+type Logger interface {
+ Println(v ...interface{})
+}
+
+// NewBridge returns a pointer to a new Bridge struct.
+func NewBridge(c *Config) (*Bridge, error) {
+ b := &Bridge{}
+
+ if c.URL == "" {
+ return nil, errors.New("missing URL")
+ }
+ b.url = c.URL
+
+ if c.Gatherer == nil {
+ b.g = prometheus.DefaultGatherer
+ } else {
+ b.g = c.Gatherer
+ }
+
+ if c.Logger != nil {
+ b.logger = c.Logger
+ }
+
+ if c.Prefix != "" {
+ b.prefix = c.Prefix
+ }
+
+ var z time.Duration
+ if c.Interval == z {
+ b.interval = defaultInterval
+ } else {
+ b.interval = c.Interval
+ }
+
+ if c.Timeout == z {
+ b.timeout = defaultInterval
+ } else {
+ b.timeout = c.Timeout
+ }
+
+ b.errorHandling = c.ErrorHandling
+
+ return b, nil
+}
+
+// Run starts the event loop that pushes Prometheus metrics to Graphite at the
+// configured interval.
+func (b *Bridge) Run(ctx context.Context) {
+ ticker := time.NewTicker(b.interval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ if err := b.Push(); err != nil && b.logger != nil {
+ b.logger.Println("error pushing to Graphite:", err)
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+// Push pushes Prometheus metrics to the configured Graphite server.
+func (b *Bridge) Push() error {
+ mfs, err := b.g.Gather()
+ if err != nil || len(mfs) == 0 {
+ switch b.errorHandling {
+ case AbortOnError:
+ return err
+ case ContinueOnError:
+ if b.logger != nil {
+ b.logger.Println("continue on error:", err)
+ }
+ default:
+ panic("unrecognized error handling value")
+ }
+ }
+
+ conn, err := net.DialTimeout("tcp", b.url, b.timeout)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ return writeMetrics(conn, mfs, b.prefix, model.Now())
+}
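[Editor's note] A sketch of wiring the new bridge up; the Graphite address and prefix are placeholders:

package main

import (
	"log"
	"os"
	"time"

	"golang.org/x/net/context"

	"github.com/prometheus/client_golang/prometheus/graphite"
)

func main() {
	b, err := graphite.NewBridge(&graphite.Config{
		URL:      "graphite.example.org:2003",
		Prefix:   "myapp",
		Interval: 30 * time.Second,
		Logger:   log.New(os.Stdout, "graphite: ", log.Lshortfile),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Push on every interval tick until the context is canceled.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	b.Run(ctx)
}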
+
+func writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error {
+ vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
+ Timestamp: now,
+ }, mfs...)
+ if err != nil {
+ return err
+ }
+
+ buf := bufio.NewWriter(w)
+ for _, s := range vec {
+ if err := writeSanitized(buf, prefix); err != nil {
+ return err
+ }
+ if err := buf.WriteByte('.'); err != nil {
+ return err
+ }
+ if err := writeMetric(buf, s.Metric); err != nil {
+ return err
+ }
+ if _, err := fmt.Fprintf(buf, " %g %d\n", s.Value, int64(s.Timestamp)/millisecondsPerSecond); err != nil {
+ return err
+ }
+ if err := buf.Flush(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func writeMetric(buf *bufio.Writer, m model.Metric) error {
+ metricName, hasName := m[model.MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != model.MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value)))
+ }
+ }
+
+ var err error
+ switch numLabels {
+ case 0:
+ if hasName {
+ return writeSanitized(buf, string(metricName))
+ }
+ default:
+ sort.Strings(labelStrings)
+ if err = writeSanitized(buf, string(metricName)); err != nil {
+ return err
+ }
+ for _, s := range labelStrings {
+ if err = buf.WriteByte('.'); err != nil {
+ return err
+ }
+ if err = writeSanitized(buf, s); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func writeSanitized(buf *bufio.Writer, s string) error {
+ prevUnderscore := false
+
+ for _, c := range s {
+ c = replaceInvalidRune(c)
+ if c == '_' {
+ if prevUnderscore {
+ continue
+ }
+ prevUnderscore = true
+ } else {
+ prevUnderscore = false
+ }
+ if _, err := buf.WriteRune(c); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func replaceInvalidRune(c rune) rune {
+ if c == ' ' {
+ return '.'
+ }
+ if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || (c >= '0' && c <= '9')) {
+ return '_'
+ }
+ return c
+}
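[Editor's note] Net effect of the flattening implemented above: a sample name{constname="constvalue", labelname="val1"} under prefix "prefix" is written as

    prefix.name.constname.constvalue.labelname.val1 <value> <unix_seconds>

Label pairs are sorted and joined with '.'; replaceInvalidRune maps spaces to '.' and other invalid runes to '_', and writeSanitized collapses runs of '_'. The tests below pin this format down.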
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go b/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go
new file mode 100644
index 000000000..c2b274c6a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go
@@ -0,0 +1,309 @@
+package graphite
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "log"
+ "net"
+ "os"
+ "regexp"
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "golang.org/x/net/context"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func TestSanitize(t *testing.T) {
+ testCases := []struct {
+ in, out string
+ }{
+ {in: "hello", out: "hello"},
+ {in: "hE/l1o", out: "hE_l1o"},
+ {in: "he,*ll(.o", out: "he_ll_o"},
+ {in: "hello_there%^&", out: "hello_there_"},
+ }
+
+ var buf bytes.Buffer
+ w := bufio.NewWriter(&buf)
+
+ for i, tc := range testCases {
+ if err := writeSanitized(w, tc.in); err != nil {
+ t.Fatalf("write failed: %v", err)
+ }
+ if err := w.Flush(); err != nil {
+ t.Fatalf("flush failed: %v", err)
+ }
+
+ if want, got := tc.out, buf.String(); want != got {
+ t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
+ }
+
+ buf.Reset()
+ }
+}
+
+func TestWriteSummary(t *testing.T) {
+ sumVec := prometheus.NewSummaryVec(
+ prometheus.SummaryOpts{
+ Name: "name",
+ Help: "docstring",
+ ConstLabels: prometheus.Labels{"constname": "constvalue"},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ []string{"labelname"},
+ )
+
+ sumVec.WithLabelValues("val1").Observe(float64(10))
+ sumVec.WithLabelValues("val1").Observe(float64(20))
+ sumVec.WithLabelValues("val1").Observe(float64(30))
+ sumVec.WithLabelValues("val2").Observe(float64(20))
+ sumVec.WithLabelValues("val2").Observe(float64(30))
+ sumVec.WithLabelValues("val2").Observe(float64(40))
+
+ reg := prometheus.NewRegistry()
+ reg.MustRegister(sumVec)
+
+ mfs, err := reg.Gather()
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ now := model.Time(1477043083)
+ var buf bytes.Buffer
+ err = writeMetrics(&buf, mfs, "prefix", now)
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043
+prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043
+prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043
+prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
+prefix.name_count.constname.constvalue.labelname.val1 3 1477043
+prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043
+prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043
+prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043
+prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
+prefix.name_count.constname.constvalue.labelname.val2 3 1477043
+`
+
+ if got := buf.String(); want != got {
+ t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
+ }
+}
+
+func TestWriteHistogram(t *testing.T) {
+ histVec := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "name",
+ Help: "docstring",
+ ConstLabels: prometheus.Labels{"constname": "constvalue"},
+ Buckets: []float64{0.01, 0.02, 0.05, 0.1},
+ },
+ []string{"labelname"},
+ )
+
+ histVec.WithLabelValues("val1").Observe(float64(10))
+ histVec.WithLabelValues("val1").Observe(float64(20))
+ histVec.WithLabelValues("val1").Observe(float64(30))
+ histVec.WithLabelValues("val2").Observe(float64(20))
+ histVec.WithLabelValues("val2").Observe(float64(30))
+ histVec.WithLabelValues("val2").Observe(float64(40))
+
+ reg := prometheus.NewRegistry()
+ reg.MustRegister(histVec)
+
+ mfs, err := reg.Gather()
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ now := model.Time(1477043083)
+ var buf bytes.Buffer
+ err = writeMetrics(&buf, mfs, "prefix", now)
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043
+prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
+prefix.name_count.constname.constvalue.labelname.val1 3 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043
+prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
+prefix.name_count.constname.constvalue.labelname.val2 3 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043
+`
+ if got := buf.String(); want != got {
+ t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
+ }
+}
+
+func TestToReader(t *testing.T) {
+ cntVec := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "name",
+ Help: "docstring",
+ ConstLabels: prometheus.Labels{"constname": "constvalue"},
+ },
+ []string{"labelname"},
+ )
+ cntVec.WithLabelValues("val1").Inc()
+ cntVec.WithLabelValues("val2").Inc()
+
+ reg := prometheus.NewRegistry()
+ reg.MustRegister(cntVec)
+
+ want := `prefix.name.constname.constvalue.labelname.val1 1 1477043
+prefix.name.constname.constvalue.labelname.val2 1 1477043
+`
+ mfs, err := reg.Gather()
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ now := model.Time(1477043083)
+ var buf bytes.Buffer
+ err = writeMetrics(&buf, mfs, "prefix", now)
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ if got := buf.String(); want != got {
+ t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
+ }
+}
+
+func TestPush(t *testing.T) {
+ reg := prometheus.NewRegistry()
+ cntVec := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "name",
+ Help: "docstring",
+ ConstLabels: prometheus.Labels{"constname": "constvalue"},
+ },
+ []string{"labelname"},
+ )
+ cntVec.WithLabelValues("val1").Inc()
+ cntVec.WithLabelValues("val2").Inc()
+ reg.MustRegister(cntVec)
+
+ host := "localhost"
+ port := ":56789"
+ b, err := NewBridge(&Config{
+ URL: host + port,
+ Gatherer: reg,
+ Prefix: "prefix",
+ })
+ if err != nil {
+ t.Fatalf("error creating bridge: %v", err)
+ }
+
+ nmg, err := newMockGraphite(port)
+ if err != nil {
+ t.Fatalf("error creating mock graphite: %v", err)
+ }
+ defer nmg.Close()
+
+ err = b.Push()
+ if err != nil {
+ t.Fatalf("error pushing: %v", err)
+ }
+
+ wants := []string{
+ "prefix.name.constname.constvalue.labelname.val1 1",
+ "prefix.name.constname.constvalue.labelname.val2 1",
+ }
+
+ select {
+ case got := <-nmg.readc:
+ for _, want := range wants {
+ matched, err := regexp.MatchString(want, got)
+ if err != nil {
+ t.Fatalf("error pushing: %v", err)
+ }
+ if !matched {
+ t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got)
+ }
+ }
+ return
+ case err := <-nmg.errc:
+ t.Fatalf("error reading push: %v", err)
+ case <-time.After(50 * time.Millisecond):
+ t.Fatalf("no result from graphite server")
+ }
+}
+
+func newMockGraphite(port string) (*mockGraphite, error) {
+ readc := make(chan string)
+ errc := make(chan error)
+ ln, err := net.Listen("tcp", port)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ conn, err := ln.Accept()
+ if err != nil {
+ errc <- err
+ }
+ var b bytes.Buffer
+ io.Copy(&b, conn)
+ readc <- b.String()
+ }()
+
+ return &mockGraphite{
+ readc: readc,
+ errc: errc,
+ Listener: ln,
+ }, nil
+}
+
+type mockGraphite struct {
+ readc chan string
+ errc chan error
+
+ net.Listener
+}
+
+func ExampleBridge() {
+ b, err := NewBridge(&Config{
+ URL: "graphite.example.org:3099",
+ Gatherer: prometheus.DefaultGatherer,
+ Prefix: "prefix",
+ Interval: 15 * time.Second,
+ Timeout: 10 * time.Second,
+ ErrorHandling: AbortOnError,
+ Logger: log.New(os.Stdout, "graphite bridge: ", log.Lshortfile),
+ })
+ if err != nil {
+ panic(err)
+ }
+
+ go func() {
+ // Start something in a goroutine that uses metrics.
+ }()
+
+ // Push initial metrics to Graphite. Fail fast if the push fails.
+ if err := b.Push(); err != nil {
+ panic(err)
+ }
+
+ // Create a Context to control stopping the Run() loop that pushes
+ // metrics to Graphite.
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // Start pushing metrics to Graphite in the Run() loop.
+ b.Run(ctx)
+}
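
The want strings in the tests above spell out the Graphite plaintext protocol the bridge emits: one "dotted path, value, timestamp" line per sample, with labels flattened into the path and the millisecond model.Time truncated to seconds (hence the 1477043 suffix). A minimal sketch of producing one such line, with an illustrative path:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	// Labels are flattened into the dotted path as name.value pairs;
	// the trailing timestamp is in Unix seconds.
	path := "prefix.name.constname.constvalue.labelname.val1"
	fmt.Fprintf(os.Stdout, "%s %v %d\n", path, 1, time.Now().Unix())
}
```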
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 9719e8fac..331783a75 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -126,23 +126,16 @@ type HistogramOpts struct {
// string.
Help string
- // ConstLabels are used to attach fixed labels to this
- // Histogram. Histograms with the same fully-qualified name must have the
- // same label names in their ConstLabels.
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
//
- // Note that in most cases, labels have a value that varies during the
- // lifetime of a process. Those labels are usually managed with a
- // HistogramVec. ConstLabels serve only special purposes. One is for the
- // special case where the value of a label does not change during the
- // lifetime of a process, e.g. if the revision of the running binary is
- // put into a label. Another, more advanced purpose is if more than one
- // Collector needs to collect Histograms with the same fully-qualified
- // name. In that case, those Summaries must differ in the values of
- // their ConstLabels. See the Collector examples.
- //
- // If the value of a label never changes (not even between binaries),
- // that label most likely should not be a label at all (but part of the
- // metric name).
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
ConstLabels Labels
// Buckets defines the buckets into which observations are counted. Each
@@ -287,12 +280,11 @@ func (h *histogram) Write(out *dto.Metric) error {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
- *MetricVec
+ *metricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -301,47 +293,116 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels,
)
return &HistogramVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Histogram and not a
-// Metric so that no type conversion is required.
-func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Histogram for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Histogram is created.
+//
+// It is possible to call this method without using the returned Histogram,
+// simply to create the new Histogram and leave it at its starting value: a
+// Histogram without any observations.
+//
+// Keeping the Histogram for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
+// Histogram will still exist, but it will not be exported anymore, even if a
+// Histogram with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
- return metric.(Histogram), err
+ return metric.(Observer), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Histogram and not a Metric so that no
-// type conversion is required.
-func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
if metric != nil {
- return metric.(Histogram), err
+ return metric.(Observer), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
- return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+ h, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+ h, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return h
}
-// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *HistogramVec) With(labels Labels) Histogram {
- return m.MetricVec.With(labels).(Histogram)
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &HistogramVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
}
type constHistogram struct {
@@ -401,8 +462,8 @@ func NewConstHistogram(
buckets map[float64]uint64,
labelValues ...string,
) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
}
return &constHistogram{
desc: desc,
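
The CurryWith and MustCurryWith methods documented above pre-set labels on a vector. A minimal sketch of their use, with illustrative metric and label names:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "request_duration_seconds", // illustrative
			Help:    "Request latency.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"handler", "method"},
	)
	prometheus.MustRegister(latency)

	// Pre-set "handler"; the curried vector only needs "method".
	byMethod := latency.MustCurryWith(prometheus.Labels{"handler": "/api"})
	byMethod.WithLabelValues("GET").Observe(0.042)

	// The same underlying child metric via uncurried access:
	latency.WithLabelValues("/api", "GET").Observe(0.007)
}
```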
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go
index d1242e08d..5a20f4b6b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go
@@ -119,6 +119,28 @@ func BenchmarkHistogramWrite8(b *testing.B) {
benchmarkHistogramWrite(8, b)
}
+func TestHistogramNonMonotonicBuckets(t *testing.T) {
+ testCases := map[string][]float64{
+ "not strictly monotonic": {1, 2, 2, 3},
+ "not monotonic at all": {1, 2, 4, 3, 5},
+ "have +Inf in the middle": {1, 2, math.Inf(+1), 3},
+ }
+ for name, buckets := range testCases {
+ func() {
+ defer func() {
+ if r := recover(); r == nil {
+ t.Errorf("Buckets %v are %s but NewHistogram did not panic.", buckets, name)
+ }
+ }()
+ _ = NewHistogram(HistogramOpts{
+ Name: "test_histogram",
+ Help: "helpless",
+ Buckets: buckets,
+ })
+ }()
+ }
+}
+
// Intentionally adding +Inf here to test if that case is handled correctly.
// Also, getCumulativeCounts depends on it.
var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}
@@ -264,7 +286,7 @@ func TestHistogramVecConcurrency(t *testing.T) {
for i := 0; i < vecLength; i++ {
m := &dto.Metric{}
s := his.WithLabelValues(string('A' + i))
- s.Write(m)
+ s.(Histogram).Write(m)
if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
t.Errorf("got %d buckets in protobuf, want %d", got, want)
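
The s.(Histogram).Write(m) change above follows from WithLabelValues now returning an Observer rather than a Histogram: anything beyond Observe needs a type assertion. A small self-contained sketch of the pattern:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	his := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "test_histogram", Help: "helpless"},
		[]string{"label"},
	)

	obs := his.WithLabelValues("A") // static type is now Observer
	obs.Observe(1.0)

	// Write is not part of Observer, so assert back to Histogram:
	m := &dto.Metric{}
	if err := obs.(prometheus.Histogram).Write(m); err != nil {
		panic(err)
	}
}
```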
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
index 67ee5ac79..bfee5c6eb 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -62,7 +62,8 @@ func giveBuf(buf *bytes.Buffer) {
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead
-// (which is non instrumented).
+// (which is not instrumented, but can be instrumented with the tooling provided
+// in package promhttp).
func Handler() http.Handler {
return InstrumentHandler("prometheus", UninstrumentedHandler())
}
@@ -95,7 +96,7 @@ func UninstrumentedHandler() http.Handler {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
- http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
@@ -158,7 +159,8 @@ func nowSeries(t ...time.Time) nower {
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
-// Deprecated: InstrumentHandler has several issues:
+// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
+// package promhttp instead. The issues are the following:
//
// - It uses Summaries rather than Histograms. Summaries are not useful if
// aggregation across multiple instances is required.
@@ -172,9 +174,8 @@ func nowSeries(t ...time.Time) nower {
// httputil.ReverseProxy is a prominent example for a handler
// performing such writes.
//
-// Upcoming versions of this package will provide ways of instrumenting HTTP
-// handlers that are more flexible and have fewer issues. Please prefer direct
-// instrumentation in the meantime.
+// - It has additional issues with HTTP/2, cf.
+// https://github.com/prometheus/client_golang/issues/272.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
@@ -184,12 +185,13 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
-// InstrumentHandler is.
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(
SummaryOpts{
Subsystem: "http",
ConstLabels: Labels{"handler": handlerName},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
handlerFunc,
)
@@ -222,7 +224,7 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
// SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
-// InstrumentHandler is.
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
@@ -233,7 +235,7 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand
// SummaryOpts are used.
//
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
-// as InstrumentHandler is.
+// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
reqCnt := NewCounterVec(
CounterOpts{
@@ -245,34 +247,52 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
},
instLabels,
)
+ if err := Register(reqCnt); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqCnt = are.ExistingCollector.(*CounterVec)
+ } else {
+ panic(err)
+ }
+ }
opts.Name = "request_duration_microseconds"
opts.Help = "The HTTP request latencies in microseconds."
reqDur := NewSummary(opts)
+ if err := Register(reqDur); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqDur = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
opts.Name = "request_size_bytes"
opts.Help = "The HTTP request sizes in bytes."
reqSz := NewSummary(opts)
+ if err := Register(reqSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
opts.Name = "response_size_bytes"
opts.Help = "The HTTP response sizes in bytes."
resSz := NewSummary(opts)
-
- regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
- regReqDur := MustRegisterOrGet(reqDur).(Summary)
- regReqSz := MustRegisterOrGet(reqSz).(Summary)
- regResSz := MustRegisterOrGet(resSz).(Summary)
+ if err := Register(resSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ resSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
delegate := &responseWriterDelegator{ResponseWriter: w}
- out := make(chan int)
- urlLen := 0
- if r.URL != nil {
- urlLen = len(r.URL.String())
- }
- go computeApproximateRequestSize(r, out, urlLen)
+ out := computeApproximateRequestSize(r)
_, cn := w.(http.CloseNotifier)
_, fl := w.(http.Flusher)
@@ -290,30 +310,44 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
method := sanitizeMethod(r.Method)
code := sanitizeCode(delegate.status)
- regReqCnt.WithLabelValues(method, code).Inc()
- regReqDur.Observe(elapsed)
- regResSz.Observe(float64(delegate.written))
- regReqSz.Observe(float64(<-out))
+ reqCnt.WithLabelValues(method, code).Inc()
+ reqDur.Observe(elapsed)
+ resSz.Observe(float64(delegate.written))
+ reqSz.Observe(float64(<-out))
})
}
-func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
- s += len(r.Method)
- s += len(r.Proto)
- for name, values := range r.Header {
- s += len(name)
- for _, value := range values {
- s += len(value)
- }
+func computeApproximateRequestSize(r *http.Request) <-chan int {
+	// Get the URL length in the current goroutine to avoid a race condition:
+	// a HandlerFunc running in parallel may modify the URL.
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
}
- s += len(r.Host)
- // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+ out := make(chan int, 1)
- if r.ContentLength != -1 {
- s += int(r.ContentLength)
- }
- out <- s
+ go func() {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+ close(out)
+ }()
+
+ return out
}
type responseWriterDelegator struct {
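
The repeated Register/AlreadyRegisteredError blocks above replace the removed MustRegisterOrGet. The idiom, reduced to a minimal sketch around an illustrative counter:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reqCnt := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_requests_total", // illustrative
		Help: "An example counter.",
	})
	if err := prometheus.Register(reqCnt); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			// Reuse the collector registered earlier.
			reqCnt = are.ExistingCollector.(prometheus.Counter)
		} else {
			panic(err)
		}
	}
	reqCnt.Inc()
}
```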
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http_test.go b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go
index ffe0418cf..0c7fa2347 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/http_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go
@@ -44,9 +44,10 @@ func TestInstrumentHandler(t *testing.T) {
opts := SummaryOpts{
Subsystem: "http",
ConstLabels: Labels{"handler": "test-handler"},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}
- reqCnt := MustRegisterOrGet(NewCounterVec(
+ reqCnt := NewCounterVec(
CounterOpts{
Namespace: opts.Namespace,
Subsystem: opts.Subsystem,
@@ -55,19 +56,51 @@ func TestInstrumentHandler(t *testing.T) {
ConstLabels: opts.ConstLabels,
},
instLabels,
- )).(*CounterVec)
+ )
+ err := Register(reqCnt)
+ if err == nil {
+ t.Fatal("expected reqCnt to be registered already")
+ }
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqCnt = are.ExistingCollector.(*CounterVec)
+ } else {
+ t.Fatal("unexpected registration error:", err)
+ }
opts.Name = "request_duration_microseconds"
opts.Help = "The HTTP request latencies in microseconds."
- reqDur := MustRegisterOrGet(NewSummary(opts)).(Summary)
+ reqDur := NewSummary(opts)
+ err = Register(reqDur)
+ if err == nil {
+ t.Fatal("expected reqDur to be registered already")
+ }
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqDur = are.ExistingCollector.(Summary)
+ } else {
+ t.Fatal("unexpected registration error:", err)
+ }
opts.Name = "request_size_bytes"
opts.Help = "The HTTP request sizes in bytes."
- MustRegisterOrGet(NewSummary(opts))
+ reqSz := NewSummary(opts)
+ err = Register(reqSz)
+ if err == nil {
+ t.Fatal("expected reqSz to be registered already")
+ }
+ if _, ok := err.(AlreadyRegisteredError); !ok {
+ t.Fatal("unexpected registration error:", err)
+ }
opts.Name = "response_size_bytes"
opts.Help = "The HTTP response sizes in bytes."
- MustRegisterOrGet(NewSummary(opts))
+ resSz := NewSummary(opts)
+ err = Register(resSz)
+ if err == nil {
+ t.Fatal("expected resSz to be registered already")
+ }
+ if _, ok := err.(AlreadyRegisteredError); !ok {
+ t.Fatal("unexpected registration error:", err)
+ }
reqCnt.Reset()
@@ -95,7 +128,7 @@ func TestInstrumentHandler(t *testing.T) {
}
out.Reset()
- if want, got := 1, len(reqCnt.children); want != got {
+ if want, got := 1, len(reqCnt.metricMap.metrics); want != got {
t.Errorf("want %d children in reqCnt, got %d", want, got)
}
cnt, err := reqCnt.GetMetricWithLabelValues("get", "418")
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
new file mode 100644
index 000000000..2502e3734
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -0,0 +1,57 @@
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/prometheus/common/model"
+)
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
+ if len(labels) != expectedNumberOfValues {
+ return errInconsistentCardinality
+ }
+
+ for name, val := range labels {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
+ }
+ }
+
+ return nil
+}
+
+func validateLabelValues(vals []string, expectedNumberOfValues int) error {
+ if len(vals) != expectedNumberOfValues {
+ return errInconsistentCardinality
+ }
+
+ for _, val := range vals {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label value %q is not valid UTF-8", val)
+ }
+ }
+
+ return nil
+}
+
+func checkLabelName(l string) bool {
+ return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+}
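
validateLabelValues surfaces through constructors such as NewConstHistogram (see the histogram.go hunk above). A sketch of both failure modes, assuming a Desc with one variable label:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("example_histogram", "helpless", []string{"label"}, nil)

	// Too few label values for one variable label:
	_, err := prometheus.NewConstHistogram(desc, 1, 1.0, map[float64]uint64{1: 1})
	fmt.Println(err) // inconsistent label cardinality

	// Label values must also be valid UTF-8:
	_, err = prometheus.NewConstHistogram(desc, 1, 1.0, map[float64]uint64{1: 1}, string([]byte{0xff}))
	fmt.Println(err)
}
```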
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index d4063d98f..6213ee812 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -79,20 +79,12 @@ type Opts struct {
// with the same fully-qualified name must have the same label names in
// their ConstLabels.
//
- // Note that in most cases, labels have a value that varies during the
- // lifetime of a process. Those labels are usually managed with a metric
- // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
- // serve only special purposes. One is for the special case where the
- // value of a label does not change during the lifetime of a process,
- // e.g. if the revision of the running binary is put into a
- // label. Another, more advanced purpose is if more than one Collector
- // needs to collect Metrics with the same fully-qualified name. In that
- // case, those Metrics must differ in the values of their
- // ConstLabels. See the Collector examples.
- //
- // If the value of a label never changes (not even between binaries),
- // that label most likely should not be a label at all (but part of the
- // metric name).
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
ConstLabels Labels
}
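
The rewritten ConstLabels comment recommends a dedicated info metric over attaching the same labels everywhere. A common shape for that, sketched with illustrative names and values:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// One dedicated info metric instead of const labels on every metric;
	// name and label values here are illustrative.
	buildInfo := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "myapp_build_info",
		Help: "Build information. Value is always 1.",
		ConstLabels: prometheus.Labels{
			"version":  "1.2.3",
			"revision": "abc123",
		},
	})
	buildInfo.Set(1)
	prometheus.MustRegister(buildInfo)
}
```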
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
new file mode 100644
index 000000000..5806cd09e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -0,0 +1,52 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+ Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+ f(value)
+}
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+ GetMetricWith(Labels) (Observer, error)
+ GetMetricWithLabelValues(lvs ...string) (Observer, error)
+ With(Labels) Observer
+ WithLabelValues(...string) Observer
+ CurryWith(Labels) (ObserverVec, error)
+ MustCurryWith(Labels) ObserverVec
+
+ Collector
+}
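
A minimal sketch of the "Gauge" Timer use case mentioned above (metric name illustrative): ObserverFunc adapts the Gauge's Set method to the Observer interface that NewTimer expects.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	lastDuration := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "last_task_duration_seconds", // illustrative
		Help: "Duration of the most recent task run.",
	})
	prometheus.MustRegister(lastDuration)

	// ObserverFunc adapts Gauge.Set to the Observer interface.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(lastDuration.Set))
	time.Sleep(10 * time.Millisecond) // stands in for the timed work
	timer.ObserveDuration()
}
```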
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index e31e62e78..94b2553e1 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -19,10 +19,10 @@ type processCollector struct {
pid int
collectFn func(chan<- Metric)
pidFn func() (int, error)
- cpuTotal Counter
- openFDs, maxFDs Gauge
- vsize, rss Gauge
- startTime Gauge
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, rss *Desc
+ startTime *Desc
}
// NewProcessCollector returns a collector which exports the current state of
@@ -44,40 +44,45 @@ func NewProcessCollectorPIDFn(
pidFn func() (int, error),
namespace string,
) Collector {
+ ns := ""
+ if len(namespace) > 0 {
+ ns = namespace + "_"
+ }
+
c := processCollector{
pidFn: pidFn,
collectFn: func(chan<- Metric) {},
- cpuTotal: NewCounter(CounterOpts{
- Namespace: namespace,
- Name: "process_cpu_seconds_total",
- Help: "Total user and system CPU time spent in seconds.",
- }),
- openFDs: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_open_fds",
- Help: "Number of open file descriptors.",
- }),
- maxFDs: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_max_fds",
- Help: "Maximum number of open file descriptors.",
- }),
- vsize: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_virtual_memory_bytes",
- Help: "Virtual memory size in bytes.",
- }),
- rss: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_resident_memory_bytes",
- Help: "Resident memory size in bytes.",
- }),
- startTime: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_start_time_seconds",
- Help: "Start time of the process since unix epoch in seconds.",
- }),
+ cpuTotal: NewDesc(
+ ns+"process_cpu_seconds_total",
+ "Total user and system CPU time spent in seconds.",
+ nil, nil,
+ ),
+ openFDs: NewDesc(
+ ns+"process_open_fds",
+ "Number of open file descriptors.",
+ nil, nil,
+ ),
+ maxFDs: NewDesc(
+ ns+"process_max_fds",
+ "Maximum number of open file descriptors.",
+ nil, nil,
+ ),
+ vsize: NewDesc(
+ ns+"process_virtual_memory_bytes",
+ "Virtual memory size in bytes.",
+ nil, nil,
+ ),
+ rss: NewDesc(
+ ns+"process_resident_memory_bytes",
+ "Resident memory size in bytes.",
+ nil, nil,
+ ),
+ startTime: NewDesc(
+ ns+"process_start_time_seconds",
+ "Start time of the process since unix epoch in seconds.",
+ nil, nil,
+ ),
}
// Set up process metric collection if supported by the runtime.
@@ -90,12 +95,12 @@ func NewProcessCollectorPIDFn(
// Describe returns all descriptions of the collector.
func (c *processCollector) Describe(ch chan<- *Desc) {
- ch <- c.cpuTotal.Desc()
- ch <- c.openFDs.Desc()
- ch <- c.maxFDs.Desc()
- ch <- c.vsize.Desc()
- ch <- c.rss.Desc()
- ch <- c.startTime.Desc()
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.rss
+ ch <- c.startTime
}
// Collect returns the current state of all metrics of the collector.
@@ -117,26 +122,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
}
if stat, err := p.NewStat(); err == nil {
- c.cpuTotal.Set(stat.CPUTime())
- ch <- c.cpuTotal
- c.vsize.Set(float64(stat.VirtualMemory()))
- ch <- c.vsize
- c.rss.Set(float64(stat.ResidentMemory()))
- ch <- c.rss
-
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
if startTime, err := stat.StartTime(); err == nil {
- c.startTime.Set(startTime)
- ch <- c.startTime
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
}
}
if fds, err := p.FileDescriptorsLen(); err == nil {
- c.openFDs.Set(float64(fds))
- ch <- c.openFDs
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
}
if limits, err := p.NewLimits(); err == nil {
- c.maxFDs.Set(float64(limits.OpenFiles))
- ch <- c.maxFDs
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
}
}
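
The collector now holds only *Desc values and emits const metrics at collect time, so no mutable metric state is shared between scrapes. The same pattern applied to a hypothetical custom collector, as a minimal sketch:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector sketches the same pattern: hold only *Desc values and
// emit const metrics on every scrape.
type queueCollector struct {
	depth *prometheus.Desc
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depth
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// 42 stands in for a real, freshly read value.
	ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, 42)
}

func main() {
	prometheus.MustRegister(&queueCollector{
		depth: prometheus.NewDesc(
			"example_queue_depth", // illustrative
			"Current queue depth.",
			nil, nil,
		),
	})
}
```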
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go
index d3362dae7..c7acb47fe 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go
@@ -38,18 +38,18 @@ func TestProcessCollector(t *testing.T) {
}
for _, re := range []*regexp.Regexp{
- regexp.MustCompile("process_cpu_seconds_total [0-9]"),
- regexp.MustCompile("process_max_fds [1-9]"),
- regexp.MustCompile("process_open_fds [1-9]"),
- regexp.MustCompile("process_virtual_memory_bytes [1-9]"),
- regexp.MustCompile("process_resident_memory_bytes [1-9]"),
- regexp.MustCompile("process_start_time_seconds [0-9.]{10,}"),
- regexp.MustCompile("foobar_process_cpu_seconds_total [0-9]"),
- regexp.MustCompile("foobar_process_max_fds [1-9]"),
- regexp.MustCompile("foobar_process_open_fds [1-9]"),
- regexp.MustCompile("foobar_process_virtual_memory_bytes [1-9]"),
- regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"),
- regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"),
+ regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"),
+ regexp.MustCompile("\nprocess_max_fds [1-9]"),
+ regexp.MustCompile("\nprocess_open_fds [1-9]"),
+ regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"),
+ regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"),
+ regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"),
+ regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"),
+ regexp.MustCompile("\nfoobar_process_max_fds [1-9]"),
+ regexp.MustCompile("\nfoobar_process_open_fds [1-9]"),
+ regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"),
+ regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"),
+ regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"),
} {
if !re.Match(buf.Bytes()) {
t.Errorf("want body to match %s\n%s", re, buf.String())
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
new file mode 100644
index 000000000..5ee095b09
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -0,0 +1,199 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+const (
+ closeNotifier = 1 << iota
+ flusher
+ hijacker
+ readerFrom
+ pusher
+)
+
+type delegator interface {
+ http.ResponseWriter
+
+ Status() int
+ Written() int64
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+ observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+ return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+ return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+ if r.observeWriteHeader != nil {
+ r.observeWriteHeader(code)
+ }
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type closeNotifierDelegator struct{ *responseWriterDelegator }
+type flusherDelegator struct{ *responseWriterDelegator }
+type hijackerDelegator struct{ *responseWriterDelegator }
+type readerFromDelegator struct{ *responseWriterDelegator }
+
+func (d *closeNotifierDelegator) CloseNotify() <-chan bool {
+ return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+func (d *flusherDelegator) Flush() {
+ d.ResponseWriter.(http.Flusher).Flush()
+}
+func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return d.ResponseWriter.(http.Hijacker).Hijack()
+}
+func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+ if !d.wroteHeader {
+ d.WriteHeader(http.StatusOK)
+ }
+ n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+ d.written += n
+ return n, err
+}
+
+var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
+
+func init() {
+ // TODO(beorn7): Code generation would help here.
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
+ return d
+ }
+ pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
+ return closeNotifierDelegator{d}
+ }
+ pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
+ return flusherDelegator{d}
+ }
+ pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
+ return struct {
+ *responseWriterDelegator
+ http.Flusher
+ http.CloseNotifier
+ }{d, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
+ return hijackerDelegator{d}
+ }
+ pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ }{d, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
+ return readerFromDelegator{d}
+ }
+ pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ }{d, &readerFromDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+}
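
The pickDelegator index is simply the sum of the capability flags detected on the ResponseWriter. A standalone sketch of that arithmetic (the real lookup happens in newDelegator below):

```go
package main

import "fmt"

const (
	closeNotifier = 1 << iota // 1
	flusher                   // 2
	hijacker                  // 4
	readerFrom                // 8
	pusher                    // 16
)

func main() {
	// A ResponseWriter that supports Flush and CloseNotify maps to
	// index flusher+closeNotifier = 3: the combined delegator above.
	supportsCloseNotify, supportsFlush := true, true
	id := 0
	if supportsCloseNotify {
		id += closeNotifier
	}
	if supportsFlush {
		id += flusher
	}
	fmt.Println(id) // 3
}
```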
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
new file mode 100644
index 000000000..f4d386f7a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
@@ -0,0 +1,181 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+type pusherDelegator struct{ *responseWriterDelegator }
+
+func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error {
+ return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func init() {
+ pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+ return pusherDelegator{d}
+ }
+ pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ }{d, &pusherDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+ if _, ok := w.(http.Pusher); ok {
+ id += pusher
+ }
+
+ return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
new file mode 100644
index 000000000..8bb9b8b68
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
@@ -0,0 +1,44 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+
+ return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index b6dd5a266..2d67f2496 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -11,21 +11,24 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
+// Package promhttp provides tooling around HTTP servers and clients.
//
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
-// Package promhttp contains functions to create http.Handler instances to
-// expose Prometheus metrics via HTTP. In later versions of this package, it
-// will also contain tooling to instrument instances of http.Handler and
-// http.RoundTripper.
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or that log
+// errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
//
-// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
-// you can create a handler for a custom registry or anything that implements
-// the Gatherer interface. It also allows to create handlers that act
-// differently on errors or allow to log errors.
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
package promhttp
import (
@@ -125,7 +128,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
- http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
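
Putting the rewritten package comment into practice, a minimal sketch serving a custom registry via HandlerFor, with HandlerOpts controlling error handling (path and port are illustrative):

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	// HandlerFor serves any Gatherer; HandlerOpts configures error
	// logging and error handling behavior.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling: promhttp.ContinueOnError,
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```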
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
index d4a7d4a7b..413ff7baa 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
@@ -11,12 +11,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
package promhttp
import (
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 000000000..86fd56447
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,97 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+ return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ gauge.Inc()
+ defer gauge.Dec()
+ return next.RoundTrip(r)
+ })
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for InstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(counter)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+ }
+ return resp, err
+ })
+}
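// A minimal sketch (not part of the vendored change) of wiring the counter
// middleware above into a client; the metric name is illustrative:
//
//	counter := prometheus.NewCounterVec(
//		prometheus.CounterOpts{Name: "example_client_requests_total", Help: "illustrative"},
//		[]string{"code", "method"},
//	)
//	prometheus.MustRegister(counter)
//	client := &http.Client{
//		Transport: InstrumentRoundTripperCounter(counter, http.DefaultTransport),
//	}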
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec. The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(obs)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+ }
+ return resp, err
+ })
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
new file mode 100644
index 000000000..0bd80c355
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
@@ -0,0 +1,144 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "context"
+ "crypto/tls"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+)
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately bucketed Histograms, or to implement custom
+// instance labels on a per-function basis.
+type InstrumentTrace struct {
+ GotConn func(float64)
+ PutIdleConn func(float64)
+ GotFirstResponseByte func(float64)
+ Got100Continue func(float64)
+ DNSStart func(float64)
+ DNSDone func(float64)
+ ConnectStart func(float64)
+ ConnectDone func(float64)
+ TLSHandshakeStart func(float64)
+ TLSHandshakeDone func(float64)
+ WroteHeaders func(float64)
+ Wait100Continue func(float64)
+ WroteRequest func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// durations since the start of the request. Only with Go1.9+ are those times
+// guaranteed to never be negative. (Earlier Go versions do not use a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for InstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+
+ trace := &httptrace.ClientTrace{
+ GotConn: func(_ httptrace.GotConnInfo) {
+ if it.GotConn != nil {
+ it.GotConn(time.Since(start).Seconds())
+ }
+ },
+ PutIdleConn: func(err error) {
+ if err != nil {
+ return
+ }
+ if it.PutIdleConn != nil {
+ it.PutIdleConn(time.Since(start).Seconds())
+ }
+ },
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ if it.DNSStart != nil {
+ it.DNSStart(time.Since(start).Seconds())
+ }
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ if it.DNSDone != nil {
+ it.DNSDone(time.Since(start).Seconds())
+ }
+ },
+ ConnectStart: func(_, _ string) {
+ if it.ConnectStart != nil {
+ it.ConnectStart(time.Since(start).Seconds())
+ }
+ },
+ ConnectDone: func(_, _ string, err error) {
+ if err != nil {
+ return
+ }
+ if it.ConnectDone != nil {
+ it.ConnectDone(time.Since(start).Seconds())
+ }
+ },
+ GotFirstResponseByte: func() {
+ if it.GotFirstResponseByte != nil {
+ it.GotFirstResponseByte(time.Since(start).Seconds())
+ }
+ },
+ Got100Continue: func() {
+ if it.Got100Continue != nil {
+ it.Got100Continue(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeStart: func() {
+ if it.TLSHandshakeStart != nil {
+ it.TLSHandshakeStart(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+ if err != nil {
+ return
+ }
+ if it.TLSHandshakeDone != nil {
+ it.TLSHandshakeDone(time.Since(start).Seconds())
+ }
+ },
+ WroteHeaders: func() {
+ if it.WroteHeaders != nil {
+ it.WroteHeaders(time.Since(start).Seconds())
+ }
+ },
+ Wait100Continue: func() {
+ if it.Wait100Continue != nil {
+ it.Wait100Continue(time.Since(start).Seconds())
+ }
+ },
+ WroteRequest: func(_ httptrace.WroteRequestInfo) {
+ if it.WroteRequest != nil {
+ it.WroteRequest(time.Since(start).Seconds())
+ }
+ },
+ }
+ r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
+
+ return next.RoundTrip(r)
+ })
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8_test.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8_test.go
new file mode 100644
index 000000000..7e3f5229f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8_test.go
@@ -0,0 +1,195 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "log"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func TestClientMiddlewareAPI(t *testing.T) {
+ client := http.DefaultClient
+ client.Timeout = 1 * time.Second
+
+ reg := prometheus.NewRegistry()
+
+ inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "client_in_flight_requests",
+ Help: "A gauge of in-flight requests for the wrapped client.",
+ })
+
+ counter := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "client_api_requests_total",
+ Help: "A counter for requests from the wrapped client.",
+ },
+ []string{"code", "method"},
+ )
+
+ dnsLatencyVec := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "dns_duration_seconds",
+ Help: "Trace dns latency histogram.",
+ Buckets: []float64{.005, .01, .025, .05},
+ },
+ []string{"event"},
+ )
+
+ tlsLatencyVec := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "tls_duration_seconds",
+ Help: "Trace tls latency histogram.",
+ Buckets: []float64{.05, .1, .25, .5},
+ },
+ []string{"event"},
+ )
+
+ histVec := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "request_duration_seconds",
+ Help: "A histogram of request latencies.",
+ Buckets: prometheus.DefBuckets,
+ },
+ []string{"method"},
+ )
+
+ reg.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge)
+
+ trace := &InstrumentTrace{
+ DNSStart: func(t float64) {
+ dnsLatencyVec.WithLabelValues("dns_start").Observe(t)
+ },
+ DNSDone: func(t float64) {
+ dnsLatencyVec.WithLabelValues("dns_done").Observe(t)
+ },
+ TLSHandshakeStart: func(t float64) {
+ tlsLatencyVec.WithLabelValues("tls_handshake_start").Observe(t)
+ },
+ TLSHandshakeDone: func(t float64) {
+ tlsLatencyVec.WithLabelValues("tls_handshake_done").Observe(t)
+ },
+ }
+
+ client.Transport = InstrumentRoundTripperInFlight(inFlightGauge,
+ InstrumentRoundTripperCounter(counter,
+ InstrumentRoundTripperTrace(trace,
+ InstrumentRoundTripperDuration(histVec, http.DefaultTransport),
+ ),
+ ),
+ )
+
+ resp, err := client.Get("http://google.com")
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+ defer resp.Body.Close()
+}
+
+func ExampleInstrumentRoundTripperDuration() {
+ client := http.DefaultClient
+ client.Timeout = 1 * time.Second
+
+ inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "client_in_flight_requests",
+ Help: "A gauge of in-flight requests for the wrapped client.",
+ })
+
+ counter := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "client_api_requests_total",
+ Help: "A counter for requests from the wrapped client.",
+ },
+ []string{"code", "method"},
+ )
+
+ // dnsLatencyVec uses custom buckets based on expected dns durations.
+ // It has an instance label "event", which is set in the
+ // DNSStart and DNSDone hook functions defined in the
+ // InstrumentTrace struct below.
+ dnsLatencyVec := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "dns_duration_seconds",
+ Help: "Trace dns latency histogram.",
+ Buckets: []float64{.005, .01, .025, .05},
+ },
+ []string{"event"},
+ )
+
+ // tlsLatencyVec uses custom buckets based on expected tls durations.
+ // It has an instance label "event", which is set in the
+ // TLSHandshakeStart and TLSHandshakeDone hook functions defined in the
+ // InstrumentTrace struct below.
+ tlsLatencyVec := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "tls_duration_seconds",
+ Help: "Trace tls latency histogram.",
+ Buckets: []float64{.05, .1, .25, .5},
+ },
+ []string{"event"},
+ )
+
+ // histVec has no labels, making it a zero-dimensional ObserverVec.
+ histVec := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "request_duration_seconds",
+ Help: "A histogram of request latencies.",
+ Buckets: prometheus.DefBuckets,
+ },
+ []string{},
+ )
+
+ // Register all of the metrics in the standard registry.
+ prometheus.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge)
+
+ // Define functions for the available httptrace.ClientTrace hook
+ // functions that we want to instrument.
+ trace := &InstrumentTrace{
+ DNSStart: func(t float64) {
+ dnsLatencyVec.WithLabelValues("dns_start").Observe(t)
+ },
+ DNSDone: func(t float64) {
+ dnsLatencyVec.WithLabelValues("dns_done").Observe(t)
+ },
+ TLSHandshakeStart: func(t float64) {
+ tlsLatencyVec.WithLabelValues("tls_handshake_start").Observe(t)
+ },
+ TLSHandshakeDone: func(t float64) {
+ tlsLatencyVec.WithLabelValues("tls_handshake_done").Observe(t)
+ },
+ }
+
+ // Wrap the default RoundTripper with middleware.
+ roundTripper := InstrumentRoundTripperInFlight(inFlightGauge,
+ InstrumentRoundTripperCounter(counter,
+ InstrumentRoundTripperTrace(trace,
+ InstrumentRoundTripperDuration(histVec, http.DefaultTransport),
+ ),
+ ),
+ )
+
+ // Set the RoundTripper on our client.
+ client.Transport = roundTripper
+
+ resp, err := client.Get("http://google.com")
+ if err != nil {
+ log.Printf("error: %v", err)
+ return
+ }
+ defer resp.Body.Close()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 000000000..9db243805
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,447 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "errors"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ g.Inc()
+ defer g.Dec()
+ next.ServeHTTP(w, r)
+ })
+}
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ next.ServeHTTP(w, r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+ })
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
+// to observe the request result with the provided CounterVec. The CounterVec
+// must have zero, one, or two non-const non-curried labels. For those, the only
+// allowed label names are "code" and "method". The function panics
+// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
+// HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(counter)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ counter.With(labels(code, method, r.Method, d.Status())).Inc()
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ counter.With(labels(code, method, r.Method, 0)).Inc()
+ })
+}
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have zero, one,
+// or two non-const non-curried labels. For those, the only allowed label names
+// are "code" and "method". The function panics otherwise. The Observe method of
+// the Observer in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, func(status int) {
+ obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+ })
+ next.ServeHTTP(d, r)
+ })
+}
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec. The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+ })
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec. The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the response size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+ code, method := checkLabels(obs)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+ })
+}
+
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+ // TODO(beorn7): Remove this hacky way to check for instance labels
+ // once Descriptors can have their dimensionality queried.
+ var (
+ desc *prometheus.Desc
+ m prometheus.Metric
+ pm dto.Metric
+ lvs []string
+ )
+
+ // Get the Desc from the Collector.
+ descc := make(chan *prometheus.Desc, 1)
+ c.Describe(descc)
+
+ select {
+ case desc = <-descc:
+ default:
+ panic("no description provided by collector")
+ }
+ select {
+ case <-descc:
+ panic("more than one description provided by collector")
+ default:
+ }
+
+ close(descc)
+
+ // Create a ConstMetric with the Desc. Since we don't know how many
+ // variable labels there are, try for as long as it needs.
+ for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
+ m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
+ }
+
+ // Write out the metric into a proto message and look at the labels.
+ // If the value is not the magicString, it is a constLabel, which doesn't interest us.
+ // If the label is curried, it doesn't interest us.
+ // In all other cases, only "code" or "method" is allowed.
+ if err := m.Write(&pm); err != nil {
+ panic("error checking metric for labels")
+ }
+ for _, label := range pm.Label {
+ name, value := label.GetName(), label.GetValue()
+ if value != magicString || isLabelCurried(c, name) {
+ continue
+ }
+ switch name {
+ case "code":
+ code = true
+ case "method":
+ method = true
+ default:
+ panic("metric partitioned with non-supported labels")
+ }
+ }
+ return
+}
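// A minimal sketch (not part of the vendored change) of what checkLabels
// reports; the metric name is illustrative:
//
//	v := prometheus.NewCounterVec(
//		prometheus.CounterOpts{Name: "example_requests_total", Help: "illustrative"},
//		[]string{"code", "method"},
//	)
//	code, method := checkLabels(v) // code == true, method == true
//	// A variable label such as "handler" would panic here instead.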
+
+func isLabelCurried(c prometheus.Collector, label string) bool {
+ // This is even hackier than the label test above.
+ // We essentially try to curry again and see if it works.
+ // But for that, we need to type-convert to the two
+ // types we use here, ObserverVec or *CounterVec.
+ switch v := c.(type) {
+ case *prometheus.CounterVec:
+ if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+ return false
+ }
+ case prometheus.ObserverVec:
+ if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+ return false
+ }
+ default:
+ panic("unsupported metric vec type")
+ }
+ return true
+}
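// A minimal sketch (not part of the vendored change) of the re-curry probe
// above; names are illustrative:
//
//	v := prometheus.NewCounterVec(
//		prometheus.CounterOpts{Name: "example_total", Help: "illustrative"},
//		[]string{"method"},
//	)
//	curried := v.MustCurryWith(prometheus.Labels{"method": "get"})
//	_, err := curried.CurryWith(prometheus.Labels{"method": "get"})
//	// err != nil, so isLabelCurried reports "method" as curried.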
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+ if !(code || method) {
+ return emptyLabels
+ }
+ labels := prometheus.Labels{}
+
+ if code {
+ labels["code"] = sanitizeCode(status)
+ }
+ if method {
+ labels["method"] = sanitizeMethod(reqMethod)
+ }
+
+ return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ return s
+}
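// A minimal sketch (not part of the vendored change): the approximation sums
// URL, method, proto, header, and host bytes, plus ContentLength when known.
//
//	r, _ := http.NewRequest("POST", "http://example.com/push", strings.NewReader("abc"))
//	size := computeApproximateRequestSize(r) // includes the body's ContentLength of 3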
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200, 0:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go
new file mode 100644
index 000000000..e9af63e04
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go
@@ -0,0 +1,375 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "io"
+ "log"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func TestLabelCheck(t *testing.T) {
+ scenarios := map[string]struct {
+ varLabels []string
+ constLabels []string
+ curriedLabels []string
+ ok bool
+ }{
+ "empty": {
+ varLabels: []string{},
+ constLabels: []string{},
+ curriedLabels: []string{},
+ ok: true,
+ },
+ "code as single var label": {
+ varLabels: []string{"code"},
+ constLabels: []string{},
+ curriedLabels: []string{},
+ ok: true,
+ },
+ "method as single var label": {
+ varLabels: []string{"method"},
+ constLabels: []string{},
+ curriedLabels: []string{},
+ ok: true,
+ },
+ "cade and method as var labels": {
+ varLabels: []string{"method", "code"},
+ constLabels: []string{},
+ curriedLabels: []string{},
+ ok: true,
+ },
+ "valid case with all labels used": {
+ varLabels: []string{"code", "method"},
+ constLabels: []string{"foo", "bar"},
+ curriedLabels: []string{"dings", "bums"},
+ ok: true,
+ },
+ "unsupported var label": {
+ varLabels: []string{"foo"},
+ constLabels: []string{},
+ curriedLabels: []string{},
+ ok: false,
+ },
+ "mixed var labels": {
+ varLabels: []string{"method", "foo", "code"},
+ constLabels: []string{},
+ curriedLabels: []string{},
+ ok: false,
+ },
+ "unsupported var label but curried": {
+ varLabels: []string{},
+ constLabels: []string{},
+ curriedLabels: []string{"foo"},
+ ok: true,
+ },
+ "mixed var labels but unsupported curried": {
+ varLabels: []string{"code", "method"},
+ constLabels: []string{},
+ curriedLabels: []string{"foo"},
+ ok: true,
+ },
+ "supported label as const and curry": {
+ varLabels: []string{},
+ constLabels: []string{"code"},
+ curriedLabels: []string{"method"},
+ ok: true,
+ },
+ "supported label as const and curry with unsupported as var": {
+ varLabels: []string{"foo"},
+ constLabels: []string{"code"},
+ curriedLabels: []string{"method"},
+ ok: false,
+ },
+ }
+
+ for name, sc := range scenarios {
+ t.Run(name, func(t *testing.T) {
+ constLabels := prometheus.Labels{}
+ for _, l := range sc.constLabels {
+ constLabels[l] = "dummy"
+ }
+ c := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "c",
+ Help: "c help",
+ ConstLabels: constLabels,
+ },
+ append(sc.varLabels, sc.curriedLabels...),
+ )
+ o := prometheus.ObserverVec(prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "c",
+ Help: "c help",
+ ConstLabels: constLabels,
+ },
+ append(sc.varLabels, sc.curriedLabels...),
+ ))
+ for _, l := range sc.curriedLabels {
+ c = c.MustCurryWith(prometheus.Labels{l: "dummy"})
+ o = o.MustCurryWith(prometheus.Labels{l: "dummy"})
+ }
+
+ func() {
+ defer func() {
+ if err := recover(); err != nil {
+ if sc.ok {
+ t.Error("unexpected panic:", err)
+ }
+ } else if !sc.ok {
+ t.Error("expected panic")
+ }
+ }()
+ InstrumentHandlerCounter(c, nil)
+ }()
+ func() {
+ defer func() {
+ if err := recover(); err != nil {
+ if sc.ok {
+ t.Error("unexpected panic:", err)
+ }
+ } else if !sc.ok {
+ t.Error("expected panic")
+ }
+ }()
+ InstrumentHandlerDuration(o, nil)
+ }()
+ if sc.ok {
+ // Test if wantCode and wantMethod were detected correctly.
+ var wantCode, wantMethod bool
+ for _, l := range sc.varLabels {
+ if l == "code" {
+ wantCode = true
+ }
+ if l == "method" {
+ wantMethod = true
+ }
+ }
+ gotCode, gotMethod := checkLabels(c)
+ if gotCode != wantCode {
+ t.Errorf("wanted code=%t for counter, got code=%t", wantCode, gotCode)
+ }
+ if gotMethod != wantMethod {
+ t.Errorf("wanted method=%t for counter, got method=%t", wantMethod, gotMethod)
+ }
+ gotCode, gotMethod = checkLabels(o)
+ if gotCode != wantCode {
+ t.Errorf("wanted code=%t for observer, got code=%t", wantCode, gotCode)
+ }
+ if gotMethod != wantMethod {
+ t.Errorf("wanted method=%t for observer, got method=%t", wantMethod, gotMethod)
+ }
+ }
+ })
+ }
+}
+
+func TestMiddlewareAPI(t *testing.T) {
+ reg := prometheus.NewRegistry()
+
+ inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "in_flight_requests",
+ Help: "A gauge of requests currently being served by the wrapped handler.",
+ })
+
+ counter := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "api_requests_total",
+ Help: "A counter for requests to the wrapped handler.",
+ },
+ []string{"code", "method"},
+ )
+
+ histVec := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "response_duration_seconds",
+ Help: "A histogram of request latencies.",
+ Buckets: prometheus.DefBuckets,
+ ConstLabels: prometheus.Labels{"handler": "api"},
+ },
+ []string{"method"},
+ )
+
+ writeHeaderVec := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "write_header_duration_seconds",
+ Help: "A histogram of time to first write latencies.",
+ Buckets: prometheus.DefBuckets,
+ ConstLabels: prometheus.Labels{"handler": "api"},
+ },
+ []string{},
+ )
+
+ responseSize := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "push_request_size_bytes",
+ Help: "A histogram of request sizes for requests.",
+ Buckets: []float64{200, 500, 900, 1500},
+ },
+ []string{},
+ )
+
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("OK"))
+ })
+
+ reg.MustRegister(inFlightGauge, counter, histVec, responseSize, writeHeaderVec)
+
+ chain := InstrumentHandlerInFlight(inFlightGauge,
+ InstrumentHandlerCounter(counter,
+ InstrumentHandlerDuration(histVec,
+ InstrumentHandlerTimeToWriteHeader(writeHeaderVec,
+ InstrumentHandlerResponseSize(responseSize, handler),
+ ),
+ ),
+ ),
+ )
+
+ r, _ := http.NewRequest("GET", "www.example.com", nil)
+ w := httptest.NewRecorder()
+ chain.ServeHTTP(w, r)
+}
+
+func TestInstrumentTimeToFirstWrite(t *testing.T) {
+ var i int
+ dobs := &responseWriterDelegator{
+ ResponseWriter: httptest.NewRecorder(),
+ observeWriteHeader: func(status int) {
+ i = status
+ },
+ }
+ d := newDelegator(dobs, nil)
+
+ d.WriteHeader(http.StatusOK)
+
+ if i != http.StatusOK {
+ t.Fatalf("failed to execute observeWriteHeader")
+ }
+}
+
+// testResponseWriter is an http.ResponseWriter that also implements
+// http.CloseNotifier, http.Flusher, and io.ReaderFrom.
+type testResponseWriter struct {
+ closeNotifyCalled, flushCalled, readFromCalled bool
+}
+
+func (t *testResponseWriter) Header() http.Header { return nil }
+func (t *testResponseWriter) Write([]byte) (int, error) { return 0, nil }
+func (t *testResponseWriter) WriteHeader(int) {}
+func (t *testResponseWriter) CloseNotify() <-chan bool {
+ t.closeNotifyCalled = true
+ return nil
+}
+func (t *testResponseWriter) Flush() { t.flushCalled = true }
+func (t *testResponseWriter) ReadFrom(io.Reader) (int64, error) {
+ t.readFromCalled = true
+ return 0, nil
+}
+
+func TestInterfaceUpgrade(t *testing.T) {
+ w := &testResponseWriter{}
+ d := newDelegator(w, nil)
+ d.(http.CloseNotifier).CloseNotify()
+ if !w.closeNotifyCalled {
+ t.Error("CloseNotify not called")
+ }
+ d.(http.Flusher).Flush()
+ if !w.flushCalled {
+ t.Error("Flush not called")
+ }
+ d.(io.ReaderFrom).ReadFrom(nil)
+ if !w.readFromCalled {
+ t.Error("ReadFrom not called")
+ }
+ if _, ok := d.(http.Hijacker); ok {
+ t.Error("delegator unexpectedly implements http.Hijacker")
+ }
+}
+
+func ExampleInstrumentHandlerDuration() {
+ inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "in_flight_requests",
+ Help: "A gauge of requests currently being served by the wrapped handler.",
+ })
+
+ counter := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "api_requests_total",
+ Help: "A counter for requests to the wrapped handler.",
+ },
+ []string{"code", "method"},
+ )
+
+ // duration is partitioned by the HTTP method and handler. It uses custom
+ // buckets based on the expected request duration.
+ duration := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "request_duration_seconds",
+ Help: "A histogram of latencies for requests.",
+ Buckets: []float64{.25, .5, 1, 2.5, 5, 10},
+ },
+ []string{"handler", "method"},
+ )
+
+ // responseSize has no labels, making it a zero-dimensional
+ // ObserverVec.
+ responseSize := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "response_size_bytes",
+ Help: "A histogram of response sizes for requests.",
+ Buckets: []float64{200, 500, 900, 1500},
+ },
+ []string{},
+ )
+
+ // Create the handlers that will be wrapped by the middleware.
+ pushHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Push"))
+ })
+ pullHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Pull"))
+ })
+
+ // Register all of the metrics in the standard registry.
+ prometheus.MustRegister(inFlightGauge, counter, duration, responseSize)
+
+ // Instrument the handlers with all the metrics, injecting the "handler"
+ // label by currying.
+ pushChain := InstrumentHandlerInFlight(inFlightGauge,
+ InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "push"}),
+ InstrumentHandlerCounter(counter,
+ InstrumentHandlerResponseSize(responseSize, pushHandler),
+ ),
+ ),
+ )
+ pullChain := InstrumentHandlerInFlight(inFlightGauge,
+ InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "pull"}),
+ InstrumentHandlerCounter(counter,
+ InstrumentHandlerResponseSize(responseSize, pullHandler),
+ ),
+ ),
+ )
+
+ http.Handle("/metrics", Handler())
+ http.Handle("/push", pushChain)
+ http.Handle("/pull", pullChain)
+
+ if err := http.ListenAndServe(":3000", nil); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go b/vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go
new file mode 100644
index 000000000..5180c0745
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go
@@ -0,0 +1,84 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package push_test
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/push"
+)
+
+var (
+ completionTime = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "db_backup_last_completion_timestamp_seconds",
+ Help: "The timestamp of the last completion of a DB backup, successful or not.",
+ })
+ successTime = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "db_backup_last_success_timestamp_seconds",
+ Help: "The timestamp of the last successful completion of a DB backup.",
+ })
+ duration = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "db_backup_duration_seconds",
+ Help: "The duration of the last DB backup in seconds.",
+ })
+ records = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "db_backup_records_processed",
+ Help: "The number of records processed in the last DB backup.",
+ })
+)
+
+func performBackup() (int, error) {
+ // Perform the backup and return the number of backed up records and any
+ // applicable error.
+ // ...
+ return 42, nil
+}
+
+func ExampleAddFromGatherer() {
+ registry := prometheus.NewRegistry()
+ registry.MustRegister(completionTime, duration, records)
+ // Note that successTime is not registered at this time.
+
+ start := time.Now()
+ n, err := performBackup()
+ records.Set(float64(n))
+ // Note that time.Since only uses a monotonic clock in Go1.9+.
+ duration.Set(time.Since(start).Seconds())
+ completionTime.SetToCurrentTime()
+ if err != nil {
+ fmt.Println("DB backup failed:", err)
+ } else {
+ // Only now register successTime.
+ registry.MustRegister(successTime)
+ successTime.SetToCurrentTime()
+ }
+ // AddFromGatherer is used here rather than FromGatherer to not delete a
+ // previously pushed success timestamp in case of a failure of this
+ // backup.
+ if err := push.AddFromGatherer(
+ "db_backup", nil,
+ "http://pushgateway:9091",
+ registry,
+ ); err != nil {
+ fmt.Println("Could not push to Pushgateway:", err)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go b/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go
index 7f17ca291..7e0ac66a5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go
@@ -15,7 +15,6 @@ package push_test
import (
"fmt"
- "time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
@@ -24,9 +23,9 @@ import (
func ExampleCollectors() {
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_timestamp_seconds",
- Help: "The timestamp of the last succesful completion of a DB backup.",
+ Help: "The timestamp of the last successful completion of a DB backup.",
})
- completionTime.Set(float64(time.Now().Unix()))
+ completionTime.SetToCurrentTime()
if err := push.Collectors(
"db_backup", push.HostnameGroupingKey(),
"http://pushgateway:9091",
@@ -35,22 +34,3 @@ func ExampleCollectors() {
fmt.Println("Could not push completion time to Pushgateway:", err)
}
}
-
-func ExampleRegistry() {
- registry := prometheus.NewRegistry()
-
- completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
- Name: "db_backup_last_completion_timestamp_seconds",
- Help: "The timestamp of the last succesful completion of a DB backup.",
- })
- registry.MustRegister(completionTime)
-
- completionTime.Set(float64(time.Now().Unix()))
- if err := push.FromGatherer(
- "db_backup", push.HostnameGroupingKey(),
- "http://pushgateway:9091",
- registry,
- ); err != nil {
- fmt.Println("Could not push completion time to Pushgateway:", err)
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go
index ae40402f8..8fb6f5f17 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go
@@ -84,7 +84,7 @@ func push(job string, grouping map[string]string, pushURL string, g prometheus.G
}
urlComponents := []string{url.QueryEscape(job)}
for ln, lv := range grouping {
- if !model.LabelNameRE.MatchString(ln) {
+ if !model.LabelName(ln).IsValid() {
return fmt.Errorf("grouping label has invalid name: %s", ln)
}
if strings.Contains(lv, "/") {
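A brief aside, not part of the vendored change: model.LabelName.IsValid applies
the same [a-zA-Z_][a-zA-Z0-9_]* rule that the old model.LabelNameRE regexp
matched, without the regexp-engine overhead. A minimal sketch using the model
package push.go already imports:

	model.LabelName("instance").IsValid() // true
	model.LabelName("0bad").IsValid()     // false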
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 32a3986b0..c84a4420e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -20,6 +20,7 @@ import (
"os"
"sort"
"sync"
+ "unicode/utf8"
"github.com/golang/protobuf/proto"
@@ -80,7 +81,7 @@ func NewPedanticRegistry() *Registry {
// Registerer is the interface for the part of a registry in charge of
// registering and unregistering. Users of custom registries should use
-// Registerer as type for registration purposes (rather then the Registry type
+// Registerer as type for registration purposes (rather than the Registry type
// directly). In that way, they are free to use custom Registerer implementation
// (e.g. for testing purposes).
type Registerer interface {
@@ -152,38 +153,6 @@ func MustRegister(cs ...Collector) {
DefaultRegisterer.MustRegister(cs...)
}
-// RegisterOrGet registers the provided Collector with the DefaultRegisterer and
-// returns the Collector, unless an equal Collector was registered before, in
-// which case that Collector is returned.
-//
-// Deprecated: RegisterOrGet is merely a convenience function for the
-// implementation as described in the documentation for
-// AlreadyRegisteredError. As the use case is relatively rare, this function
-// will be removed in a future version of this package to clean up the
-// namespace.
-func RegisterOrGet(c Collector) (Collector, error) {
- if err := Register(c); err != nil {
- if are, ok := err.(AlreadyRegisteredError); ok {
- return are.ExistingCollector, nil
- }
- return nil, err
- }
- return c, nil
-}
-
-// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning
-// an error.
-//
-// Deprecated: This is deprecated for the same reason RegisterOrGet is. See
-// there for details.
-func MustRegisterOrGet(c Collector) Collector {
- c, err := RegisterOrGet(c)
- if err != nil {
- panic(err)
- }
- return c
-}
-
// Unregister removes the registration of the provided Collector from the
// DefaultRegisterer.
//
@@ -201,25 +170,6 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
return gf()
}
-// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that
-// gathers from the previous DefaultGatherers but then merges the MetricFamily
-// protobufs returned from the provided hook function with the MetricFamily
-// protobufs returned from the original DefaultGatherer.
-//
-// Deprecated: This function manipulates the DefaultGatherer variable. Consider
-// the implications, i.e. don't do this concurrently with any uses of the
-// DefaultGatherer. In the rare cases where you need to inject MetricFamily
-// protobufs directly, it is recommended to use a custom Registry and combine it
-// with a custom Gatherer using the Gatherers type (see
-// there). SetMetricFamilyInjectionHook only exists for compatibility reasons
-// with previous versions of this package.
-func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
- DefaultGatherer = Gatherers{
- DefaultGatherer,
- GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }),
- }
-}
-
// AlreadyRegisteredError is returned by the Register method if the Collector to
// be registered has already been registered before, or a different Collector
// that collects the same metrics has been registered before. Registration fails
@@ -294,7 +244,7 @@ func (r *Registry) Register(c Collector) error {
}()
r.mtx.Lock()
defer r.mtx.Unlock()
- // Coduct various tests...
+ // Conduct various tests...
for desc := range descChan {
// Is the descriptor valid at all?
@@ -447,7 +397,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
// Drain metricChan in case of premature return.
defer func() {
- for _ = range metricChan {
+ for range metricChan {
}
}()
@@ -683,7 +633,7 @@ func (s metricSorter) Less(i, j int) bool {
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
-// normalizeMetricFamilies returns a MetricFamily slice whith empty
+// normalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
@@ -706,7 +656,7 @@ func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily)
// checkMetricConsistency checks if the provided Metric is consistent with the
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
-// name. If the resulting hash is alread in the provided metricHashes, an error
+// name. If the resulting hash is already in the provided metricHashes, an error
// is returned. If not, it is added to metricHashes. The provided dimHashes maps
// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
// doesn't yet contain a hash for the provided MetricFamily, it is
@@ -730,6 +680,12 @@ func checkMetricConsistency(
)
}
+ for _, labelPair := range dtoMetric.GetLabel() {
+ if !utf8.ValidString(*labelPair.Value) {
+ return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value)
+ }
+ }
+
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
h := hashNew()
h = hashAdd(h, metricFamily.GetName())
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go
index 9dacb6256..d136bba1e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go
@@ -209,6 +209,34 @@ metric: <
expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
`)
+ externalMetricFamilyWithInvalidLabelValue := &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("docstring"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{
+ {
+ Name: proto.String("constname"),
+ Value: proto.String("\xFF"),
+ },
+ {
+ Name: proto.String("labelname"),
+ Value: proto.String("different_val"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(42),
+ },
+ },
+ },
+ }
+
+ expectedMetricFamilyInvalidLabelValueAsText := []byte(`An error has occurred during metrics gathering:
+
+collected metric's label constname is not utf8: "\xff"
+`)
+
type output struct {
headers map[string]string
body []byte
@@ -452,6 +480,22 @@ metric: <
externalMetricFamilyWithSameName,
},
},
+ { // 16
+ headers: map[string]string{
+ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `text/plain; charset=utf-8`,
+ },
+ body: expectedMetricFamilyInvalidLabelValueAsText,
+ },
+ collector: metricVec,
+ externalMF: []*dto.MetricFamily{
+ externalMetricFamily,
+ externalMetricFamilyWithInvalidLabelValue,
+ },
+ },
}
for i, scenario := range scenarios {
registry := prometheus.NewPedanticRegistry()
@@ -526,20 +570,21 @@ func TestRegisterWithOrGet(t *testing.T) {
},
[]string{"foo", "bar"},
)
- if err := prometheus.Register(original); err != nil {
+ var err error
+ if err = prometheus.Register(original); err != nil {
t.Fatal(err)
}
- if err := prometheus.Register(equalButNotSame); err == nil {
+ if err = prometheus.Register(equalButNotSame); err == nil {
t.Fatal("expected error when registringe equal collector")
}
- existing, err := prometheus.RegisterOrGet(equalButNotSame)
- if err != nil {
- t.Fatal(err)
- }
- if existing != original {
- t.Error("expected original collector but got something else")
- }
- if existing == equalButNotSame {
- t.Error("expected original callector but got new one")
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ if are.ExistingCollector != original {
+ t.Error("expected original collector but got something else")
+ }
+ if are.ExistingCollector == equalButNotSame {
+ t.Error("expected original callector but got new one")
+ }
+ } else {
+ t.Error("unexpected error:", err)
}
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index bce05bf9a..f7dc85b96 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -36,7 +36,10 @@ const quantileLabel = "quantile"
//
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
-// as rank estimations.
+// as rank estimations. However, the default behavior will change in the
+// upcoming v0.10 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
//
// Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you
@@ -54,6 +57,9 @@ type Summary interface {
}
// DefObjectives are the default Summary quantile values.
+//
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v0.10 of the library. The default Summary will have no quantiles then.
var (
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
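// A minimal caller-side sketch (not part of the vendored change) of pinning
// Objectives explicitly, as the surrounding deprecation notes recommend; the
// metric name is illustrative:
//
//	s := prometheus.NewSummary(prometheus.SummaryOpts{
//		Name:       "example_latency_seconds",
//		Help:       "illustrative only",
//		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
//	})
//	s.Observe(0.21)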
@@ -75,8 +81,10 @@ const (
)
// SummaryOpts bundles the options for creating a Summary metric. It is
-// mandatory to set Name and Help to a non-empty string. All other fields are
-// optional and can safely be left at their zero value.
+// mandatory to set Name and Help to a non-empty string. While all other fields
+// are optional and can safely be left at their zero value, it is recommended to
+// explicitly set the Objectives field to the desired value as the default value
+// will change in the upcoming v0.10 of the library.
type SummaryOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Summary (created by joining these components with
@@ -93,29 +101,28 @@ type SummaryOpts struct {
// string.
Help string
- // ConstLabels are used to attach fixed labels to this
- // Summary. Summaries with the same fully-qualified name must have the
- // same label names in their ConstLabels.
- //
- // Note that in most cases, labels have a value that varies during the
- // lifetime of a process. Those labels are usually managed with a
- // SummaryVec. ConstLabels serve only special purposes. One is for the
- // special case where the value of a label does not change during the
- // lifetime of a process, e.g. if the revision of the running binary is
- // put into a label. Another, more advanced purpose is if more than one
- // Collector needs to collect Summaries with the same fully-qualified
- // name. In that case, those Summaries must differ in the values of
- // their ConstLabels. See the Collector examples.
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
//
- // If the value of a label never changes (not even between binaries),
- // that label most likely should not be a label at all (but part of the
- // metric name).
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
ConstLabels Labels
// Objectives defines the quantile rank estimates with their respective
- // absolute error. If Objectives[q] = e, then the value reported
- // for q will be the φ-quantile value for some φ between q-e and q+e.
- // The default value is DefObjectives.
+ // absolute error. If Objectives[q] = e, then the value reported for q
+ // will be the φ-quantile value for some φ between q-e and q+e. The
+ // default value is DefObjectives. It is used if Objectives is left at
+ // its zero value (i.e. nil). To create a Summary without Objectives,
+ // set it to an empty map (i.e. map[float64]float64{}).
+ //
+ // Deprecated: Note that the current value of DefObjectives is
+ // deprecated. It will be replaced by an empty map in v0.10 of the
+ // library. Please explicitly set Objectives to the desired value.
Objectives map[float64]float64
// MaxAge defines the duration for which an observation stays relevant
@@ -183,7 +190,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
}
}
- if len(opts.Objectives) == 0 {
+ if opts.Objectives == nil {
opts.Objectives = DefObjectives
}
@@ -390,12 +397,11 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
- *MetricVec
+ *metricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -404,47 +410,116 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels,
)
return &SummaryVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
}),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Summary and not a
-// Metric so that no type conversion is required.
-func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
- return metric.(Summary), err
+ return metric.(Observer), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Summary and not a Metric so that no
-// type conversion is required.
-func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
if metric != nil {
- return metric.(Summary), err
+ return metric.(Observer), err
}
return nil, err
}
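
The two getters trade convenience against safety: positional lookups are fast but sensitive to argument order, while map lookups are more verbose but unambiguous. A short sketch of both styles (hypothetical metric; assumes the usual client_golang import):

    package example

    import "github.com/prometheus/client_golang/prometheus"

    var latency = prometheus.NewSummaryVec(
        prometheus.SummaryOpts{
            Name:       "http_request_duration_seconds",
            Help:       "Request latency in seconds.",
            Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
        },
        []string{"code", "method"},
    )

    func observe(seconds float64) error {
        // Positional: fast, but the argument order must match the label order.
        obs, err := latency.GetMetricWithLabelValues("404", "GET")
        if err != nil {
            return err
        }
        obs.Observe(seconds)

        // Map-based: more verbose, immune to ordering mistakes.
        obs, err = latency.GetMetricWith(prometheus.Labels{"code": "404", "method": "GET"})
        if err != nil {
            return err
        }
        obs.Observe(seconds)
        return nil
    }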
// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
- return m.MetricVec.WithLabelValues(lvs...).(Summary)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+ s, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return s
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *SummaryVec) With(labels Labels) Summary {
- return m.MetricVec.With(labels).(Summary)
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+ s, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &SummaryVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
}
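
Currying pays off when one label is fixed for an entire code path, e.g. a per-shard worker. A hedged sketch under that assumption (hypothetical labels; not part of the upstream patch):

    package example

    import "github.com/prometheus/client_golang/prometheus"

    var queryDur = prometheus.NewSummaryVec(
        prometheus.SummaryOpts{
            Name:       "db_query_duration_seconds",
            Help:       "Query latency in seconds.",
            Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
        },
        []string{"shard", "operation"},
    )

    // shardObserver pre-sets the "shard" label once; callers then only
    // supply the remaining "operation" label.
    func shardObserver(shard string) prometheus.ObserverVec {
        return queryDur.MustCurryWith(prometheus.Labels{"shard": shard})
    }

    func recordSelect() {
        shardObserver("s1").WithLabelValues("select").Observe(0.003)
    }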
type constSummary struct {
@@ -505,8 +580,8 @@ func NewConstSummary(
quantiles map[float64]float64,
labelValues ...string,
) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
}
return &constSummary{
desc: desc,
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go
index c4575ffbd..b162ed946 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go
@@ -25,6 +25,45 @@ import (
dto "github.com/prometheus/client_model/go"
)
+func TestSummaryWithDefaultObjectives(t *testing.T) {
+ reg := NewRegistry()
+ summaryWithDefaultObjectives := NewSummary(SummaryOpts{
+ Name: "default_objectives",
+ Help: "Test help.",
+ })
+ if err := reg.Register(summaryWithDefaultObjectives); err != nil {
+ t.Error(err)
+ }
+
+ m := &dto.Metric{}
+ if err := summaryWithDefaultObjectives.Write(m); err != nil {
+ t.Error(err)
+ }
+ if len(m.GetSummary().Quantile) != len(DefObjectives) {
+ t.Error("expected default objectives in summary")
+ }
+}
+
+func TestSummaryWithoutObjectives(t *testing.T) {
+ reg := NewRegistry()
+ summaryWithEmptyObjectives := NewSummary(SummaryOpts{
+ Name: "empty_objectives",
+ Help: "Test help.",
+ Objectives: map[float64]float64{},
+ })
+ if err := reg.Register(summaryWithEmptyObjectives); err != nil {
+ t.Error(err)
+ }
+
+ m := &dto.Metric{}
+ if err := summaryWithEmptyObjectives.Write(m); err != nil {
+ t.Error(err)
+ }
+ if len(m.GetSummary().Quantile) != 0 {
+ t.Error("expected no objectives in summary")
+ }
+}
+
func benchmarkSummaryObserve(w int, b *testing.B) {
b.StopTimer()
@@ -136,8 +175,9 @@ func TestSummaryConcurrency(t *testing.T) {
end.Add(concLevel)
sum := NewSummary(SummaryOpts{
- Name: "test_summary",
- Help: "helpless",
+ Name: "test_summary",
+ Help: "helpless",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})
allVars := make([]float64, total)
@@ -223,8 +263,9 @@ func TestSummaryVecConcurrency(t *testing.T) {
sum := NewSummaryVec(
SummaryOpts{
- Name: "test_summary",
- Help: "helpless",
+ Name: "test_summary",
+ Help: "helpless",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"label"},
)
@@ -260,7 +301,7 @@ func TestSummaryVecConcurrency(t *testing.T) {
for i := 0; i < vecLength; i++ {
m := &dto.Metric{}
s := sum.WithLabelValues(string('A' + i))
- s.Write(m)
+ s.(Summary).Write(m)
if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want {
t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want)
}
@@ -305,7 +346,7 @@ func TestSummaryDecay(t *testing.T) {
m := &dto.Metric{}
i := 0
tick := time.NewTicker(time.Millisecond)
- for _ = range tick.C {
+ for range tick.C {
i++
sum.Observe(float64(i))
if i%10 == 0 {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 000000000..b8fc5f18c
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+ begin time.Time
+ observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
+func NewTimer(o Observer) *Timer {
+ return &Timer{
+ begin: time.Now(),
+ observer: o,
+ }
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. ObserveDuration is
+// usually called with a defer statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func (t *Timer) ObserveDuration() {
+ if t.observer != nil {
+ t.observer.Observe(time.Since(t.begin).Seconds())
+ }
+}
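
A typical use of the new Timer is wrapping a handler so that every call observes its own duration. A minimal sketch (hypothetical metric and wrapper names):

    package example

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
    )

    var handlerDur = prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Name: "handler_duration_seconds",
            Help: "Handler latency in seconds.",
        },
        []string{"path"},
    )

    // instrument times each invocation of next and records the duration
    // under the given path label.
    func instrument(path string, next http.HandlerFunc) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            timer := prometheus.NewTimer(handlerDur.WithLabelValues(path))
            defer timer.ObserveDuration()
            next(w, r)
        }
    }

The same pattern works with any Observer, including an ObserverFunc wrapper such as prometheus.ObserverFunc(gauge.Set), as the new timer_test.go below demonstrates.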
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer_test.go b/vendor/github.com/prometheus/client_golang/prometheus/timer_test.go
new file mode 100644
index 000000000..294902068
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer_test.go
@@ -0,0 +1,152 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "testing"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func TestTimerObserve(t *testing.T) {
+ var (
+ his = NewHistogram(HistogramOpts{Name: "test_histogram"})
+ sum = NewSummary(SummaryOpts{Name: "test_summary"})
+ gauge = NewGauge(GaugeOpts{Name: "test_gauge"})
+ )
+
+ func() {
+ hisTimer := NewTimer(his)
+ sumTimer := NewTimer(sum)
+ gaugeTimer := NewTimer(ObserverFunc(gauge.Set))
+ defer hisTimer.ObserveDuration()
+ defer sumTimer.ObserveDuration()
+ defer gaugeTimer.ObserveDuration()
+ }()
+
+ m := &dto.Metric{}
+ his.Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for histogram, got %d", want, got)
+ }
+ m.Reset()
+ sum.Write(m)
+ if want, got := uint64(1), m.GetSummary().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for summary, got %d", want, got)
+ }
+ m.Reset()
+ gauge.Write(m)
+ if got := m.GetGauge().GetValue(); got <= 0 {
+ t.Errorf("want value > 0 for gauge, got %f", got)
+ }
+}
+
+func TestTimerEmpty(t *testing.T) {
+ emptyTimer := NewTimer(nil)
+ emptyTimer.ObserveDuration()
+ // Do nothing, just demonstrate it works without panic.
+}
+
+func TestTimerConditionalTiming(t *testing.T) {
+ var (
+ his = NewHistogram(HistogramOpts{
+ Name: "test_histogram",
+ })
+ timeMe = true
+ m = &dto.Metric{}
+ )
+
+ timedFunc := func() {
+ timer := NewTimer(ObserverFunc(func(v float64) {
+ if timeMe {
+ his.Observe(v)
+ }
+ }))
+ defer timer.ObserveDuration()
+ }
+
+ timedFunc() // This will time.
+ his.Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for histogram, got %d", want, got)
+ }
+
+ timeMe = false
+ timedFunc() // This will not time again.
+ m.Reset()
+ his.Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for histogram, got %d", want, got)
+ }
+}
+
+func TestTimerByOutcome(t *testing.T) {
+ var (
+ his = NewHistogramVec(
+ HistogramOpts{Name: "test_histogram"},
+ []string{"outcome"},
+ )
+ outcome = "foo"
+ m = &dto.Metric{}
+ )
+
+ timedFunc := func() {
+ timer := NewTimer(ObserverFunc(func(v float64) {
+ his.WithLabelValues(outcome).Observe(v)
+ }))
+ defer timer.ObserveDuration()
+
+ if outcome == "foo" {
+ outcome = "bar"
+ return
+ }
+ outcome = "foo"
+ }
+
+ timedFunc()
+ his.WithLabelValues("foo").(Histogram).Write(m)
+ if want, got := uint64(0), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
+ }
+ m.Reset()
+ his.WithLabelValues("bar").(Histogram).Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
+ }
+
+ timedFunc()
+ m.Reset()
+ his.WithLabelValues("foo").(Histogram).Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
+ }
+ m.Reset()
+ his.WithLabelValues("bar").(Histogram).Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
+ }
+
+ timedFunc()
+ m.Reset()
+ his.WithLabelValues("foo").(Histogram).Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
+ }
+ m.Reset()
+ his.WithLabelValues("bar").(Histogram).Write(m)
+ if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
+ }
+
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
index 5faf7e6e3..0f9ce63f4 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -13,108 +13,12 @@
package prometheus
-// Untyped is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// An Untyped metric works the same as a Gauge. The only difference is that to
-// no type information is implied.
-//
-// To create Untyped instances, use NewUntyped.
-type Untyped interface {
- Metric
- Collector
-
- // Set sets the Untyped metric to an arbitrary value.
- Set(float64)
- // Inc increments the Untyped metric by 1.
- Inc()
- // Dec decrements the Untyped metric by 1.
- Dec()
- // Add adds the given value to the Untyped metric. (The value can be
- // negative, resulting in a decrease.)
- Add(float64)
- // Sub subtracts the given value from the Untyped metric. (The value can
- // be negative, resulting in an increase.)
- Sub(float64)
-}
-
// UntypedOpts is an alias for Opts. See there for doc comments.
type UntypedOpts Opts
-// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
-func NewUntyped(opts UntypedOpts) Untyped {
- return newValue(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), UntypedValue, 0)
-}
-
-// UntypedVec is a Collector that bundles a set of Untyped metrics that all
-// share the same Desc, but have different values for their variable
-// labels. This is used if you want to count the same thing partitioned by
-// various dimensions. Create instances with NewUntypedVec.
-type UntypedVec struct {
- *MetricVec
-}
-
-// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
-func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &UntypedVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
- return newValue(desc, UntypedValue, 0, lvs...)
- }),
- }
-}
-
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns an Untyped and not a
-// Metric so that no type conversion is required.
-func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Untyped), err
- }
- return nil, err
-}
-
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns an Untyped and not a Metric so that no
-// type conversion is required.
-func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Untyped), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
- return m.MetricVec.WithLabelValues(lvs...).(Untyped)
-}
-
-// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *UntypedVec) With(labels Labels) Untyped {
- return m.MetricVec.With(labels).(Untyped)
-}
-
-// UntypedFunc is an Untyped whose value is determined at collect time by
-// calling a provided function.
+// UntypedFunc works like GaugeFunc but the collected metric is of type
+// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
+// type.
//
// To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface {
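
The hunk is truncated mid-declaration here. For context, an UntypedFunc is typically built from a closure that reads some external value of unknown type, roughly as in this sketch (the external reader is a hypothetical stand-in):

    package example

    import "github.com/prometheus/client_golang/prometheus"

    // readExternalValue is a hypothetical stand-in for a value scraped
    // from a third-party system whose metric type is unknown.
    func readExternalValue() float64 { return 42 }

    var mirrored = prometheus.NewUntypedFunc(
        prometheus.UntypedOpts{
            Name: "external_mystery_value",
            Help: "Mirrors a metric of unknown type from an external system.",
        },
        readExternalValue,
    )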
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index a944c3775..543b57c27 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -14,11 +14,8 @@
package prometheus
import (
- "errors"
"fmt"
- "math"
"sort"
- "sync/atomic"
dto "github.com/prometheus/client_model/go"
@@ -36,77 +33,6 @@ const (
UntypedValue
)
-var errInconsistentCardinality = errors.New("inconsistent label cardinality")
-
-// value is a generic metric for simple values. It implements Metric, Collector,
-// Counter, Gauge, and Untyped. Its effective type is determined by
-// ValueType. This is a low-level building block used by the library to back the
-// implementations of Counter, Gauge, and Untyped.
-type value struct {
- // valBits containst the bits of the represented float64 value. It has
- // to go first in the struct to guarantee alignment for atomic
- // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- valBits uint64
-
- selfCollector
-
- desc *Desc
- valType ValueType
- labelPairs []*dto.LabelPair
-}
-
-// newValue returns a newly allocated value with the given Desc, ValueType,
-// sample value and label values. It panics if the number of label
-// values is different from the number of variable labels in Desc.
-func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
- if len(labelValues) != len(desc.variableLabels) {
- panic(errInconsistentCardinality)
- }
- result := &value{
- desc: desc,
- valType: valueType,
- valBits: math.Float64bits(val),
- labelPairs: makeLabelPairs(desc, labelValues),
- }
- result.init(result)
- return result
-}
-
-func (v *value) Desc() *Desc {
- return v.desc
-}
-
-func (v *value) Set(val float64) {
- atomic.StoreUint64(&v.valBits, math.Float64bits(val))
-}
-
-func (v *value) Inc() {
- v.Add(1)
-}
-
-func (v *value) Dec() {
- v.Add(-1)
-}
-
-func (v *value) Add(val float64) {
- for {
- oldBits := atomic.LoadUint64(&v.valBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
- if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
- return
- }
- }
-}
-
-func (v *value) Sub(val float64) {
- v.Add(val * -1)
-}
-
-func (v *value) Write(out *dto.Metric) error {
- val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
- return populateMetric(v.valType, val, v.labelPairs, out)
-}
-
// valueFunc is a generic metric for simple values retrieved on collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
@@ -153,8 +79,8 @@ func (v *valueFunc) Write(out *dto.Metric) error {
// the Collect method. NewConstMetric returns an error if the length of
// labelValues is not consistent with the variable labels in Desc.
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
}
return &constMetric{
desc: desc,
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value_test.go b/vendor/github.com/prometheus/client_golang/prometheus/value_test.go
new file mode 100644
index 000000000..eed517e7b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value_test.go
@@ -0,0 +1,43 @@
+package prometheus
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestNewConstMetricInvalidLabelValues(t *testing.T) {
+ testCases := []struct {
+ desc string
+ labels Labels
+ }{
+ {
+ desc: "non utf8 label value",
+ labels: Labels{"a": "\xFF"},
+ },
+ {
+ desc: "not enough label values",
+ labels: Labels{},
+ },
+ {
+ desc: "too many label values",
+ labels: Labels{"a": "1", "b": "2"},
+ },
+ }
+
+ for _, test := range testCases {
+ metricDesc := NewDesc(
+ "sample_value",
+ "sample value",
+ []string{"a"},
+ Labels{},
+ )
+
+ // Build the label values from the test case so that every case is
+ // actually exercised, not only the non-UTF-8 one.
+ lvs := make([]string, 0, len(test.labels))
+ for _, v := range test.labels {
+ lvs = append(lvs, v)
+ }
+
+ expectPanic(t, func() {
+ MustNewConstMetric(metricDesc, CounterValue, 0.3, lvs...)
+ }, fmt.Sprintf("MustNewConstMetric: expected panic because: %s", test.desc))
+
+ if _, err := NewConstMetric(metricDesc, CounterValue, 0.3, lvs...); err == nil {
+ t.Errorf("NewConstMetric: expected error because: %s", test.desc)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 7f3eef9a4..cea158249 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -20,200 +20,253 @@ import (
"github.com/prometheus/common/model"
)
-// MetricVec is a Collector to bundle metrics of the same name that
-// differ in their label values. MetricVec is usually not used directly but as a
-// building block for implementations of vectors of a given metric
-// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
-// provided in this package.
-type MetricVec struct {
- mtx sync.RWMutex // Protects the children.
- children map[uint64][]metricWithLabelValues
- desc *Desc
-
- newMetric func(labelValues ...string) Metric
- hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
+// metricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. metricVec is not used directly (and therefore
+// unexported). It is used as a building block for implementations of vectors of
+// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
+// It also handles label currying. It uses metricMap internally.
+type metricVec struct {
+ *metricMap
+
+ curry []curriedLabelValue
+
+ // hashAdd and hashAddByte can be replaced for testing collision handling.
+ hashAdd func(h uint64, s string) uint64
hashAddByte func(h uint64, b byte) uint64
}
-// newMetricVec returns an initialized MetricVec. The concrete value is
-// returned for embedding into another struct.
-func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
- return &MetricVec{
- children: map[uint64][]metricWithLabelValues{},
- desc: desc,
- newMetric: newMetric,
+// newMetricVec returns an initialized metricVec.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
+ return &metricVec{
+ metricMap: &metricMap{
+ metrics: map[uint64][]metricWithLabelValues{},
+ desc: desc,
+ newMetric: newMetric,
+ },
hashAdd: hashAdd,
hashAddByte: hashAddByte,
}
}
-// metricWithLabelValues provides the metric and its label values for
-// disambiguation on hash collision.
-type metricWithLabelValues struct {
- values []string
- metric Metric
-}
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such an inconsistent label count
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
-// Describe implements Collector. The length of the returned slice
-// is always one.
-func (m *MetricVec) Describe(ch chan<- *Desc) {
- ch <- m.desc
+ return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
}
-// Collect implements Collector.
-func (m *MetricVec) Collect(ch chan<- Metric) {
- m.mtx.RLock()
- defer m.mtx.RUnlock()
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *metricVec) Delete(labels Labels) bool {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
- for _, metrics := range m.children {
- for _, metric := range metrics {
- ch <- metric.metric
+ return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+}
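
These deletion semantics surface on the exported vector types through embedding. A small sketch (hypothetical counter; not part of the upstream patch):

    package example

    import "github.com/prometheus/client_golang/prometheus"

    var hits = prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "hits_total", Help: "Total hits."},
        []string{"code", "method"},
    )

    func cleanup() {
        hits.WithLabelValues("404", "GET").Inc()

        // Positional deletion; the order must match the label declaration.
        _ = hits.DeleteLabelValues("404", "GET") // true: the metric existed

        // Map-based deletion; an inconsistent label count can never match,
        // so this returns false instead of an error.
        _ = hits.Delete(prometheus.Labels{"code": "404"}) // false
    }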
+
+func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
+ var (
+ newCurry []curriedLabelValue
+ oldCurry = m.curry
+ iCurry int
+ )
+ for i, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
+ if ok {
+ return nil, fmt.Errorf("label name %q is already curried", label)
+ }
+ newCurry = append(newCurry, oldCurry[iCurry])
+ iCurry++
+ } else {
+ if !ok {
+ continue // Label stays uncurried.
+ }
+ newCurry = append(newCurry, curriedLabelValue{i, val})
}
}
+ if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
+ return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
+ }
+
+ return &metricVec{
+ metricMap: m.metricMap,
+ curry: newCurry,
+ hashAdd: m.hashAdd,
+ hashAddByte: m.hashAddByte,
+ }, nil
}
-// GetMetricWithLabelValues returns the Metric for the given slice of label
-// values (same order as the VariableLabels in Desc). If that combination of
-// label values is accessed for the first time, a new Metric is created.
-//
-// It is possible to call this method without using the returned Metric to only
-// create the new Metric but leave it at its start value (e.g. a Summary or
-// Histogram without any observations). See also the SummaryVec example.
-//
-// Keeping the Metric for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Metric from the MetricVec. In that case, the
-// Metric will still exist, but it will not be exported anymore, even if a
-// Metric with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of VariableLabels in Desc.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
}
- return m.getOrCreateMetricWithLabelValues(h, lvs), nil
+ return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
-// GetMetricWith returns the Metric for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
-// accessed for the first time, a new Metric is created. Implications of
-// creating a Metric without using it and keeping the Metric for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc.
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
}
- return m.getOrCreateMetricWithLabels(h, labels), nil
+ return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
-// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
-// occurs. The method allows neat syntax like:
-// httpReqs.WithLabelValues("404", "POST").Inc()
-func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
- metric, err := m.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
+func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
+ if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ return 0, err
+ }
+
+ var (
+ h = hashNew()
+ curry = m.curry
+ iVals, iCurry int
+ )
+ for i := 0; i < len(m.desc.variableLabels); i++ {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ h = m.hashAdd(h, curry[iCurry].value)
+ iCurry++
+ } else {
+ h = m.hashAdd(h, vals[iVals])
+ iVals++
+ }
+ h = m.hashAddByte(h, model.SeparatorByte)
}
- return metric
+ return h, nil
}
-// With works as GetMetricWith, but panics if an error occurs. The method allows
-// neat syntax like:
-// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
-func (m *MetricVec) With(labels Labels) Metric {
- metric, err := m.GetMetricWith(labels)
- if err != nil {
- panic(err)
+func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
+ if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ return 0, err
}
- return metric
+
+ var (
+ h = hashNew()
+ curry = m.curry
+ iCurry int
+ )
+ for i, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if ok {
+ return 0, fmt.Errorf("label name %q is already curried", label)
+ }
+ h = m.hashAdd(h, curry[iCurry].value)
+ iCurry++
+ } else {
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ h = m.hashAdd(h, val)
+ }
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
}
-// DeleteLabelValues removes the metric where the variable labels are the same
-// as those passed in as labels (same order as the VariableLabels in Desc). It
-// returns true if a metric was deleted.
-//
-// It is not an error if the number of label values is not the same as the
-// number of VariableLabels in Desc. However, such inconsistent label count can
-// never match an actual Metric, so the method will always return false in that
-// case.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider Delete(Labels) as an
-// alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the CounterVec example.
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
- m.mtx.Lock()
- defer m.mtx.Unlock()
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+ values []string
+ metric Metric
+}
- h, err := m.hashLabelValues(lvs)
- if err != nil {
- return false
+// curriedLabelValue sets the curried value for a label at the given index.
+type curriedLabelValue struct {
+ index int
+ value string
+}
+
+// metricMap is a helper for metricVec and shared between differently curried
+// metricVecs.
+type metricMap struct {
+ mtx sync.RWMutex // Protects metrics.
+ metrics map[uint64][]metricWithLabelValues
+ desc *Desc
+ newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It will send exactly one Desc to the provided
+// channel.
+func (m *metricMap) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *metricMap) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metrics := range m.metrics {
+ for _, metric := range metrics {
+ ch <- metric.metric
+ }
}
- return m.deleteByHashWithLabelValues(h, lvs)
}
-// Delete deletes the metric where the variable labels are the same as those
-// passed in as labels. It returns true if a metric was deleted.
-//
-// It is not an error if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in the Desc of the MetricVec. However, such
-// inconsistent Labels can never match an actual Metric, so the method will
-// always return false in that case.
-//
-// This method is used for the same purpose as DeleteLabelValues(...string). See
-// there for pros and cons of the two methods.
-func (m *MetricVec) Delete(labels Labels) bool {
+// Reset deletes all metrics in this vector.
+func (m *metricMap) Reset() {
m.mtx.Lock()
defer m.mtx.Unlock()
- h, err := m.hashLabels(labels)
- if err != nil {
- return false
+ for h := range m.metrics {
+ delete(m.metrics, h)
}
-
- return m.deleteByHashWithLabels(h, labels)
}
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
-func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
- metrics, ok := m.children[h]
+func (m *metricMap) deleteByHashWithLabelValues(
+ h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ metrics, ok := m.metrics[h]
if !ok {
return false
}
- i := m.findMetricWithLabelValues(metrics, lvs)
+ i := findMetricWithLabelValues(metrics, lvs, curry)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
- m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
} else {
- delete(m.children, h)
+ delete(m.metrics, h)
}
return true
}
@@ -221,69 +274,35 @@ func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use lvs to select a metric and remove
// only that metric.
-func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
- metrics, ok := m.children[h]
+func (m *metricMap) deleteByHashWithLabels(
+ h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+ metrics, ok := m.metrics[h]
if !ok {
return false
}
- i := m.findMetricWithLabels(metrics, labels)
+ i := findMetricWithLabels(m.desc, metrics, labels, curry)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
- m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
} else {
- delete(m.children, h)
+ delete(m.metrics, h)
}
return true
}
-// Reset deletes all metrics in this vector.
-func (m *MetricVec) Reset() {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- for h := range m.children {
- delete(m.children, h)
- }
-}
-
-func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
- if len(vals) != len(m.desc.variableLabels) {
- return 0, errInconsistentCardinality
- }
- h := hashNew()
- for _, val := range vals {
- h = m.hashAdd(h, val)
- h = m.hashAddByte(h, model.SeparatorByte)
- }
- return h, nil
-}
-
-func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
- if len(labels) != len(m.desc.variableLabels) {
- return 0, errInconsistentCardinality
- }
- h := hashNew()
- for _, label := range m.desc.variableLabels {
- val, ok := labels[label]
- if !ok {
- return 0, fmt.Errorf("label name %q missing in label map", label)
- }
- h = m.hashAdd(h, val)
- h = m.hashAddByte(h, model.SeparatorByte)
- }
- return h, nil
-}
-
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
-func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+ hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
m.mtx.RLock()
- metric, ok := m.getMetricWithLabelValues(hash, lvs)
+ metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
m.mtx.RUnlock()
if ok {
return metric
@@ -291,13 +310,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
m.mtx.Lock()
defer m.mtx.Unlock()
- metric, ok = m.getMetricWithLabelValues(hash, lvs)
+ metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
if !ok {
- // Copy to avoid allocation in case wo don't go down this code path.
- copiedLVs := make([]string, len(lvs))
- copy(copiedLVs, lvs)
- metric = m.newMetric(copiedLVs...)
- m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+ inlinedLVs := inlineLabelValues(lvs, curry)
+ metric = m.newMetric(inlinedLVs...)
+ m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
}
return metric
}
@@ -306,9 +323,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
// or creates it and returns the new one.
//
// This function holds the mutex.
-func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+func (m *metricMap) getOrCreateMetricWithLabels(
+ hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
m.mtx.RLock()
- metric, ok := m.getMetricWithLabels(hash, labels)
+ metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
m.mtx.RUnlock()
if ok {
return metric
@@ -316,33 +335,37 @@ func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metr
m.mtx.Lock()
defer m.mtx.Unlock()
- metric, ok = m.getMetricWithLabels(hash, labels)
+ metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
if !ok {
- lvs := m.extractLabelValues(labels)
+ lvs := extractLabelValues(m.desc, labels, curry)
metric = m.newMetric(lvs...)
- m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+ m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
}
return metric
}
-// getMetricWithLabelValues gets a metric while handling possible collisions in
-// the hash space. Must be called while holding read mutex.
-func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
- metrics, ok := m.children[h]
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+ h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+ metrics, ok := m.metrics[h]
if ok {
- if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+ if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
-// getMetricWithLabels gets a metric while handling possible collisions in
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
-func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
- metrics, ok := m.children[h]
+func (m *metricMap) getMetricWithHashAndLabels(
+ h uint64, labels Labels, curry []curriedLabelValue,
+) (Metric, bool) {
+ metrics, ok := m.metrics[h]
if ok {
- if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+ if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
return metrics[i].metric, true
}
}
@@ -351,9 +374,11 @@ func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool)
// findMetricWithLabelValues returns the index of the matching metric or
// len(metrics) if not found.
-func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+func findMetricWithLabelValues(
+ metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
+) int {
for i, metric := range metrics {
- if m.matchLabelValues(metric.values, lvs) {
+ if matchLabelValues(metric.values, lvs, curry) {
return i
}
}
@@ -362,32 +387,51 @@ func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, l
// findMetricWithLabels returns the index of the matching metric or len(metrics)
// if not found.
-func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+func findMetricWithLabels(
+ desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
for i, metric := range metrics {
- if m.matchLabels(metric.values, labels) {
+ if matchLabels(desc, metric.values, labels, curry) {
return i
}
}
return len(metrics)
}
-func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
- if len(values) != len(lvs) {
+func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+ if len(values) != len(lvs)+len(curry) {
return false
}
+ var iLVs, iCurry int
for i, v := range values {
- if v != lvs[i] {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if v != curry[iCurry].value {
+ return false
+ }
+ iCurry++
+ continue
+ }
+ if v != lvs[iLVs] {
return false
}
+ iLVs++
}
return true
}
-func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
- if len(labels) != len(values) {
+func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+ if len(values) != len(labels)+len(curry) {
return false
}
- for i, k := range m.desc.variableLabels {
+ iCurry := 0
+ for i, k := range desc.variableLabels {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if values[i] != curry[iCurry].value {
+ return false
+ }
+ iCurry++
+ continue
+ }
if values[i] != labels[k] {
return false
}
@@ -395,10 +439,31 @@ func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
return true
}
-func (m *MetricVec) extractLabelValues(labels Labels) []string {
- labelValues := make([]string, len(labels))
- for i, k := range m.desc.variableLabels {
+func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
+ labelValues := make([]string, len(labels)+len(curry))
+ iCurry := 0
+ for i, k := range desc.variableLabels {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ labelValues[i] = curry[iCurry].value
+ iCurry++
+ continue
+ }
labelValues[i] = labels[k]
}
return labelValues
}
+
+func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
+ labelValues := make([]string, len(lvs)+len(curry))
+ var iCurry, iLVs int
+ for i := range labelValues {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ labelValues[i] = curry[iCurry].value
+ iCurry++
+ continue
+ }
+ labelValues[i] = lvs[iLVs]
+ iLVs++
+ }
+ return labelValues
+}
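
To make the index bookkeeping concrete, here is a self-contained sketch of the same interleaving with hypothetical values (the real types are unexported):

    package main

    import "fmt"

    type curried struct {
        index int
        value string
    }

    // inline mirrors inlineLabelValues above: curried values are slotted in
    // at their recorded positions, the remaining slots are filled from lvs.
    func inline(lvs []string, curry []curried) []string {
        out := make([]string, len(lvs)+len(curry))
        var iCurry, iLVs int
        for i := range out {
            if iCurry < len(curry) && curry[iCurry].index == i {
                out[i] = curry[iCurry].value
                iCurry++
                continue
            }
            out[i] = lvs[iLVs]
            iLVs++
        }
        return out
    }

    func main() {
        // "shard" curried at index 1 merges with ["GET", "200"]:
        fmt.Println(inline([]string{"GET", "200"}, []curried{{1, "s1"}})) // [GET s1 200]
    }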
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go
index 445a6b39f..bd18a9f4e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go
@@ -21,8 +21,8 @@ import (
)
func TestDelete(t *testing.T) {
- vec := NewUntypedVec(
- UntypedOpts{
+ vec := NewGaugeVec(
+ GaugeOpts{
Name: "test",
Help: "helpless",
},
@@ -32,8 +32,8 @@ func TestDelete(t *testing.T) {
}
func TestDeleteWithCollisions(t *testing.T) {
- vec := NewUntypedVec(
- UntypedOpts{
+ vec := NewGaugeVec(
+ GaugeOpts{
Name: "test",
Help: "helpless",
},
@@ -44,12 +44,12 @@ func TestDeleteWithCollisions(t *testing.T) {
testDelete(t, vec)
}
-func testDelete(t *testing.T, vec *UntypedVec) {
+func testDelete(t *testing.T, vec *GaugeVec) {
if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
- vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want {
t.Errorf("got %v, want %v", got, want)
}
@@ -57,7 +57,7 @@ func testDelete(t *testing.T, vec *UntypedVec) {
t.Errorf("got %v, want %v", got, want)
}
- vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want {
t.Errorf("got %v, want %v", got, want)
}
@@ -65,7 +65,7 @@ func testDelete(t *testing.T, vec *UntypedVec) {
t.Errorf("got %v, want %v", got, want)
}
- vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
@@ -75,8 +75,8 @@ func testDelete(t *testing.T, vec *UntypedVec) {
}
func TestDeleteLabelValues(t *testing.T) {
- vec := NewUntypedVec(
- UntypedOpts{
+ vec := NewGaugeVec(
+ GaugeOpts{
Name: "test",
Help: "helpless",
},
@@ -86,8 +86,8 @@ func TestDeleteLabelValues(t *testing.T) {
}
func TestDeleteLabelValuesWithCollisions(t *testing.T) {
- vec := NewUntypedVec(
- UntypedOpts{
+ vec := NewGaugeVec(
+ GaugeOpts{
Name: "test",
Help: "helpless",
},
@@ -98,13 +98,13 @@ func TestDeleteLabelValuesWithCollisions(t *testing.T) {
testDeleteLabelValues(t, vec)
}
-func testDeleteLabelValues(t *testing.T, vec *UntypedVec) {
+func testDeleteLabelValues(t *testing.T, vec *GaugeVec) {
if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
- vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
- vec.With(Labels{"l1": "v1", "l2": "v3"}).(Untyped).Set(42) // Add junk data for collision.
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
+ vec.With(Labels{"l1": "v1", "l2": "v3"}).(Gauge).Set(42) // Add junk data for collision.
if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want {
t.Errorf("got %v, want %v", got, want)
}
@@ -115,7 +115,7 @@ func testDeleteLabelValues(t *testing.T, vec *UntypedVec) {
t.Errorf("got %v, want %v", got, want)
}
- vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
// Delete out of order.
if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want {
t.Errorf("got %v, want %v", got, want)
@@ -126,8 +126,8 @@ func testDeleteLabelValues(t *testing.T, vec *UntypedVec) {
}
func TestMetricVec(t *testing.T) {
- vec := NewUntypedVec(
- UntypedOpts{
+ vec := NewGaugeVec(
+ GaugeOpts{
Name: "test",
Help: "helpless",
},
@@ -137,8 +137,8 @@ func TestMetricVec(t *testing.T) {
}
func TestMetricVecWithCollisions(t *testing.T) {
- vec := NewUntypedVec(
- UntypedOpts{
+ vec := NewGaugeVec(
+ GaugeOpts{
Name: "test",
Help: "helpless",
},
@@ -149,7 +149,7 @@ func TestMetricVecWithCollisions(t *testing.T) {
testMetricVec(t, vec)
}
-func testMetricVec(t *testing.T, vec *UntypedVec) {
+func testMetricVec(t *testing.T, vec *GaugeVec) {
vec.Reset() // Actually test Reset now!
var pair [2]string
@@ -162,11 +162,11 @@ func testMetricVec(t *testing.T, vec *UntypedVec) {
vec.WithLabelValues(pair[0], pair[1]).Inc()
expected[[2]string{"v1", "v2"}]++
- vec.WithLabelValues("v1", "v2").(Untyped).Inc()
+ vec.WithLabelValues("v1", "v2").(Gauge).Inc()
}
var total int
- for _, metrics := range vec.children {
+ for _, metrics := range vec.metricMap.metrics {
for _, metric := range metrics {
total++
copy(pair[:], metric.values)
@@ -175,7 +175,7 @@ func testMetricVec(t *testing.T, vec *UntypedVec) {
if err := metric.metric.Write(&metricOut); err != nil {
t.Fatal(err)
}
- actual := *metricOut.Untyped.Value
+ actual := *metricOut.Gauge.Value
var actualPair [2]string
for i, label := range metricOut.Label {
@@ -201,7 +201,7 @@ func testMetricVec(t *testing.T, vec *UntypedVec) {
vec.Reset()
- if len(vec.children) > 0 {
+ if len(vec.metricMap.metrics) > 0 {
t.Fatalf("reset failed")
}
}
@@ -239,10 +239,233 @@ func TestCounterVecEndToEndWithCollision(t *testing.T) {
}
}
+func TestCurryVec(t *testing.T) {
+ vec := NewCounterVec(
+ CounterOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"one", "two", "three"},
+ )
+ testCurryVec(t, vec)
+}
+
+func TestCurryVecWithCollisions(t *testing.T) {
+ vec := NewCounterVec(
+ CounterOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"one", "two", "three"},
+ )
+ vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
+ vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
+ testCurryVec(t, vec)
+}
+
+func testCurryVec(t *testing.T, vec *CounterVec) {
+
+ assertMetrics := func(t *testing.T) {
+ n := 0
+ for _, m := range vec.metricMap.metrics {
+ n += len(m)
+ }
+ if n != 2 {
+ t.Error("expected two metrics, got", n)
+ }
+ m := &dto.Metric{}
+ c1, err := vec.GetMetricWithLabelValues("1", "2", "3")
+ if err != nil {
+ t.Fatal("unexpected error getting metric:", err)
+ }
+ c1.Write(m)
+ if want, got := 1., m.GetCounter().GetValue(); want != got {
+ t.Errorf("want %f as counter value, got %f", want, got)
+ }
+ m.Reset()
+ c2, err := vec.GetMetricWithLabelValues("11", "22", "33")
+ if err != nil {
+ t.Fatal("unexpected error getting metric:", err)
+ }
+ c2.Write(m)
+ if want, got := 1., m.GetCounter().GetValue(); want != got {
+ t.Errorf("want %f as counter value, got %f", want, got)
+ }
+ }
+
+ assertNoMetric := func(t *testing.T) {
+ if n := len(vec.metricMap.metrics); n != 0 {
+ t.Error("expected no metrics, got", n)
+ }
+ }
+
+ t.Run("zero labels", func(t *testing.T) {
+ c1 := vec.MustCurryWith(nil)
+ c2 := vec.MustCurryWith(nil)
+ c1.WithLabelValues("1", "2", "3").Inc()
+ c2.With(Labels{"one": "11", "two": "22", "three": "33"}).Inc()
+ assertMetrics(t)
+ if !c1.Delete(Labels{"one": "1", "two": "2", "three": "3"}) {
+ t.Error("deletion failed")
+ }
+ if !c2.DeleteLabelValues("11", "22", "33") {
+ t.Error("deletion failed")
+ }
+ assertNoMetric(t)
+ })
+ t.Run("first label", func(t *testing.T) {
+ c1 := vec.MustCurryWith(Labels{"one": "1"})
+ c2 := vec.MustCurryWith(Labels{"one": "11"})
+ c1.WithLabelValues("2", "3").Inc()
+ c2.With(Labels{"two": "22", "three": "33"}).Inc()
+ assertMetrics(t)
+ if c1.Delete(Labels{"two": "22", "three": "33"}) {
+ t.Error("deletion unexpectedly succeeded")
+ }
+ if c2.DeleteLabelValues("2", "3") {
+ t.Error("deletion unexpectedly succeeded")
+ }
+ if !c1.Delete(Labels{"two": "2", "three": "3"}) {
+ t.Error("deletion failed")
+ }
+ if !c2.DeleteLabelValues("22", "33") {
+ t.Error("deletion failed")
+ }
+ assertNoMetric(t)
+ })
+ t.Run("middle label", func(t *testing.T) {
+ c1 := vec.MustCurryWith(Labels{"two": "2"})
+ c2 := vec.MustCurryWith(Labels{"two": "22"})
+ c1.WithLabelValues("1", "3").Inc()
+ c2.With(Labels{"one": "11", "three": "33"}).Inc()
+ assertMetrics(t)
+ if c1.Delete(Labels{"one": "11", "three": "33"}) {
+ t.Error("deletion unexpectedly succeeded")
+ }
+ if c2.DeleteLabelValues("1", "3") {
+ t.Error("deletion unexpectedly succeeded")
+ }
+ if !c1.Delete(Labels{"one": "1", "three": "3"}) {
+ t.Error("deletion failed")
+ }
+ if !c2.DeleteLabelValues("11", "33") {
+ t.Error("deletion failed")
+ }
+ assertNoMetric(t)
+ })
+ t.Run("last label", func(t *testing.T) {
+ c1 := vec.MustCurryWith(Labels{"three": "3"})
+ c2 := vec.MustCurryWith(Labels{"three": "33"})
+ c1.WithLabelValues("1", "2").Inc()
+ c2.With(Labels{"one": "11", "two": "22"}).Inc()
+ assertMetrics(t)
+ if c1.Delete(Labels{"two": "22", "one": "11"}) {
+ t.Error("deletion unexpectedly succeeded")
+ }
+ if c2.DeleteLabelValues("1", "2") {
+ t.Error("deletion unexpectedly succeeded")
+ }
+ if !c1.Delete(Labels{"two": "2", "one": "1"}) {
+ t.Error("deletion failed")
+ }
+ if !c2.DeleteLabelValues("11", "22") {
+ t.Error("deletion failed")
+ }
+ assertNoMetric(t)
+ })
+ t.Run("two labels", func(t *testing.T) {
+ c1 := vec.MustCurryWith(Labels{"three": "3", "one": "1"})
+ c2 := vec.MustCurryWith(Labels{"three": "33", "one": "11"})
+ c1.WithLabelValues("2").Inc()
+ c2.With(Labels{"two": "22"}).Inc()
+ assertMetrics(t)
+ if c1.Delete(Labels{"two": "22"}) {
+ t.Error("deletion unexpectedly succeeded")
+ }
+ if c2.DeleteLabelValues("2") {
+ t.Error("deletion unexpectedly succeeded")
+ }
+ if !c1.Delete(Labels{"two": "2"}) {
+ t.Error("deletion failed")
+ }
+ if !c2.DeleteLabelValues("22") {
+ t.Error("deletion failed")
+ }
+ assertNoMetric(t)
+ })
+ t.Run("all labels", func(t *testing.T) {
+ c1 := vec.MustCurryWith(Labels{"three": "3", "two": "2", "one": "1"})
+ c2 := vec.MustCurryWith(Labels{"three": "33", "one": "11", "two": "22"})
+ c1.WithLabelValues().Inc()
+ c2.With(nil).Inc()
+ assertMetrics(t)
+ if !c1.Delete(Labels{}) {
+ t.Error("deletion failed")
+ }
+ if !c2.DeleteLabelValues() {
+ t.Error("deletion failed")
+ }
+ assertNoMetric(t)
+ })
+ t.Run("double curry", func(t *testing.T) {
+ c1 := vec.MustCurryWith(Labels{"three": "3"}).MustCurryWith(Labels{"one": "1"})
+ c2 := vec.MustCurryWith(Labels{"three": "33"}).MustCurryWith(Labels{"one": "11"})
+ c1.WithLabelValues("2").Inc()
+ c2.With(Labels{"two": "22"}).Inc()
+ assertMetrics(t)
+ if c1.Delete(Labels{"two": "22"}) {
+ t.Error("deletion unexpectedly succeeded")
+ }
+ if c2.DeleteLabelValues("2") {
+ t.Error("deletion unexpectedly succeeded")
+ }
+ if !c1.Delete(Labels{"two": "2"}) {
+ t.Error("deletion failed")
+ }
+ if !c2.DeleteLabelValues("22") {
+ t.Error("deletion failed")
+ }
+ assertNoMetric(t)
+ })
+ t.Run("use already curried label", func(t *testing.T) {
+ c1 := vec.MustCurryWith(Labels{"three": "3"})
+ if _, err := c1.GetMetricWithLabelValues("1", "2", "3"); err == nil {
+ t.Error("expected error when using already curried label")
+ }
+ if _, err := c1.GetMetricWith(Labels{"one": "1", "two": "2", "three": "3"}); err == nil {
+ t.Error("expected error when using already curried label")
+ }
+ assertNoMetric(t)
+ c1.WithLabelValues("1", "2").Inc()
+ if c1.Delete(Labels{"one": "1", "two": "2", "three": "3"}) {
+ t.Error("deletion unexpectedly succeeded")
+ }
+ if !c1.Delete(Labels{"one": "1", "two": "2"}) {
+ t.Error("deletion failed")
+ }
+ assertNoMetric(t)
+ })
+ t.Run("curry already curried label", func(t *testing.T) {
+ if _, err := vec.MustCurryWith(Labels{"three": "3"}).CurryWith(Labels{"three": "33"}); err == nil {
+ t.Error("currying unexpectedly succeeded")
+ } else if err.Error() != `label name "three" is already curried` {
+ t.Error("currying returned unexpected error:", err)
+ }
+ })
+ t.Run("unknown label", func(t *testing.T) {
+ if _, err := vec.CurryWith(Labels{"foo": "bar"}); err == nil {
+ t.Error("currying unexpectedly succeeded")
+ } else if err.Error() != "1 unknown label(s) found during currying" {
+ t.Error("currying returned unexpected error:", err)
+ }
+ })
+}
+
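
The new testCurryVec above exercises the label-currying API that this client_golang upgrade introduces on metric vectors: CurryWith/MustCurryWith pin a subset of label values up front and return a vector that only expects the remaining labels, while Delete and DeleteLabelValues then operate relative to that curried view. A minimal usage sketch follows; the metric and label names are illustrative only, not taken from the patch:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        reqs := prometheus.NewCounterVec(
            prometheus.CounterOpts{Name: "http_requests_total", Help: "Requests by handler and code."},
            []string{"handler", "code"},
        )
        // Curry the handler label once; call sites then supply only the code.
        loginReqs := reqs.MustCurryWith(prometheus.Labels{"handler": "/login"})
        loginReqs.WithLabelValues("200").Inc()
        loginReqs.WithLabelValues("500").Inc()
        fmt.Println("recorded two curried counters")
    }

As the subtests show, a label that is already curried may not be supplied again (GetMetricWithLabelValues returns an error) and may not be curried a second time.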
func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) {
benchmarkMetricVecWithLabelValues(b, map[string][]string{
- "l1": []string{"onevalue"},
- "l2": []string{"twovalue"},
+ "l1": {"onevalue"},
+ "l2": {"twovalue"},
})
}
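
The only change to this benchmark's map literal is dropping the redundant []string element type: inside a map[string][]string composite literal, Go allows the inner type to be elided, which is the simplified form gofmt -s produces. Both spellings are equivalent:

    package main

    import "fmt"

    func main() {
        // Identical values; the second form elides the inner []string type.
        verbose := map[string][]string{"l1": []string{"onevalue"}}
        concise := map[string][]string{"l1": {"onevalue"}}
        fmt.Println(verbose, concise)
    }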
@@ -290,8 +513,8 @@ func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string)
}
values := make([]string, len(labels)) // Value cache for permutations.
- vec := NewUntypedVec(
- UntypedOpts{
+ vec := NewGaugeVec(
+ GaugeOpts{
Name: "test",
Help: "helpless",
},
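
The benchmark also moves from NewUntypedVec/UntypedOpts to NewGaugeVec/GaugeOpts, consistent with this client_golang version retiring the directly instantiable Untyped metric (untyped values survive only in the exposition model). A GaugeVec with the same name, help text, and labels is the closest drop-in replacement; roughly:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        vec := prometheus.NewGaugeVec(
            prometheus.GaugeOpts{Name: "test", Help: "helpless"},
            []string{"l1", "l2"},
        )
        vec.WithLabelValues("onevalue", "twovalue").Set(42)
    }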
diff --git a/vendor/github.com/prometheus/client_model/python/prometheus/client/__init__.py b/vendor/github.com/prometheus/client_model/python/prometheus/client/__init__.py
deleted file mode 100644
index 617c0ced0..000000000
--- a/vendor/github.com/prometheus/client_model/python/prometheus/client/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
- # Copyright 2013 Prometheus Team
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py b/vendor/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py
deleted file mode 100644
index d40327c32..000000000
--- a/vendor/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
- # Copyright 2013 Prometheus Team
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
-__all__ = ['metrics_pb2']
diff --git a/vendor/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py b/vendor/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py
deleted file mode 100644
index 8c239ac06..000000000
--- a/vendor/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py
+++ /dev/null
@@ -1,575 +0,0 @@
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: metrics.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf.internal import enum_type_wrapper
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='metrics.proto',
- package='io.prometheus.client',
- serialized_pb=_b('\n\rmetrics.proto\x12\x14io.prometheus.client\"(\n\tLabelPair\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x16\n\x05Gauge\x12\r\n\x05value\x18\x01 \x01(\x01\"\x18\n\x07\x43ounter\x12\r\n\x05value\x18\x01 \x01(\x01\"+\n\x08Quantile\x12\x10\n\x08quantile\x18\x01 \x01(\x01\x12\r\n\x05value\x18\x02 \x01(\x01\"e\n\x07Summary\x12\x14\n\x0csample_count\x18\x01 \x01(\x04\x12\x12\n\nsample_sum\x18\x02 \x01(\x01\x12\x30\n\x08quantile\x18\x03 \x03(\x0b\x32\x1e.io.prometheus.client.Quantile\"\x18\n\x07Untyped\x12\r\n\x05value\x18\x01 \x01(\x01\"c\n\tHistogram\x12\x14\n\x0csample_count\x18\x01 \x01(\x04\x12\x12\n\nsample_sum\x18\x02 \x01(\x01\x12,\n\x06\x62ucket\x18\x03 \x03(\x0b\x32\x1c.io.prometheus.client.Bucket\"7\n\x06\x42ucket\x12\x18\n\x10\x63umulative_count\x18\x01 \x01(\x04\x12\x13\n\x0bupper_bound\x18\x02 \x01(\x01\"\xbe\x02\n\x06Metric\x12.\n\x05label\x18\x01 \x03(\x0b\x32\x1f.io.prometheus.client.LabelPair\x12*\n\x05gauge\x18\x02 \x01(\x0b\x32\x1b.io.prometheus.client.Gauge\x12.\n\x07\x63ounter\x18\x03 \x01(\x0b\x32\x1d.io.prometheus.client.Counter\x12.\n\x07summary\x18\x04 \x01(\x0b\x32\x1d.io.prometheus.client.Summary\x12.\n\x07untyped\x18\x05 \x01(\x0b\x32\x1d.io.prometheus.client.Untyped\x12\x32\n\thistogram\x18\x07 \x01(\x0b\x32\x1f.io.prometheus.client.Histogram\x12\x14\n\x0ctimestamp_ms\x18\x06 \x01(\x03\"\x88\x01\n\x0cMetricFamily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04help\x18\x02 \x01(\t\x12.\n\x04type\x18\x03 \x01(\x0e\x32 .io.prometheus.client.MetricType\x12,\n\x06metric\x18\x04 \x03(\x0b\x32\x1c.io.prometheus.client.Metric*M\n\nMetricType\x12\x0b\n\x07\x43OUNTER\x10\x00\x12\t\n\x05GAUGE\x10\x01\x12\x0b\n\x07SUMMARY\x10\x02\x12\x0b\n\x07UNTYPED\x10\x03\x12\r\n\tHISTOGRAM\x10\x04\x42\x16\n\x14io.prometheus.client')
-)
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-_METRICTYPE = _descriptor.EnumDescriptor(
- name='MetricType',
- full_name='io.prometheus.client.MetricType',
- filename=None,
- file=DESCRIPTOR,
- values=[
- _descriptor.EnumValueDescriptor(
- name='COUNTER', index=0, number=0,
- options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='GAUGE', index=1, number=1,
- options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='SUMMARY', index=2, number=2,
- options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='UNTYPED', index=3, number=3,
- options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='HISTOGRAM', index=4, number=4,
- options=None,
- type=None),
- ],
- containing_type=None,
- options=None,
- serialized_start=923,
- serialized_end=1000,
-)
-_sym_db.RegisterEnumDescriptor(_METRICTYPE)
-
-MetricType = enum_type_wrapper.EnumTypeWrapper(_METRICTYPE)
-COUNTER = 0
-GAUGE = 1
-SUMMARY = 2
-UNTYPED = 3
-HISTOGRAM = 4
-
-
-
-_LABELPAIR = _descriptor.Descriptor(
- name='LabelPair',
- full_name='io.prometheus.client.LabelPair',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='io.prometheus.client.LabelPair.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='value', full_name='io.prometheus.client.LabelPair.value', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=39,
- serialized_end=79,
-)
-
-
-_GAUGE = _descriptor.Descriptor(
- name='Gauge',
- full_name='io.prometheus.client.Gauge',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='value', full_name='io.prometheus.client.Gauge.value', index=0,
- number=1, type=1, cpp_type=5, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=81,
- serialized_end=103,
-)
-
-
-_COUNTER = _descriptor.Descriptor(
- name='Counter',
- full_name='io.prometheus.client.Counter',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='value', full_name='io.prometheus.client.Counter.value', index=0,
- number=1, type=1, cpp_type=5, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=105,
- serialized_end=129,
-)
-
-
-_QUANTILE = _descriptor.Descriptor(
- name='Quantile',
- full_name='io.prometheus.client.Quantile',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='quantile', full_name='io.prometheus.client.Quantile.quantile', index=0,
- number=1, type=1, cpp_type=5, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='value', full_name='io.prometheus.client.Quantile.value', index=1,
- number=2, type=1, cpp_type=5, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=131,
- serialized_end=174,
-)
-
-
-_SUMMARY = _descriptor.Descriptor(
- name='Summary',
- full_name='io.prometheus.client.Summary',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='sample_count', full_name='io.prometheus.client.Summary.sample_count', index=0,
- number=1, type=4, cpp_type=4, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='sample_sum', full_name='io.prometheus.client.Summary.sample_sum', index=1,
- number=2, type=1, cpp_type=5, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='quantile', full_name='io.prometheus.client.Summary.quantile', index=2,
- number=3, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=176,
- serialized_end=277,
-)
-
-
-_UNTYPED = _descriptor.Descriptor(
- name='Untyped',
- full_name='io.prometheus.client.Untyped',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='value', full_name='io.prometheus.client.Untyped.value', index=0,
- number=1, type=1, cpp_type=5, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=279,
- serialized_end=303,
-)
-
-
-_HISTOGRAM = _descriptor.Descriptor(
- name='Histogram',
- full_name='io.prometheus.client.Histogram',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='sample_count', full_name='io.prometheus.client.Histogram.sample_count', index=0,
- number=1, type=4, cpp_type=4, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='sample_sum', full_name='io.prometheus.client.Histogram.sample_sum', index=1,
- number=2, type=1, cpp_type=5, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='bucket', full_name='io.prometheus.client.Histogram.bucket', index=2,
- number=3, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=305,
- serialized_end=404,
-)
-
-
-_BUCKET = _descriptor.Descriptor(
- name='Bucket',
- full_name='io.prometheus.client.Bucket',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='cumulative_count', full_name='io.prometheus.client.Bucket.cumulative_count', index=0,
- number=1, type=4, cpp_type=4, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='upper_bound', full_name='io.prometheus.client.Bucket.upper_bound', index=1,
- number=2, type=1, cpp_type=5, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=406,
- serialized_end=461,
-)
-
-
-_METRIC = _descriptor.Descriptor(
- name='Metric',
- full_name='io.prometheus.client.Metric',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='label', full_name='io.prometheus.client.Metric.label', index=0,
- number=1, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='gauge', full_name='io.prometheus.client.Metric.gauge', index=1,
- number=2, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='counter', full_name='io.prometheus.client.Metric.counter', index=2,
- number=3, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='summary', full_name='io.prometheus.client.Metric.summary', index=3,
- number=4, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='untyped', full_name='io.prometheus.client.Metric.untyped', index=4,
- number=5, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='histogram', full_name='io.prometheus.client.Metric.histogram', index=5,
- number=7, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='timestamp_ms', full_name='io.prometheus.client.Metric.timestamp_ms', index=6,
- number=6, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=464,
- serialized_end=782,
-)
-
-
-_METRICFAMILY = _descriptor.Descriptor(
- name='MetricFamily',
- full_name='io.prometheus.client.MetricFamily',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='io.prometheus.client.MetricFamily.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='help', full_name='io.prometheus.client.MetricFamily.help', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='type', full_name='io.prometheus.client.MetricFamily.type', index=2,
- number=3, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='metric', full_name='io.prometheus.client.MetricFamily.metric', index=3,
- number=4, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=785,
- serialized_end=921,
-)
-
-_SUMMARY.fields_by_name['quantile'].message_type = _QUANTILE
-_HISTOGRAM.fields_by_name['bucket'].message_type = _BUCKET
-_METRIC.fields_by_name['label'].message_type = _LABELPAIR
-_METRIC.fields_by_name['gauge'].message_type = _GAUGE
-_METRIC.fields_by_name['counter'].message_type = _COUNTER
-_METRIC.fields_by_name['summary'].message_type = _SUMMARY
-_METRIC.fields_by_name['untyped'].message_type = _UNTYPED
-_METRIC.fields_by_name['histogram'].message_type = _HISTOGRAM
-_METRICFAMILY.fields_by_name['type'].enum_type = _METRICTYPE
-_METRICFAMILY.fields_by_name['metric'].message_type = _METRIC
-DESCRIPTOR.message_types_by_name['LabelPair'] = _LABELPAIR
-DESCRIPTOR.message_types_by_name['Gauge'] = _GAUGE
-DESCRIPTOR.message_types_by_name['Counter'] = _COUNTER
-DESCRIPTOR.message_types_by_name['Quantile'] = _QUANTILE
-DESCRIPTOR.message_types_by_name['Summary'] = _SUMMARY
-DESCRIPTOR.message_types_by_name['Untyped'] = _UNTYPED
-DESCRIPTOR.message_types_by_name['Histogram'] = _HISTOGRAM
-DESCRIPTOR.message_types_by_name['Bucket'] = _BUCKET
-DESCRIPTOR.message_types_by_name['Metric'] = _METRIC
-DESCRIPTOR.message_types_by_name['MetricFamily'] = _METRICFAMILY
-DESCRIPTOR.enum_types_by_name['MetricType'] = _METRICTYPE
-
-LabelPair = _reflection.GeneratedProtocolMessageType('LabelPair', (_message.Message,), dict(
- DESCRIPTOR = _LABELPAIR,
- __module__ = 'metrics_pb2'
- # @@protoc_insertion_point(class_scope:io.prometheus.client.LabelPair)
- ))
-_sym_db.RegisterMessage(LabelPair)
-
-Gauge = _reflection.GeneratedProtocolMessageType('Gauge', (_message.Message,), dict(
- DESCRIPTOR = _GAUGE,
- __module__ = 'metrics_pb2'
- # @@protoc_insertion_point(class_scope:io.prometheus.client.Gauge)
- ))
-_sym_db.RegisterMessage(Gauge)
-
-Counter = _reflection.GeneratedProtocolMessageType('Counter', (_message.Message,), dict(
- DESCRIPTOR = _COUNTER,
- __module__ = 'metrics_pb2'
- # @@protoc_insertion_point(class_scope:io.prometheus.client.Counter)
- ))
-_sym_db.RegisterMessage(Counter)
-
-Quantile = _reflection.GeneratedProtocolMessageType('Quantile', (_message.Message,), dict(
- DESCRIPTOR = _QUANTILE,
- __module__ = 'metrics_pb2'
- # @@protoc_insertion_point(class_scope:io.prometheus.client.Quantile)
- ))
-_sym_db.RegisterMessage(Quantile)
-
-Summary = _reflection.GeneratedProtocolMessageType('Summary', (_message.Message,), dict(
- DESCRIPTOR = _SUMMARY,
- __module__ = 'metrics_pb2'
- # @@protoc_insertion_point(class_scope:io.prometheus.client.Summary)
- ))
-_sym_db.RegisterMessage(Summary)
-
-Untyped = _reflection.GeneratedProtocolMessageType('Untyped', (_message.Message,), dict(
- DESCRIPTOR = _UNTYPED,
- __module__ = 'metrics_pb2'
- # @@protoc_insertion_point(class_scope:io.prometheus.client.Untyped)
- ))
-_sym_db.RegisterMessage(Untyped)
-
-Histogram = _reflection.GeneratedProtocolMessageType('Histogram', (_message.Message,), dict(
- DESCRIPTOR = _HISTOGRAM,
- __module__ = 'metrics_pb2'
- # @@protoc_insertion_point(class_scope:io.prometheus.client.Histogram)
- ))
-_sym_db.RegisterMessage(Histogram)
-
-Bucket = _reflection.GeneratedProtocolMessageType('Bucket', (_message.Message,), dict(
- DESCRIPTOR = _BUCKET,
- __module__ = 'metrics_pb2'
- # @@protoc_insertion_point(class_scope:io.prometheus.client.Bucket)
- ))
-_sym_db.RegisterMessage(Bucket)
-
-Metric = _reflection.GeneratedProtocolMessageType('Metric', (_message.Message,), dict(
- DESCRIPTOR = _METRIC,
- __module__ = 'metrics_pb2'
- # @@protoc_insertion_point(class_scope:io.prometheus.client.Metric)
- ))
-_sym_db.RegisterMessage(Metric)
-
-MetricFamily = _reflection.GeneratedProtocolMessageType('MetricFamily', (_message.Message,), dict(
- DESCRIPTOR = _METRICFAMILY,
- __module__ = 'metrics_pb2'
- # @@protoc_insertion_point(class_scope:io.prometheus.client.MetricFamily)
- ))
-_sym_db.RegisterMessage(MetricFamily)
-
-
-DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\024io.prometheus.client'))
-# @@protoc_insertion_point(module_scope)
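
These deletions, the generated Python module above and the Ruby and Java bindings below, strip the non-Go bindings out of the vendored prometheus/client_model tree; the Go bindings (the dto package already used by the tests above) are the only part a Go server consumes. A sketch of that remaining path, where a collected metric serializes itself into the dto.Metric protobuf type:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
        dto "github.com/prometheus/client_model/go"
    )

    func main() {
        c := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "demo counter"})
        c.Add(3)

        m := &dto.Metric{}
        if err := c.Write(m); err != nil { // populate the protobuf message
            panic(err)
        }
        fmt.Println(m.GetCounter().GetValue()) // 3
    }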
diff --git a/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb b/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb
deleted file mode 100644
index b5303bf1e..000000000
--- a/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb
+++ /dev/null
@@ -1,2 +0,0 @@
-require 'prometheus/client/model/metrics.pb'
-require 'prometheus/client/model/version'
diff --git a/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb b/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb
deleted file mode 100644
index a72114b8f..000000000
--- a/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb
+++ /dev/null
@@ -1,111 +0,0 @@
-## Generated from metrics.proto for io.prometheus.client
-require "beefcake"
-
-module Prometheus
- module Client
-
- module MetricType
- COUNTER = 0
- GAUGE = 1
- SUMMARY = 2
- UNTYPED = 3
- HISTOGRAM = 4
- end
-
- class LabelPair
- include Beefcake::Message
- end
-
- class Gauge
- include Beefcake::Message
- end
-
- class Counter
- include Beefcake::Message
- end
-
- class Quantile
- include Beefcake::Message
- end
-
- class Summary
- include Beefcake::Message
- end
-
- class Untyped
- include Beefcake::Message
- end
-
- class Histogram
- include Beefcake::Message
- end
-
- class Bucket
- include Beefcake::Message
- end
-
- class Metric
- include Beefcake::Message
- end
-
- class MetricFamily
- include Beefcake::Message
- end
-
- class LabelPair
- optional :name, :string, 1
- optional :value, :string, 2
- end
-
- class Gauge
- optional :value, :double, 1
- end
-
- class Counter
- optional :value, :double, 1
- end
-
- class Quantile
- optional :quantile, :double, 1
- optional :value, :double, 2
- end
-
- class Summary
- optional :sample_count, :uint64, 1
- optional :sample_sum, :double, 2
- repeated :quantile, Quantile, 3
- end
-
- class Untyped
- optional :value, :double, 1
- end
-
- class Histogram
- optional :sample_count, :uint64, 1
- optional :sample_sum, :double, 2
- repeated :bucket, Bucket, 3
- end
-
- class Bucket
- optional :cumulative_count, :uint64, 1
- optional :upper_bound, :double, 2
- end
-
- class Metric
- repeated :label, LabelPair, 1
- optional :gauge, Gauge, 2
- optional :counter, Counter, 3
- optional :summary, Summary, 4
- optional :untyped, Untyped, 5
- optional :histogram, Histogram, 7
- optional :timestamp_ms, :int64, 6
- end
-
- class MetricFamily
- optional :name, :string, 1
- optional :help, :string, 2
- optional :type, MetricType, 3
- repeated :metric, Metric, 4
- end
- end
-end
diff --git a/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb b/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb
deleted file mode 100644
index 00b5e863e..000000000
--- a/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb
+++ /dev/null
@@ -1,7 +0,0 @@
-module Prometheus
- module Client
- module Model
- VERSION = '0.1.0'
- end
- end
-end
diff --git a/vendor/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java b/vendor/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java
deleted file mode 100644
index fb6218e1e..000000000
--- a/vendor/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java
+++ /dev/null
@@ -1,7683 +0,0 @@
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: metrics.proto
-
-package io.prometheus.client;
-
-public final class Metrics {
- private Metrics() {}
- public static void registerAllExtensions(
- com.google.protobuf.ExtensionRegistry registry) {
- }
- /**
- * Protobuf enum {@code io.prometheus.client.MetricType}
- */
- public enum MetricType
- implements com.google.protobuf.ProtocolMessageEnum {
- /**
- * <code>COUNTER = 0;</code>
- */
- COUNTER(0, 0),
- /**
- * <code>GAUGE = 1;</code>
- */
- GAUGE(1, 1),
- /**
- * <code>SUMMARY = 2;</code>
- */
- SUMMARY(2, 2),
- /**
- * <code>UNTYPED = 3;</code>
- */
- UNTYPED(3, 3),
- /**
- * <code>HISTOGRAM = 4;</code>
- */
- HISTOGRAM(4, 4),
- ;
-
- /**
- * <code>COUNTER = 0;</code>
- */
- public static final int COUNTER_VALUE = 0;
- /**
- * <code>GAUGE = 1;</code>
- */
- public static final int GAUGE_VALUE = 1;
- /**
- * <code>SUMMARY = 2;</code>
- */
- public static final int SUMMARY_VALUE = 2;
- /**
- * <code>UNTYPED = 3;</code>
- */
- public static final int UNTYPED_VALUE = 3;
- /**
- * <code>HISTOGRAM = 4;</code>
- */
- public static final int HISTOGRAM_VALUE = 4;
-
-
- public final int getNumber() { return value; }
-
- public static MetricType valueOf(int value) {
- switch (value) {
- case 0: return COUNTER;
- case 1: return GAUGE;
- case 2: return SUMMARY;
- case 3: return UNTYPED;
- case 4: return HISTOGRAM;
- default: return null;
- }
- }
-
- public static com.google.protobuf.Internal.EnumLiteMap<MetricType>
- internalGetValueMap() {
- return internalValueMap;
- }
- private static com.google.protobuf.Internal.EnumLiteMap<MetricType>
- internalValueMap =
- new com.google.protobuf.Internal.EnumLiteMap<MetricType>() {
- public MetricType findValueByNumber(int number) {
- return MetricType.valueOf(number);
- }
- };
-
- public final com.google.protobuf.Descriptors.EnumValueDescriptor
- getValueDescriptor() {
- return getDescriptor().getValues().get(index);
- }
- public final com.google.protobuf.Descriptors.EnumDescriptor
- getDescriptorForType() {
- return getDescriptor();
- }
- public static final com.google.protobuf.Descriptors.EnumDescriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.getDescriptor().getEnumTypes().get(0);
- }
-
- private static final MetricType[] VALUES = values();
-
- public static MetricType valueOf(
- com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
- if (desc.getType() != getDescriptor()) {
- throw new java.lang.IllegalArgumentException(
- "EnumValueDescriptor is not for this type.");
- }
- return VALUES[desc.getIndex()];
- }
-
- private final int index;
- private final int value;
-
- private MetricType(int index, int value) {
- this.index = index;
- this.value = value;
- }
-
- // @@protoc_insertion_point(enum_scope:io.prometheus.client.MetricType)
- }
-
- public interface LabelPairOrBuilder extends
- // @@protoc_insertion_point(interface_extends:io.prometheus.client.LabelPair)
- com.google.protobuf.MessageOrBuilder {
-
- /**
- * <code>optional string name = 1;</code>
- */
- boolean hasName();
- /**
- * <code>optional string name = 1;</code>
- */
- java.lang.String getName();
- /**
- * <code>optional string name = 1;</code>
- */
- com.google.protobuf.ByteString
- getNameBytes();
-
- /**
- * <code>optional string value = 2;</code>
- */
- boolean hasValue();
- /**
- * <code>optional string value = 2;</code>
- */
- java.lang.String getValue();
- /**
- * <code>optional string value = 2;</code>
- */
- com.google.protobuf.ByteString
- getValueBytes();
- }
- /**
- * Protobuf type {@code io.prometheus.client.LabelPair}
- */
- public static final class LabelPair extends
- com.google.protobuf.GeneratedMessage implements
- // @@protoc_insertion_point(message_implements:io.prometheus.client.LabelPair)
- LabelPairOrBuilder {
- // Use LabelPair.newBuilder() to construct.
- private LabelPair(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private LabelPair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final LabelPair defaultInstance;
- public static LabelPair getDefaultInstance() {
- return defaultInstance;
- }
-
- public LabelPair getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private LabelPair(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- com.google.protobuf.ByteString bs = input.readBytes();
- bitField0_ |= 0x00000001;
- name_ = bs;
- break;
- }
- case 18: {
- com.google.protobuf.ByteString bs = input.readBytes();
- bitField0_ |= 0x00000002;
- value_ = bs;
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.LabelPair.class, io.prometheus.client.Metrics.LabelPair.Builder.class);
- }
-
- public static com.google.protobuf.Parser<LabelPair> PARSER =
- new com.google.protobuf.AbstractParser<LabelPair>() {
- public LabelPair parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new LabelPair(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<LabelPair> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- public static final int NAME_FIELD_NUMBER = 1;
- private java.lang.Object name_;
- /**
- * <code>optional string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- name_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- public static final int VALUE_FIELD_NUMBER = 2;
- private java.lang.Object value_;
- /**
- * <code>optional string value = 2;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional string value = 2;</code>
- */
- public java.lang.String getValue() {
- java.lang.Object ref = value_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- value_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string value = 2;</code>
- */
- public com.google.protobuf.ByteString
- getValueBytes() {
- java.lang.Object ref = value_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- value_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- private void initFields() {
- name_ = "";
- value_ = "";
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized == 1) return true;
- if (isInitialized == 0) return false;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, getValueBytes());
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, getValueBytes());
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static io.prometheus.client.Metrics.LabelPair parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.LabelPair parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.LabelPair parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.LabelPair parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.LabelPair parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.LabelPair parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.LabelPair parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static io.prometheus.client.Metrics.LabelPair parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.LabelPair parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.LabelPair parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(io.prometheus.client.Metrics.LabelPair prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code io.prometheus.client.LabelPair}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder> implements
- // @@protoc_insertion_point(builder_implements:io.prometheus.client.LabelPair)
- io.prometheus.client.Metrics.LabelPairOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.LabelPair.class, io.prometheus.client.Metrics.LabelPair.Builder.class);
- }
-
- // Construct using io.prometheus.client.Metrics.LabelPair.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- name_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- value_ = "";
- bitField0_ = (bitField0_ & ~0x00000002);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_descriptor;
- }
-
- public io.prometheus.client.Metrics.LabelPair getDefaultInstanceForType() {
- return io.prometheus.client.Metrics.LabelPair.getDefaultInstance();
- }
-
- public io.prometheus.client.Metrics.LabelPair build() {
- io.prometheus.client.Metrics.LabelPair result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public io.prometheus.client.Metrics.LabelPair buildPartial() {
- io.prometheus.client.Metrics.LabelPair result = new io.prometheus.client.Metrics.LabelPair(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.name_ = name_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.value_ = value_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof io.prometheus.client.Metrics.LabelPair) {
- return mergeFrom((io.prometheus.client.Metrics.LabelPair)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(io.prometheus.client.Metrics.LabelPair other) {
- if (other == io.prometheus.client.Metrics.LabelPair.getDefaultInstance()) return this;
- if (other.hasName()) {
- bitField0_ |= 0x00000001;
- name_ = other.name_;
- onChanged();
- }
- if (other.hasValue()) {
- bitField0_ |= 0x00000002;
- value_ = other.value_;
- onChanged();
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- io.prometheus.client.Metrics.LabelPair parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (io.prometheus.client.Metrics.LabelPair) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- private java.lang.Object name_ = "";
- /**
- * <code>optional string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (!(ref instanceof java.lang.String)) {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- name_ = s;
- }
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder setName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder clearName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- name_ = getDefaultInstance().getName();
- onChanged();
- return this;
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder setNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
-
- private java.lang.Object value_ = "";
- /**
- * <code>optional string value = 2;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional string value = 2;</code>
- */
- public java.lang.String getValue() {
- java.lang.Object ref = value_;
- if (!(ref instanceof java.lang.String)) {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- value_ = s;
- }
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string value = 2;</code>
- */
- public com.google.protobuf.ByteString
- getValueBytes() {
- java.lang.Object ref = value_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- value_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string value = 2;</code>
- */
- public Builder setValue(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- value_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string value = 2;</code>
- */
- public Builder clearValue() {
- bitField0_ = (bitField0_ & ~0x00000002);
- value_ = getDefaultInstance().getValue();
- onChanged();
- return this;
- }
- /**
- * <code>optional string value = 2;</code>
- */
- public Builder setValueBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- value_ = value;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:io.prometheus.client.LabelPair)
- }
-
- static {
- defaultInstance = new LabelPair(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:io.prometheus.client.LabelPair)
- }
-
- public interface GaugeOrBuilder extends
- // @@protoc_insertion_point(interface_extends:io.prometheus.client.Gauge)
- com.google.protobuf.MessageOrBuilder {
-
- /**
- * <code>optional double value = 1;</code>
- */
- boolean hasValue();
- /**
- * <code>optional double value = 1;</code>
- */
- double getValue();
- }
- /**
- * Protobuf type {@code io.prometheus.client.Gauge}
- */
- public static final class Gauge extends
- com.google.protobuf.GeneratedMessage implements
- // @@protoc_insertion_point(message_implements:io.prometheus.client.Gauge)
- GaugeOrBuilder {
- // Use Gauge.newBuilder() to construct.
- private Gauge(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Gauge(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Gauge defaultInstance;
- public static Gauge getDefaultInstance() {
- return defaultInstance;
- }
-
- public Gauge getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Gauge(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 9: {
- bitField0_ |= 0x00000001;
- value_ = input.readDouble();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Gauge.class, io.prometheus.client.Metrics.Gauge.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Gauge> PARSER =
- new com.google.protobuf.AbstractParser<Gauge>() {
- public Gauge parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Gauge(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Gauge> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- public static final int VALUE_FIELD_NUMBER = 1;
- private double value_;
- /**
- * <code>optional double value = 1;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public double getValue() {
- return value_;
- }
-
- private void initFields() {
- value_ = 0D;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized == 1) return true;
- if (isInitialized == 0) return false;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeDouble(1, value_);
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeDoubleSize(1, value_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static io.prometheus.client.Metrics.Gauge parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Gauge parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Gauge parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Gauge parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Gauge parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Gauge parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Gauge parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static io.prometheus.client.Metrics.Gauge parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Gauge parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Gauge parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(io.prometheus.client.Metrics.Gauge prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code io.prometheus.client.Gauge}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder> implements
- // @@protoc_insertion_point(builder_implements:io.prometheus.client.Gauge)
- io.prometheus.client.Metrics.GaugeOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Gauge.class, io.prometheus.client.Metrics.Gauge.Builder.class);
- }
-
- // Construct using io.prometheus.client.Metrics.Gauge.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- value_ = 0D;
- bitField0_ = (bitField0_ & ~0x00000001);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_descriptor;
- }
-
- public io.prometheus.client.Metrics.Gauge getDefaultInstanceForType() {
- return io.prometheus.client.Metrics.Gauge.getDefaultInstance();
- }
-
- public io.prometheus.client.Metrics.Gauge build() {
- io.prometheus.client.Metrics.Gauge result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public io.prometheus.client.Metrics.Gauge buildPartial() {
- io.prometheus.client.Metrics.Gauge result = new io.prometheus.client.Metrics.Gauge(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.value_ = value_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof io.prometheus.client.Metrics.Gauge) {
- return mergeFrom((io.prometheus.client.Metrics.Gauge)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(io.prometheus.client.Metrics.Gauge other) {
- if (other == io.prometheus.client.Metrics.Gauge.getDefaultInstance()) return this;
- if (other.hasValue()) {
- setValue(other.getValue());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- io.prometheus.client.Metrics.Gauge parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (io.prometheus.client.Metrics.Gauge) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- private double value_ ;
- /**
- * <code>optional double value = 1;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public double getValue() {
- return value_;
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public Builder setValue(double value) {
- bitField0_ |= 0x00000001;
- value_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public Builder clearValue() {
- bitField0_ = (bitField0_ & ~0x00000001);
- value_ = 0D;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:io.prometheus.client.Gauge)
- }
-
- static {
- defaultInstance = new Gauge(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:io.prometheus.client.Gauge)
- }
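Taken together, the Gauge message deleted above exposes the usual protobuf 2.x surface: a Builder for construction, a presence bit behind hasValue(), and parseFrom() overloads for decoding. A minimal round-trip sketch against that API (toByteArray() comes from the protobuf base class rather than this file):

import io.prometheus.client.Metrics;

public class GaugeRoundTrip {
    public static void main(String[] args) throws Exception {
        // setValue() flips bit 0x00000001, so hasValue() is true on the result.
        Metrics.Gauge gauge = Metrics.Gauge.newBuilder()
                .setValue(42.5)
                .build();

        // Wire form: one tag byte (field 1, wire type 1) plus a fixed64 double.
        byte[] wire = gauge.toByteArray();

        Metrics.Gauge parsed = Metrics.Gauge.parseFrom(wire);
        assert parsed.hasValue() && parsed.getValue() == 42.5;
    }
}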
-
- public interface CounterOrBuilder extends
- // @@protoc_insertion_point(interface_extends:io.prometheus.client.Counter)
- com.google.protobuf.MessageOrBuilder {
-
- /**
- * <code>optional double value = 1;</code>
- */
- boolean hasValue();
- /**
- * <code>optional double value = 1;</code>
- */
- double getValue();
- }
- /**
- * Protobuf type {@code io.prometheus.client.Counter}
- */
- public static final class Counter extends
- com.google.protobuf.GeneratedMessage implements
- // @@protoc_insertion_point(message_implements:io.prometheus.client.Counter)
- CounterOrBuilder {
- // Use Counter.newBuilder() to construct.
- private Counter(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Counter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Counter defaultInstance;
- public static Counter getDefaultInstance() {
- return defaultInstance;
- }
-
- public Counter getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Counter(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 9: {
- bitField0_ |= 0x00000001;
- value_ = input.readDouble();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
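The case 9 literal in the parsing loop above is the precomputed wire tag for field 1 with wire type 1 (fixed64): protobuf packs (field_number << 3) | wire_type into a single varint. A small illustrative helper (not part of the generated file) showing where the switch labels in this file come from:

// Illustrative only: how the tag constants in the generated parsers are derived.
final class WireTags {
    static int makeTag(int fieldNumber, int wireType) {
        return (fieldNumber << 3) | wireType;
    }

    public static void main(String[] args) {
        System.out.println(makeTag(1, 1)); // 9  -> "optional double value = 1"
        System.out.println(makeTag(2, 1)); // 17 -> a double in field 2 (Quantile.value)
        System.out.println(makeTag(1, 0)); // 8  -> a varint, e.g. Summary.sample_count
        System.out.println(makeTag(3, 2)); // 26 -> length-delimited, e.g. Summary.quantile
    }
}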
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Counter.class, io.prometheus.client.Metrics.Counter.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Counter> PARSER =
- new com.google.protobuf.AbstractParser<Counter>() {
- public Counter parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Counter(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Counter> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- public static final int VALUE_FIELD_NUMBER = 1;
- private double value_;
- /**
- * <code>optional double value = 1;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public double getValue() {
- return value_;
- }
-
- private void initFields() {
- value_ = 0D;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized == 1) return true;
- if (isInitialized == 0) return false;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeDouble(1, value_);
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeDoubleSize(1, value_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static io.prometheus.client.Metrics.Counter parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Counter parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Counter parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Counter parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Counter parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Counter parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Counter parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static io.prometheus.client.Metrics.Counter parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Counter parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Counter parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(io.prometheus.client.Metrics.Counter prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code io.prometheus.client.Counter}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder> implements
- // @@protoc_insertion_point(builder_implements:io.prometheus.client.Counter)
- io.prometheus.client.Metrics.CounterOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Counter.class, io.prometheus.client.Metrics.Counter.Builder.class);
- }
-
- // Construct using io.prometheus.client.Metrics.Counter.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- value_ = 0D;
- bitField0_ = (bitField0_ & ~0x00000001);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_descriptor;
- }
-
- public io.prometheus.client.Metrics.Counter getDefaultInstanceForType() {
- return io.prometheus.client.Metrics.Counter.getDefaultInstance();
- }
-
- public io.prometheus.client.Metrics.Counter build() {
- io.prometheus.client.Metrics.Counter result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public io.prometheus.client.Metrics.Counter buildPartial() {
- io.prometheus.client.Metrics.Counter result = new io.prometheus.client.Metrics.Counter(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.value_ = value_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof io.prometheus.client.Metrics.Counter) {
- return mergeFrom((io.prometheus.client.Metrics.Counter)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(io.prometheus.client.Metrics.Counter other) {
- if (other == io.prometheus.client.Metrics.Counter.getDefaultInstance()) return this;
- if (other.hasValue()) {
- setValue(other.getValue());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- io.prometheus.client.Metrics.Counter parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (io.prometheus.client.Metrics.Counter) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- private double value_ ;
- /**
- * <code>optional double value = 1;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public double getValue() {
- return value_;
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public Builder setValue(double value) {
- bitField0_ |= 0x00000001;
- value_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public Builder clearValue() {
- bitField0_ = (bitField0_ & ~0x00000001);
- value_ = 0D;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:io.prometheus.client.Counter)
- }
-
- static {
- defaultInstance = new Counter(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:io.prometheus.client.Counter)
- }
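Counter is structurally identical to Gauge; the deleted parseDelimitedFrom() overloads are the stream-friendly variant, framing each message with a varint length so several messages can share one stream. A hedged sketch (writeDelimitedTo() comes from the protobuf base class rather than this file):

import io.prometheus.client.Metrics;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

public class CounterStream {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // Each write is prefixed with a varint length, so the stream self-delimits.
        Metrics.Counter.newBuilder().setValue(1).build().writeDelimitedTo(out);
        Metrics.Counter.newBuilder().setValue(2).build().writeDelimitedTo(out);

        InputStream in = new ByteArrayInputStream(out.toByteArray());
        Metrics.Counter first = Metrics.Counter.parseDelimitedFrom(in);
        Metrics.Counter second = Metrics.Counter.parseDelimitedFrom(in);
        assert first.getValue() == 1.0 && second.getValue() == 2.0;
    }
}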
-
- public interface QuantileOrBuilder extends
- // @@protoc_insertion_point(interface_extends:io.prometheus.client.Quantile)
- com.google.protobuf.MessageOrBuilder {
-
- /**
- * <code>optional double quantile = 1;</code>
- */
- boolean hasQuantile();
- /**
- * <code>optional double quantile = 1;</code>
- */
- double getQuantile();
-
- /**
- * <code>optional double value = 2;</code>
- */
- boolean hasValue();
- /**
- * <code>optional double value = 2;</code>
- */
- double getValue();
- }
- /**
- * Protobuf type {@code io.prometheus.client.Quantile}
- */
- public static final class Quantile extends
- com.google.protobuf.GeneratedMessage implements
- // @@protoc_insertion_point(message_implements:io.prometheus.client.Quantile)
- QuantileOrBuilder {
- // Use Quantile.newBuilder() to construct.
- private Quantile(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Quantile(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Quantile defaultInstance;
- public static Quantile getDefaultInstance() {
- return defaultInstance;
- }
-
- public Quantile getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Quantile(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 9: {
- bitField0_ |= 0x00000001;
- quantile_ = input.readDouble();
- break;
- }
- case 17: {
- bitField0_ |= 0x00000002;
- value_ = input.readDouble();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Quantile.class, io.prometheus.client.Metrics.Quantile.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Quantile> PARSER =
- new com.google.protobuf.AbstractParser<Quantile>() {
- public Quantile parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Quantile(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Quantile> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- public static final int QUANTILE_FIELD_NUMBER = 1;
- private double quantile_;
- /**
- * <code>optional double quantile = 1;</code>
- */
- public boolean hasQuantile() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional double quantile = 1;</code>
- */
- public double getQuantile() {
- return quantile_;
- }
-
- public static final int VALUE_FIELD_NUMBER = 2;
- private double value_;
- /**
- * <code>optional double value = 2;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional double value = 2;</code>
- */
- public double getValue() {
- return value_;
- }
-
- private void initFields() {
- quantile_ = 0D;
- value_ = 0D;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized == 1) return true;
- if (isInitialized == 0) return false;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeDouble(1, quantile_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeDouble(2, value_);
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeDoubleSize(1, quantile_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeDoubleSize(2, value_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static io.prometheus.client.Metrics.Quantile parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Quantile parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Quantile parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Quantile parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Quantile parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Quantile parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Quantile parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static io.prometheus.client.Metrics.Quantile parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Quantile parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Quantile parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(io.prometheus.client.Metrics.Quantile prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code io.prometheus.client.Quantile}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder> implements
- // @@protoc_insertion_point(builder_implements:io.prometheus.client.Quantile)
- io.prometheus.client.Metrics.QuantileOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Quantile.class, io.prometheus.client.Metrics.Quantile.Builder.class);
- }
-
- // Construct using io.prometheus.client.Metrics.Quantile.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- quantile_ = 0D;
- bitField0_ = (bitField0_ & ~0x00000001);
- value_ = 0D;
- bitField0_ = (bitField0_ & ~0x00000002);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_descriptor;
- }
-
- public io.prometheus.client.Metrics.Quantile getDefaultInstanceForType() {
- return io.prometheus.client.Metrics.Quantile.getDefaultInstance();
- }
-
- public io.prometheus.client.Metrics.Quantile build() {
- io.prometheus.client.Metrics.Quantile result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public io.prometheus.client.Metrics.Quantile buildPartial() {
- io.prometheus.client.Metrics.Quantile result = new io.prometheus.client.Metrics.Quantile(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.quantile_ = quantile_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.value_ = value_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof io.prometheus.client.Metrics.Quantile) {
- return mergeFrom((io.prometheus.client.Metrics.Quantile)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(io.prometheus.client.Metrics.Quantile other) {
- if (other == io.prometheus.client.Metrics.Quantile.getDefaultInstance()) return this;
- if (other.hasQuantile()) {
- setQuantile(other.getQuantile());
- }
- if (other.hasValue()) {
- setValue(other.getValue());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- io.prometheus.client.Metrics.Quantile parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (io.prometheus.client.Metrics.Quantile) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- private double quantile_ ;
- /**
- * <code>optional double quantile = 1;</code>
- */
- public boolean hasQuantile() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional double quantile = 1;</code>
- */
- public double getQuantile() {
- return quantile_;
- }
- /**
- * <code>optional double quantile = 1;</code>
- */
- public Builder setQuantile(double value) {
- bitField0_ |= 0x00000001;
- quantile_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional double quantile = 1;</code>
- */
- public Builder clearQuantile() {
- bitField0_ = (bitField0_ & ~0x00000001);
- quantile_ = 0D;
- onChanged();
- return this;
- }
-
- private double value_ ;
- /**
- * <code>optional double value = 2;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional double value = 2;</code>
- */
- public double getValue() {
- return value_;
- }
- /**
- * <code>optional double value = 2;</code>
- */
- public Builder setValue(double value) {
- bitField0_ |= 0x00000002;
- value_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional double value = 2;</code>
- */
- public Builder clearValue() {
- bitField0_ = (bitField0_ & ~0x00000002);
- value_ = 0D;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:io.prometheus.client.Quantile)
- }
-
- static {
- defaultInstance = new Quantile(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:io.prometheus.client.Quantile)
- }
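Quantile is the first two-field message in the file, and its builder shows how the shared bitField0_ tracks presence per field: setQuantile() flips bit 0x1, setValue() flips bit 0x2, and each clear method resets both the bit and the field's default. A short sketch of those semantics (illustrative only):

import io.prometheus.client.Metrics;

public class QuantilePresence {
    public static void main(String[] args) {
        Metrics.Quantile.Builder q = Metrics.Quantile.newBuilder()
                .setQuantile(0.99)   // sets bit 0x00000001
                .setValue(0.123);    // sets bit 0x00000002

        q.clearValue();              // clears bit 0x00000002, value_ back to 0D

        assert q.hasQuantile() && !q.hasValue();
        assert q.getValue() == 0.0;  // default after clear, not the old 0.123
    }
}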
-
- public interface SummaryOrBuilder extends
- // @@protoc_insertion_point(interface_extends:io.prometheus.client.Summary)
- com.google.protobuf.MessageOrBuilder {
-
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- boolean hasSampleCount();
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- long getSampleCount();
-
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- boolean hasSampleSum();
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- double getSampleSum();
-
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- java.util.List<io.prometheus.client.Metrics.Quantile>
- getQuantileList();
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- io.prometheus.client.Metrics.Quantile getQuantile(int index);
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- int getQuantileCount();
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- java.util.List<? extends io.prometheus.client.Metrics.QuantileOrBuilder>
- getQuantileOrBuilderList();
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- io.prometheus.client.Metrics.QuantileOrBuilder getQuantileOrBuilder(
- int index);
- }
- /**
- * Protobuf type {@code io.prometheus.client.Summary}
- */
- public static final class Summary extends
- com.google.protobuf.GeneratedMessage implements
- // @@protoc_insertion_point(message_implements:io.prometheus.client.Summary)
- SummaryOrBuilder {
- // Use Summary.newBuilder() to construct.
- private Summary(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Summary(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Summary defaultInstance;
- public static Summary getDefaultInstance() {
- return defaultInstance;
- }
-
- public Summary getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Summary(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 8: {
- bitField0_ |= 0x00000001;
- sampleCount_ = input.readUInt64();
- break;
- }
- case 17: {
- bitField0_ |= 0x00000002;
- sampleSum_ = input.readDouble();
- break;
- }
- case 26: {
- if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
- quantile_ = new java.util.ArrayList<io.prometheus.client.Metrics.Quantile>();
- mutable_bitField0_ |= 0x00000004;
- }
- quantile_.add(input.readMessage(io.prometheus.client.Metrics.Quantile.PARSER, extensionRegistry));
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
- quantile_ = java.util.Collections.unmodifiableList(quantile_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Summary.class, io.prometheus.client.Metrics.Summary.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Summary> PARSER =
- new com.google.protobuf.AbstractParser<Summary>() {
- public Summary parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Summary(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Summary> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- public static final int SAMPLE_COUNT_FIELD_NUMBER = 1;
- private long sampleCount_;
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public boolean hasSampleCount() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public long getSampleCount() {
- return sampleCount_;
- }
-
- public static final int SAMPLE_SUM_FIELD_NUMBER = 2;
- private double sampleSum_;
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public boolean hasSampleSum() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public double getSampleSum() {
- return sampleSum_;
- }
-
- public static final int QUANTILE_FIELD_NUMBER = 3;
- private java.util.List<io.prometheus.client.Metrics.Quantile> quantile_;
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public java.util.List<io.prometheus.client.Metrics.Quantile> getQuantileList() {
- return quantile_;
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public java.util.List<? extends io.prometheus.client.Metrics.QuantileOrBuilder>
- getQuantileOrBuilderList() {
- return quantile_;
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public int getQuantileCount() {
- return quantile_.size();
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public io.prometheus.client.Metrics.Quantile getQuantile(int index) {
- return quantile_.get(index);
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public io.prometheus.client.Metrics.QuantileOrBuilder getQuantileOrBuilder(
- int index) {
- return quantile_.get(index);
- }
-
- private void initFields() {
- sampleCount_ = 0L;
- sampleSum_ = 0D;
- quantile_ = java.util.Collections.emptyList();
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized == 1) return true;
- if (isInitialized == 0) return false;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeUInt64(1, sampleCount_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeDouble(2, sampleSum_);
- }
- for (int i = 0; i < quantile_.size(); i++) {
- output.writeMessage(3, quantile_.get(i));
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(1, sampleCount_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeDoubleSize(2, sampleSum_);
- }
- for (int i = 0; i < quantile_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(3, quantile_.get(i));
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static io.prometheus.client.Metrics.Summary parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Summary parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Summary parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Summary parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Summary parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Summary parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Summary parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static io.prometheus.client.Metrics.Summary parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Summary parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Summary parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(io.prometheus.client.Metrics.Summary prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code io.prometheus.client.Summary}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder> implements
- // @@protoc_insertion_point(builder_implements:io.prometheus.client.Summary)
- io.prometheus.client.Metrics.SummaryOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Summary.class, io.prometheus.client.Metrics.Summary.Builder.class);
- }
-
- // Construct using io.prometheus.client.Metrics.Summary.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getQuantileFieldBuilder();
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- sampleCount_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000001);
- sampleSum_ = 0D;
- bitField0_ = (bitField0_ & ~0x00000002);
- if (quantileBuilder_ == null) {
- quantile_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
- } else {
- quantileBuilder_.clear();
- }
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_descriptor;
- }
-
- public io.prometheus.client.Metrics.Summary getDefaultInstanceForType() {
- return io.prometheus.client.Metrics.Summary.getDefaultInstance();
- }
-
- public io.prometheus.client.Metrics.Summary build() {
- io.prometheus.client.Metrics.Summary result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public io.prometheus.client.Metrics.Summary buildPartial() {
- io.prometheus.client.Metrics.Summary result = new io.prometheus.client.Metrics.Summary(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.sampleCount_ = sampleCount_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.sampleSum_ = sampleSum_;
- if (quantileBuilder_ == null) {
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- quantile_ = java.util.Collections.unmodifiableList(quantile_);
- bitField0_ = (bitField0_ & ~0x00000004);
- }
- result.quantile_ = quantile_;
- } else {
- result.quantile_ = quantileBuilder_.build();
- }
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof io.prometheus.client.Metrics.Summary) {
- return mergeFrom((io.prometheus.client.Metrics.Summary)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(io.prometheus.client.Metrics.Summary other) {
- if (other == io.prometheus.client.Metrics.Summary.getDefaultInstance()) return this;
- if (other.hasSampleCount()) {
- setSampleCount(other.getSampleCount());
- }
- if (other.hasSampleSum()) {
- setSampleSum(other.getSampleSum());
- }
- if (quantileBuilder_ == null) {
- if (!other.quantile_.isEmpty()) {
- if (quantile_.isEmpty()) {
- quantile_ = other.quantile_;
- bitField0_ = (bitField0_ & ~0x00000004);
- } else {
- ensureQuantileIsMutable();
- quantile_.addAll(other.quantile_);
- }
- onChanged();
- }
- } else {
- if (!other.quantile_.isEmpty()) {
- if (quantileBuilder_.isEmpty()) {
- quantileBuilder_.dispose();
- quantileBuilder_ = null;
- quantile_ = other.quantile_;
- bitField0_ = (bitField0_ & ~0x00000004);
- quantileBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getQuantileFieldBuilder() : null;
- } else {
- quantileBuilder_.addAllMessages(other.quantile_);
- }
- }
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- io.prometheus.client.Metrics.Summary parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (io.prometheus.client.Metrics.Summary) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- private long sampleCount_ ;
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public boolean hasSampleCount() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public long getSampleCount() {
- return sampleCount_;
- }
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public Builder setSampleCount(long value) {
- bitField0_ |= 0x00000001;
- sampleCount_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public Builder clearSampleCount() {
- bitField0_ = (bitField0_ & ~0x00000001);
- sampleCount_ = 0L;
- onChanged();
- return this;
- }
-
- private double sampleSum_ ;
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public boolean hasSampleSum() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public double getSampleSum() {
- return sampleSum_;
- }
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public Builder setSampleSum(double value) {
- bitField0_ |= 0x00000002;
- sampleSum_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public Builder clearSampleSum() {
- bitField0_ = (bitField0_ & ~0x00000002);
- sampleSum_ = 0D;
- onChanged();
- return this;
- }
-
- private java.util.List<io.prometheus.client.Metrics.Quantile> quantile_ =
- java.util.Collections.emptyList();
- private void ensureQuantileIsMutable() {
- if (!((bitField0_ & 0x00000004) == 0x00000004)) {
- quantile_ = new java.util.ArrayList<io.prometheus.client.Metrics.Quantile>(quantile_);
- bitField0_ |= 0x00000004;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.Quantile, io.prometheus.client.Metrics.Quantile.Builder, io.prometheus.client.Metrics.QuantileOrBuilder> quantileBuilder_;
-
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public java.util.List<io.prometheus.client.Metrics.Quantile> getQuantileList() {
- if (quantileBuilder_ == null) {
- return java.util.Collections.unmodifiableList(quantile_);
- } else {
- return quantileBuilder_.getMessageList();
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public int getQuantileCount() {
- if (quantileBuilder_ == null) {
- return quantile_.size();
- } else {
- return quantileBuilder_.getCount();
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public io.prometheus.client.Metrics.Quantile getQuantile(int index) {
- if (quantileBuilder_ == null) {
- return quantile_.get(index);
- } else {
- return quantileBuilder_.getMessage(index);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public Builder setQuantile(
- int index, io.prometheus.client.Metrics.Quantile value) {
- if (quantileBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureQuantileIsMutable();
- quantile_.set(index, value);
- onChanged();
- } else {
- quantileBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public Builder setQuantile(
- int index, io.prometheus.client.Metrics.Quantile.Builder builderForValue) {
- if (quantileBuilder_ == null) {
- ensureQuantileIsMutable();
- quantile_.set(index, builderForValue.build());
- onChanged();
- } else {
- quantileBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public Builder addQuantile(io.prometheus.client.Metrics.Quantile value) {
- if (quantileBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureQuantileIsMutable();
- quantile_.add(value);
- onChanged();
- } else {
- quantileBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public Builder addQuantile(
- int index, io.prometheus.client.Metrics.Quantile value) {
- if (quantileBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureQuantileIsMutable();
- quantile_.add(index, value);
- onChanged();
- } else {
- quantileBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public Builder addQuantile(
- io.prometheus.client.Metrics.Quantile.Builder builderForValue) {
- if (quantileBuilder_ == null) {
- ensureQuantileIsMutable();
- quantile_.add(builderForValue.build());
- onChanged();
- } else {
- quantileBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public Builder addQuantile(
- int index, io.prometheus.client.Metrics.Quantile.Builder builderForValue) {
- if (quantileBuilder_ == null) {
- ensureQuantileIsMutable();
- quantile_.add(index, builderForValue.build());
- onChanged();
- } else {
- quantileBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public Builder addAllQuantile(
- java.lang.Iterable<? extends io.prometheus.client.Metrics.Quantile> values) {
- if (quantileBuilder_ == null) {
- ensureQuantileIsMutable();
- com.google.protobuf.AbstractMessageLite.Builder.addAll(
- values, quantile_);
- onChanged();
- } else {
- quantileBuilder_.addAllMessages(values);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public Builder clearQuantile() {
- if (quantileBuilder_ == null) {
- quantile_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
- onChanged();
- } else {
- quantileBuilder_.clear();
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public Builder removeQuantile(int index) {
- if (quantileBuilder_ == null) {
- ensureQuantileIsMutable();
- quantile_.remove(index);
- onChanged();
- } else {
- quantileBuilder_.remove(index);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public io.prometheus.client.Metrics.Quantile.Builder getQuantileBuilder(
- int index) {
- return getQuantileFieldBuilder().getBuilder(index);
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public io.prometheus.client.Metrics.QuantileOrBuilder getQuantileOrBuilder(
- int index) {
- if (quantileBuilder_ == null) {
- return quantile_.get(index); } else {
- return quantileBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public java.util.List<? extends io.prometheus.client.Metrics.QuantileOrBuilder>
- getQuantileOrBuilderList() {
- if (quantileBuilder_ != null) {
- return quantileBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(quantile_);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public io.prometheus.client.Metrics.Quantile.Builder addQuantileBuilder() {
- return getQuantileFieldBuilder().addBuilder(
- io.prometheus.client.Metrics.Quantile.getDefaultInstance());
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public io.prometheus.client.Metrics.Quantile.Builder addQuantileBuilder(
- int index) {
- return getQuantileFieldBuilder().addBuilder(
- index, io.prometheus.client.Metrics.Quantile.getDefaultInstance());
- }
- /**
- * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
- */
- public java.util.List<io.prometheus.client.Metrics.Quantile.Builder>
- getQuantileBuilderList() {
- return getQuantileFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.Quantile, io.prometheus.client.Metrics.Quantile.Builder, io.prometheus.client.Metrics.QuantileOrBuilder>
- getQuantileFieldBuilder() {
- if (quantileBuilder_ == null) {
- quantileBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.Quantile, io.prometheus.client.Metrics.Quantile.Builder, io.prometheus.client.Metrics.QuantileOrBuilder>(
- quantile_,
- ((bitField0_ & 0x00000004) == 0x00000004),
- getParentForChildren(),
- isClean());
- quantile_ = null;
- }
- return quantileBuilder_;
- }
-
- // @@protoc_insertion_point(builder_scope:io.prometheus.client.Summary)
- }
-
- static {
- defaultInstance = new Summary(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:io.prometheus.client.Summary)
- }
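Summary adds the first repeated field, and its builder keeps the list copy-on-write: ensureQuantileIsMutable() only copies when bit 0x4 is unset, and a RepeatedFieldBuilder takes over once sub-builders are requested. A minimal construction sketch using only methods deleted above:

import io.prometheus.client.Metrics;

public class SummaryBuild {
    public static void main(String[] args) {
        Metrics.Summary summary = Metrics.Summary.newBuilder()
                .setSampleCount(120)        // field 1, varint (tag 8)
                .setSampleSum(4.2)          // field 2, fixed64 (tag 17)
                // addQuantile(Builder) builds each element before appending
                // (field 3, length-delimited, tag 26).
                .addQuantile(Metrics.Quantile.newBuilder()
                        .setQuantile(0.5).setValue(0.03))
                .addQuantile(Metrics.Quantile.newBuilder()
                        .setQuantile(0.99).setValue(0.11))
                .build();

        assert summary.getQuantileCount() == 2;
        assert summary.getQuantile(1).getQuantile() == 0.99;
        // getQuantileList() on the built message returns the unmodifiable list
        // sealed in buildPartial() above.
    }
}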
-
- public interface UntypedOrBuilder extends
- // @@protoc_insertion_point(interface_extends:io.prometheus.client.Untyped)
- com.google.protobuf.MessageOrBuilder {
-
- /**
- * <code>optional double value = 1;</code>
- */
- boolean hasValue();
- /**
- * <code>optional double value = 1;</code>
- */
- double getValue();
- }
- /**
- * Protobuf type {@code io.prometheus.client.Untyped}
- */
- public static final class Untyped extends
- com.google.protobuf.GeneratedMessage implements
- // @@protoc_insertion_point(message_implements:io.prometheus.client.Untyped)
- UntypedOrBuilder {
- // Use Untyped.newBuilder() to construct.
- private Untyped(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Untyped(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Untyped defaultInstance;
- public static Untyped getDefaultInstance() {
- return defaultInstance;
- }
-
- public Untyped getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Untyped(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 9: {
- bitField0_ |= 0x00000001;
- value_ = input.readDouble();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Untyped.class, io.prometheus.client.Metrics.Untyped.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Untyped> PARSER =
- new com.google.protobuf.AbstractParser<Untyped>() {
- public Untyped parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Untyped(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Untyped> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- public static final int VALUE_FIELD_NUMBER = 1;
- private double value_;
- /**
- * <code>optional double value = 1;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public double getValue() {
- return value_;
- }
-
- private void initFields() {
- value_ = 0D;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized == 1) return true;
- if (isInitialized == 0) return false;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeDouble(1, value_);
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeDoubleSize(1, value_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static io.prometheus.client.Metrics.Untyped parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Untyped parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Untyped parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Untyped parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Untyped parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Untyped parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Untyped parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static io.prometheus.client.Metrics.Untyped parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Untyped parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Untyped parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(io.prometheus.client.Metrics.Untyped prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code io.prometheus.client.Untyped}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder> implements
- // @@protoc_insertion_point(builder_implements:io.prometheus.client.Untyped)
- io.prometheus.client.Metrics.UntypedOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Untyped.class, io.prometheus.client.Metrics.Untyped.Builder.class);
- }
-
- // Construct using io.prometheus.client.Metrics.Untyped.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- value_ = 0D;
- bitField0_ = (bitField0_ & ~0x00000001);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_descriptor;
- }
-
- public io.prometheus.client.Metrics.Untyped getDefaultInstanceForType() {
- return io.prometheus.client.Metrics.Untyped.getDefaultInstance();
- }
-
- public io.prometheus.client.Metrics.Untyped build() {
- io.prometheus.client.Metrics.Untyped result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public io.prometheus.client.Metrics.Untyped buildPartial() {
- io.prometheus.client.Metrics.Untyped result = new io.prometheus.client.Metrics.Untyped(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.value_ = value_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof io.prometheus.client.Metrics.Untyped) {
- return mergeFrom((io.prometheus.client.Metrics.Untyped)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(io.prometheus.client.Metrics.Untyped other) {
- if (other == io.prometheus.client.Metrics.Untyped.getDefaultInstance()) return this;
- if (other.hasValue()) {
- setValue(other.getValue());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- io.prometheus.client.Metrics.Untyped parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (io.prometheus.client.Metrics.Untyped) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- private double value_ ;
- /**
- * <code>optional double value = 1;</code>
- */
- public boolean hasValue() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public double getValue() {
- return value_;
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public Builder setValue(double value) {
- bitField0_ |= 0x00000001;
- value_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional double value = 1;</code>
- */
- public Builder clearValue() {
- bitField0_ = (bitField0_ & ~0x00000001);
- value_ = 0D;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:io.prometheus.client.Untyped)
- }
-
- static {
- defaultInstance = new Untyped(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:io.prometheus.client.Untyped)
- }
-
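Untyped is the simplest of these messages: one optional double tracked by a single presence bit. A short sketch of the round trip (hypothetical UntypedRoundTrip class; everything used here is shown above, apart from the inherited toByteArray()):

import io.prometheus.client.Metrics;

public class UntypedRoundTrip {
    public static void main(String[] args) throws Exception {
        // setValue() sets the low bit of bitField0_, so hasValue() reports presence.
        Metrics.Untyped untyped = Metrics.Untyped.newBuilder()
            .setValue(42.0)
            .build();

        // parseFrom(byte[]) delegates to the PARSER defined above; field 1 is
        // read under wire tag 9 (field number 1, 64-bit type) in the parsing ctor.
        Metrics.Untyped decoded = Metrics.Untyped.parseFrom(untyped.toByteArray());
        System.out.println(decoded.hasValue() + " " + decoded.getValue()); // true 42.0
    }
}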
- public interface HistogramOrBuilder extends
- // @@protoc_insertion_point(interface_extends:io.prometheus.client.Histogram)
- com.google.protobuf.MessageOrBuilder {
-
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- boolean hasSampleCount();
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- long getSampleCount();
-
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- boolean hasSampleSum();
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- double getSampleSum();
-
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- java.util.List<io.prometheus.client.Metrics.Bucket>
- getBucketList();
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- io.prometheus.client.Metrics.Bucket getBucket(int index);
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- int getBucketCount();
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- java.util.List<? extends io.prometheus.client.Metrics.BucketOrBuilder>
- getBucketOrBuilderList();
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- io.prometheus.client.Metrics.BucketOrBuilder getBucketOrBuilder(
- int index);
- }
- /**
- * Protobuf type {@code io.prometheus.client.Histogram}
- */
- public static final class Histogram extends
- com.google.protobuf.GeneratedMessage implements
- // @@protoc_insertion_point(message_implements:io.prometheus.client.Histogram)
- HistogramOrBuilder {
- // Use Histogram.newBuilder() to construct.
- private Histogram(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Histogram(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Histogram defaultInstance;
- public static Histogram getDefaultInstance() {
- return defaultInstance;
- }
-
- public Histogram getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Histogram(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 8: {
- bitField0_ |= 0x00000001;
- sampleCount_ = input.readUInt64();
- break;
- }
- case 17: {
- bitField0_ |= 0x00000002;
- sampleSum_ = input.readDouble();
- break;
- }
- case 26: {
- if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
- bucket_ = new java.util.ArrayList<io.prometheus.client.Metrics.Bucket>();
- mutable_bitField0_ |= 0x00000004;
- }
- bucket_.add(input.readMessage(io.prometheus.client.Metrics.Bucket.PARSER, extensionRegistry));
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
- bucket_ = java.util.Collections.unmodifiableList(bucket_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Histogram.class, io.prometheus.client.Metrics.Histogram.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Histogram> PARSER =
- new com.google.protobuf.AbstractParser<Histogram>() {
- public Histogram parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Histogram(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Histogram> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- public static final int SAMPLE_COUNT_FIELD_NUMBER = 1;
- private long sampleCount_;
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public boolean hasSampleCount() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public long getSampleCount() {
- return sampleCount_;
- }
-
- public static final int SAMPLE_SUM_FIELD_NUMBER = 2;
- private double sampleSum_;
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public boolean hasSampleSum() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public double getSampleSum() {
- return sampleSum_;
- }
-
- public static final int BUCKET_FIELD_NUMBER = 3;
- private java.util.List<io.prometheus.client.Metrics.Bucket> bucket_;
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public java.util.List<io.prometheus.client.Metrics.Bucket> getBucketList() {
- return bucket_;
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public java.util.List<? extends io.prometheus.client.Metrics.BucketOrBuilder>
- getBucketOrBuilderList() {
- return bucket_;
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public int getBucketCount() {
- return bucket_.size();
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public io.prometheus.client.Metrics.Bucket getBucket(int index) {
- return bucket_.get(index);
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public io.prometheus.client.Metrics.BucketOrBuilder getBucketOrBuilder(
- int index) {
- return bucket_.get(index);
- }
-
- private void initFields() {
- sampleCount_ = 0L;
- sampleSum_ = 0D;
- bucket_ = java.util.Collections.emptyList();
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized == 1) return true;
- if (isInitialized == 0) return false;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeUInt64(1, sampleCount_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeDouble(2, sampleSum_);
- }
- for (int i = 0; i < bucket_.size(); i++) {
- output.writeMessage(3, bucket_.get(i));
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(1, sampleCount_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeDoubleSize(2, sampleSum_);
- }
- for (int i = 0; i < bucket_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(3, bucket_.get(i));
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static io.prometheus.client.Metrics.Histogram parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Histogram parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Histogram parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Histogram parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Histogram parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Histogram parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Histogram parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static io.prometheus.client.Metrics.Histogram parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Histogram parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Histogram parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(io.prometheus.client.Metrics.Histogram prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code io.prometheus.client.Histogram}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder> implements
- // @@protoc_insertion_point(builder_implements:io.prometheus.client.Histogram)
- io.prometheus.client.Metrics.HistogramOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Histogram.class, io.prometheus.client.Metrics.Histogram.Builder.class);
- }
-
- // Construct using io.prometheus.client.Metrics.Histogram.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getBucketFieldBuilder();
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- sampleCount_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000001);
- sampleSum_ = 0D;
- bitField0_ = (bitField0_ & ~0x00000002);
- if (bucketBuilder_ == null) {
- bucket_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
- } else {
- bucketBuilder_.clear();
- }
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_descriptor;
- }
-
- public io.prometheus.client.Metrics.Histogram getDefaultInstanceForType() {
- return io.prometheus.client.Metrics.Histogram.getDefaultInstance();
- }
-
- public io.prometheus.client.Metrics.Histogram build() {
- io.prometheus.client.Metrics.Histogram result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public io.prometheus.client.Metrics.Histogram buildPartial() {
- io.prometheus.client.Metrics.Histogram result = new io.prometheus.client.Metrics.Histogram(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.sampleCount_ = sampleCount_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.sampleSum_ = sampleSum_;
- if (bucketBuilder_ == null) {
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- bucket_ = java.util.Collections.unmodifiableList(bucket_);
- bitField0_ = (bitField0_ & ~0x00000004);
- }
- result.bucket_ = bucket_;
- } else {
- result.bucket_ = bucketBuilder_.build();
- }
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof io.prometheus.client.Metrics.Histogram) {
- return mergeFrom((io.prometheus.client.Metrics.Histogram)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(io.prometheus.client.Metrics.Histogram other) {
- if (other == io.prometheus.client.Metrics.Histogram.getDefaultInstance()) return this;
- if (other.hasSampleCount()) {
- setSampleCount(other.getSampleCount());
- }
- if (other.hasSampleSum()) {
- setSampleSum(other.getSampleSum());
- }
- if (bucketBuilder_ == null) {
- if (!other.bucket_.isEmpty()) {
- if (bucket_.isEmpty()) {
- bucket_ = other.bucket_;
- bitField0_ = (bitField0_ & ~0x00000004);
- } else {
- ensureBucketIsMutable();
- bucket_.addAll(other.bucket_);
- }
- onChanged();
- }
- } else {
- if (!other.bucket_.isEmpty()) {
- if (bucketBuilder_.isEmpty()) {
- bucketBuilder_.dispose();
- bucketBuilder_ = null;
- bucket_ = other.bucket_;
- bitField0_ = (bitField0_ & ~0x00000004);
- bucketBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getBucketFieldBuilder() : null;
- } else {
- bucketBuilder_.addAllMessages(other.bucket_);
- }
- }
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- io.prometheus.client.Metrics.Histogram parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (io.prometheus.client.Metrics.Histogram) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- private long sampleCount_ ;
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public boolean hasSampleCount() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public long getSampleCount() {
- return sampleCount_;
- }
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public Builder setSampleCount(long value) {
- bitField0_ |= 0x00000001;
- sampleCount_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional uint64 sample_count = 1;</code>
- */
- public Builder clearSampleCount() {
- bitField0_ = (bitField0_ & ~0x00000001);
- sampleCount_ = 0L;
- onChanged();
- return this;
- }
-
- private double sampleSum_ ;
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public boolean hasSampleSum() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public double getSampleSum() {
- return sampleSum_;
- }
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public Builder setSampleSum(double value) {
- bitField0_ |= 0x00000002;
- sampleSum_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional double sample_sum = 2;</code>
- */
- public Builder clearSampleSum() {
- bitField0_ = (bitField0_ & ~0x00000002);
- sampleSum_ = 0D;
- onChanged();
- return this;
- }
-
- private java.util.List<io.prometheus.client.Metrics.Bucket> bucket_ =
- java.util.Collections.emptyList();
- private void ensureBucketIsMutable() {
- if (!((bitField0_ & 0x00000004) == 0x00000004)) {
- bucket_ = new java.util.ArrayList<io.prometheus.client.Metrics.Bucket>(bucket_);
- bitField0_ |= 0x00000004;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.Bucket, io.prometheus.client.Metrics.Bucket.Builder, io.prometheus.client.Metrics.BucketOrBuilder> bucketBuilder_;
-
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public java.util.List<io.prometheus.client.Metrics.Bucket> getBucketList() {
- if (bucketBuilder_ == null) {
- return java.util.Collections.unmodifiableList(bucket_);
- } else {
- return bucketBuilder_.getMessageList();
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public int getBucketCount() {
- if (bucketBuilder_ == null) {
- return bucket_.size();
- } else {
- return bucketBuilder_.getCount();
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public io.prometheus.client.Metrics.Bucket getBucket(int index) {
- if (bucketBuilder_ == null) {
- return bucket_.get(index);
- } else {
- return bucketBuilder_.getMessage(index);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public Builder setBucket(
- int index, io.prometheus.client.Metrics.Bucket value) {
- if (bucketBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureBucketIsMutable();
- bucket_.set(index, value);
- onChanged();
- } else {
- bucketBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public Builder setBucket(
- int index, io.prometheus.client.Metrics.Bucket.Builder builderForValue) {
- if (bucketBuilder_ == null) {
- ensureBucketIsMutable();
- bucket_.set(index, builderForValue.build());
- onChanged();
- } else {
- bucketBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public Builder addBucket(io.prometheus.client.Metrics.Bucket value) {
- if (bucketBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureBucketIsMutable();
- bucket_.add(value);
- onChanged();
- } else {
- bucketBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public Builder addBucket(
- int index, io.prometheus.client.Metrics.Bucket value) {
- if (bucketBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureBucketIsMutable();
- bucket_.add(index, value);
- onChanged();
- } else {
- bucketBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public Builder addBucket(
- io.prometheus.client.Metrics.Bucket.Builder builderForValue) {
- if (bucketBuilder_ == null) {
- ensureBucketIsMutable();
- bucket_.add(builderForValue.build());
- onChanged();
- } else {
- bucketBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public Builder addBucket(
- int index, io.prometheus.client.Metrics.Bucket.Builder builderForValue) {
- if (bucketBuilder_ == null) {
- ensureBucketIsMutable();
- bucket_.add(index, builderForValue.build());
- onChanged();
- } else {
- bucketBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public Builder addAllBucket(
- java.lang.Iterable<? extends io.prometheus.client.Metrics.Bucket> values) {
- if (bucketBuilder_ == null) {
- ensureBucketIsMutable();
- com.google.protobuf.AbstractMessageLite.Builder.addAll(
- values, bucket_);
- onChanged();
- } else {
- bucketBuilder_.addAllMessages(values);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public Builder clearBucket() {
- if (bucketBuilder_ == null) {
- bucket_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
- onChanged();
- } else {
- bucketBuilder_.clear();
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public Builder removeBucket(int index) {
- if (bucketBuilder_ == null) {
- ensureBucketIsMutable();
- bucket_.remove(index);
- onChanged();
- } else {
- bucketBuilder_.remove(index);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public io.prometheus.client.Metrics.Bucket.Builder getBucketBuilder(
- int index) {
- return getBucketFieldBuilder().getBuilder(index);
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public io.prometheus.client.Metrics.BucketOrBuilder getBucketOrBuilder(
- int index) {
- if (bucketBuilder_ == null) {
- return bucket_.get(index); } else {
- return bucketBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public java.util.List<? extends io.prometheus.client.Metrics.BucketOrBuilder>
- getBucketOrBuilderList() {
- if (bucketBuilder_ != null) {
- return bucketBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(bucket_);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public io.prometheus.client.Metrics.Bucket.Builder addBucketBuilder() {
- return getBucketFieldBuilder().addBuilder(
- io.prometheus.client.Metrics.Bucket.getDefaultInstance());
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public io.prometheus.client.Metrics.Bucket.Builder addBucketBuilder(
- int index) {
- return getBucketFieldBuilder().addBuilder(
- index, io.prometheus.client.Metrics.Bucket.getDefaultInstance());
- }
- /**
- * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
- *
- * <pre>
- * Ordered in increasing order of upper_bound, +Inf bucket is optional.
- * </pre>
- */
- public java.util.List<io.prometheus.client.Metrics.Bucket.Builder>
- getBucketBuilderList() {
- return getBucketFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.Bucket, io.prometheus.client.Metrics.Bucket.Builder, io.prometheus.client.Metrics.BucketOrBuilder>
- getBucketFieldBuilder() {
- if (bucketBuilder_ == null) {
- bucketBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.Bucket, io.prometheus.client.Metrics.Bucket.Builder, io.prometheus.client.Metrics.BucketOrBuilder>(
- bucket_,
- ((bitField0_ & 0x00000004) == 0x00000004),
- getParentForChildren(),
- isClean());
- bucket_ = null;
- }
- return bucketBuilder_;
- }
-
- // @@protoc_insertion_point(builder_scope:io.prometheus.client.Histogram)
- }
-
- static {
- defaultInstance = new Histogram(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:io.prometheus.client.Histogram)
- }
-
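The bucket field comments above ("Ordered in increasing order of upper_bound, +Inf bucket is optional", with cumulative_count "Cumulative in increasing order" and upper_bound "Inclusive") imply a specific shape for well-formed data. A sketch that honors those invariants (hypothetical HistogramExample class; all builder methods used here appear in the generated code above and in the Bucket class below):

import io.prometheus.client.Metrics;

public class HistogramExample {
    public static void main(String[] args) {
        // Two cumulative buckets in increasing upper_bound order; counts are
        // running totals, so the final (+Inf) bucket carries the full count.
        Metrics.Histogram histogram = Metrics.Histogram.newBuilder()
            .setSampleCount(10)
            .setSampleSum(99.5)
            .addBucket(Metrics.Bucket.newBuilder()
                .setUpperBound(0.5)
                .setCumulativeCount(4))   // 4 samples <= 0.5
            .addBucket(Metrics.Bucket.newBuilder()
                .setUpperBound(Double.POSITIVE_INFINITY)
                .setCumulativeCount(10))  // all 10 samples
            .build();

        System.out.println(histogram.getBucketCount()); // 2
    }
}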
- public interface BucketOrBuilder extends
- // @@protoc_insertion_point(interface_extends:io.prometheus.client.Bucket)
- com.google.protobuf.MessageOrBuilder {
-
- /**
- * <code>optional uint64 cumulative_count = 1;</code>
- *
- * <pre>
- * Cumulative in increasing order.
- * </pre>
- */
- boolean hasCumulativeCount();
- /**
- * <code>optional uint64 cumulative_count = 1;</code>
- *
- * <pre>
- * Cumulative in increasing order.
- * </pre>
- */
- long getCumulativeCount();
-
- /**
- * <code>optional double upper_bound = 2;</code>
- *
- * <pre>
- * Inclusive.
- * </pre>
- */
- boolean hasUpperBound();
- /**
- * <code>optional double upper_bound = 2;</code>
- *
- * <pre>
- * Inclusive.
- * </pre>
- */
- double getUpperBound();
- }
- /**
- * Protobuf type {@code io.prometheus.client.Bucket}
- */
- public static final class Bucket extends
- com.google.protobuf.GeneratedMessage implements
- // @@protoc_insertion_point(message_implements:io.prometheus.client.Bucket)
- BucketOrBuilder {
- // Use Bucket.newBuilder() to construct.
- private Bucket(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Bucket(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Bucket defaultInstance;
- public static Bucket getDefaultInstance() {
- return defaultInstance;
- }
-
- public Bucket getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Bucket(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 8: {
- bitField0_ |= 0x00000001;
- cumulativeCount_ = input.readUInt64();
- break;
- }
- case 17: {
- bitField0_ |= 0x00000002;
- upperBound_ = input.readDouble();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Bucket.class, io.prometheus.client.Metrics.Bucket.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Bucket> PARSER =
- new com.google.protobuf.AbstractParser<Bucket>() {
- public Bucket parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Bucket(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Bucket> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- public static final int CUMULATIVE_COUNT_FIELD_NUMBER = 1;
- private long cumulativeCount_;
- /**
- * <code>optional uint64 cumulative_count = 1;</code>
- *
- * <pre>
- * Cumulative in increasing order.
- * </pre>
- */
- public boolean hasCumulativeCount() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional uint64 cumulative_count = 1;</code>
- *
- * <pre>
- * Cumulative in increasing order.
- * </pre>
- */
- public long getCumulativeCount() {
- return cumulativeCount_;
- }
-
- public static final int UPPER_BOUND_FIELD_NUMBER = 2;
- private double upperBound_;
- /**
- * <code>optional double upper_bound = 2;</code>
- *
- * <pre>
- * Inclusive.
- * </pre>
- */
- public boolean hasUpperBound() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional double upper_bound = 2;</code>
- *
- * <pre>
- * Inclusive.
- * </pre>
- */
- public double getUpperBound() {
- return upperBound_;
- }
-
- private void initFields() {
- cumulativeCount_ = 0L;
- upperBound_ = 0D;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized == 1) return true;
- if (isInitialized == 0) return false;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeUInt64(1, cumulativeCount_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeDouble(2, upperBound_);
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(1, cumulativeCount_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeDoubleSize(2, upperBound_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static io.prometheus.client.Metrics.Bucket parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Bucket parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Bucket parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Bucket parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Bucket parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Bucket parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Bucket parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static io.prometheus.client.Metrics.Bucket parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Bucket parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Bucket parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(io.prometheus.client.Metrics.Bucket prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code io.prometheus.client.Bucket}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder> implements
- // @@protoc_insertion_point(builder_implements:io.prometheus.client.Bucket)
- io.prometheus.client.Metrics.BucketOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Bucket.class, io.prometheus.client.Metrics.Bucket.Builder.class);
- }
-
- // Construct using io.prometheus.client.Metrics.Bucket.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- cumulativeCount_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000001);
- upperBound_ = 0D;
- bitField0_ = (bitField0_ & ~0x00000002);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_descriptor;
- }
-
- public io.prometheus.client.Metrics.Bucket getDefaultInstanceForType() {
- return io.prometheus.client.Metrics.Bucket.getDefaultInstance();
- }
-
- public io.prometheus.client.Metrics.Bucket build() {
- io.prometheus.client.Metrics.Bucket result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public io.prometheus.client.Metrics.Bucket buildPartial() {
- io.prometheus.client.Metrics.Bucket result = new io.prometheus.client.Metrics.Bucket(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.cumulativeCount_ = cumulativeCount_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.upperBound_ = upperBound_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof io.prometheus.client.Metrics.Bucket) {
- return mergeFrom((io.prometheus.client.Metrics.Bucket)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(io.prometheus.client.Metrics.Bucket other) {
- if (other == io.prometheus.client.Metrics.Bucket.getDefaultInstance()) return this;
- if (other.hasCumulativeCount()) {
- setCumulativeCount(other.getCumulativeCount());
- }
- if (other.hasUpperBound()) {
- setUpperBound(other.getUpperBound());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- io.prometheus.client.Metrics.Bucket parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (io.prometheus.client.Metrics.Bucket) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- private long cumulativeCount_ ;
- /**
- * <code>optional uint64 cumulative_count = 1;</code>
- *
- * <pre>
- * Cumulative in increasing order.
- * </pre>
- */
- public boolean hasCumulativeCount() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional uint64 cumulative_count = 1;</code>
- *
- * <pre>
- * Cumulative in increasing order.
- * </pre>
- */
- public long getCumulativeCount() {
- return cumulativeCount_;
- }
- /**
- * <code>optional uint64 cumulative_count = 1;</code>
- *
- * <pre>
- * Cumulative in increasing order.
- * </pre>
- */
- public Builder setCumulativeCount(long value) {
- bitField0_ |= 0x00000001;
- cumulativeCount_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional uint64 cumulative_count = 1;</code>
- *
- * <pre>
- * Cumulative in increasing order.
- * </pre>
- */
- public Builder clearCumulativeCount() {
- bitField0_ = (bitField0_ & ~0x00000001);
- cumulativeCount_ = 0L;
- onChanged();
- return this;
- }
-
- private double upperBound_ ;
- /**
- * <code>optional double upper_bound = 2;</code>
- *
- * <pre>
- * Inclusive.
- * </pre>
- */
- public boolean hasUpperBound() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional double upper_bound = 2;</code>
- *
- * <pre>
- * Inclusive.
- * </pre>
- */
- public double getUpperBound() {
- return upperBound_;
- }
- /**
- * <code>optional double upper_bound = 2;</code>
- *
- * <pre>
- * Inclusive.
- * </pre>
- */
- public Builder setUpperBound(double value) {
- bitField0_ |= 0x00000002;
- upperBound_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional double upper_bound = 2;</code>
- *
- * <pre>
- * Inclusive.
- * </pre>
- */
- public Builder clearUpperBound() {
- bitField0_ = (bitField0_ & ~0x00000002);
- upperBound_ = 0D;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:io.prometheus.client.Bucket)
- }
-
- static {
- defaultInstance = new Bucket(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:io.prometheus.client.Bucket)
- }
-
- public interface MetricOrBuilder extends
- // @@protoc_insertion_point(interface_extends:io.prometheus.client.Metric)
- com.google.protobuf.MessageOrBuilder {
-
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- java.util.List<io.prometheus.client.Metrics.LabelPair>
- getLabelList();
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- io.prometheus.client.Metrics.LabelPair getLabel(int index);
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- int getLabelCount();
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- java.util.List<? extends io.prometheus.client.Metrics.LabelPairOrBuilder>
- getLabelOrBuilderList();
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- io.prometheus.client.Metrics.LabelPairOrBuilder getLabelOrBuilder(
- int index);
-
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- boolean hasGauge();
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- io.prometheus.client.Metrics.Gauge getGauge();
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- io.prometheus.client.Metrics.GaugeOrBuilder getGaugeOrBuilder();
-
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- boolean hasCounter();
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- io.prometheus.client.Metrics.Counter getCounter();
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- io.prometheus.client.Metrics.CounterOrBuilder getCounterOrBuilder();
-
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- boolean hasSummary();
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- io.prometheus.client.Metrics.Summary getSummary();
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- io.prometheus.client.Metrics.SummaryOrBuilder getSummaryOrBuilder();
-
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- boolean hasUntyped();
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- io.prometheus.client.Metrics.Untyped getUntyped();
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- io.prometheus.client.Metrics.UntypedOrBuilder getUntypedOrBuilder();
-
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- boolean hasHistogram();
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- io.prometheus.client.Metrics.Histogram getHistogram();
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- io.prometheus.client.Metrics.HistogramOrBuilder getHistogramOrBuilder();
-
- /**
- * <code>optional int64 timestamp_ms = 6;</code>
- */
- boolean hasTimestampMs();
- /**
- * <code>optional int64 timestamp_ms = 6;</code>
- */
- long getTimestampMs();
- }
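MetricOrBuilder shows that Metric is effectively a tagged union with labels: normally exactly one of gauge/counter/summary/untyped/histogram is set, plus repeated LabelPair entries and an optional timestamp. A sketch of assembling one (hypothetical MetricExample class; LabelPair's name/value setters and the addLabel/setHistogram builder overloads are assumed from the standard generated pattern, since only their OrBuilder signatures appear above):

import io.prometheus.client.Metrics;

public class MetricExample {
    public static void main(String[] args) {
        Metrics.Metric metric = Metrics.Metric.newBuilder()
            .addLabel(Metrics.LabelPair.newBuilder()
                .setName("handler")
                .setValue("/metrics"))
            .setHistogram(Metrics.Histogram.newBuilder()
                .setSampleCount(3)
                .setSampleSum(0.75))
            .setTimestampMs(System.currentTimeMillis())
            .build();

        // Presence checks mirror the has* methods declared in MetricOrBuilder.
        System.out.println(metric.hasHistogram() + " " + metric.hasGauge()); // true false
    }
}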
- /**
- * Protobuf type {@code io.prometheus.client.Metric}
- */
- public static final class Metric extends
- com.google.protobuf.GeneratedMessage implements
- // @@protoc_insertion_point(message_implements:io.prometheus.client.Metric)
- MetricOrBuilder {
- // Use Metric.newBuilder() to construct.
- private Metric(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private Metric(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final Metric defaultInstance;
- public static Metric getDefaultInstance() {
- return defaultInstance;
- }
-
- public Metric getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private Metric(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
- label_ = new java.util.ArrayList<io.prometheus.client.Metrics.LabelPair>();
- mutable_bitField0_ |= 0x00000001;
- }
- label_.add(input.readMessage(io.prometheus.client.Metrics.LabelPair.PARSER, extensionRegistry));
- break;
- }
- case 18: {
- io.prometheus.client.Metrics.Gauge.Builder subBuilder = null;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- subBuilder = gauge_.toBuilder();
- }
- gauge_ = input.readMessage(io.prometheus.client.Metrics.Gauge.PARSER, extensionRegistry);
- if (subBuilder != null) {
- subBuilder.mergeFrom(gauge_);
- gauge_ = subBuilder.buildPartial();
- }
- bitField0_ |= 0x00000001;
- break;
- }
- case 26: {
- io.prometheus.client.Metrics.Counter.Builder subBuilder = null;
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- subBuilder = counter_.toBuilder();
- }
- counter_ = input.readMessage(io.prometheus.client.Metrics.Counter.PARSER, extensionRegistry);
- if (subBuilder != null) {
- subBuilder.mergeFrom(counter_);
- counter_ = subBuilder.buildPartial();
- }
- bitField0_ |= 0x00000002;
- break;
- }
- case 34: {
- io.prometheus.client.Metrics.Summary.Builder subBuilder = null;
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- subBuilder = summary_.toBuilder();
- }
- summary_ = input.readMessage(io.prometheus.client.Metrics.Summary.PARSER, extensionRegistry);
- if (subBuilder != null) {
- subBuilder.mergeFrom(summary_);
- summary_ = subBuilder.buildPartial();
- }
- bitField0_ |= 0x00000004;
- break;
- }
- case 42: {
- io.prometheus.client.Metrics.Untyped.Builder subBuilder = null;
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- subBuilder = untyped_.toBuilder();
- }
- untyped_ = input.readMessage(io.prometheus.client.Metrics.Untyped.PARSER, extensionRegistry);
- if (subBuilder != null) {
- subBuilder.mergeFrom(untyped_);
- untyped_ = subBuilder.buildPartial();
- }
- bitField0_ |= 0x00000008;
- break;
- }
- case 48: {
- bitField0_ |= 0x00000020;
- timestampMs_ = input.readInt64();
- break;
- }
- case 58: {
- io.prometheus.client.Metrics.Histogram.Builder subBuilder = null;
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- subBuilder = histogram_.toBuilder();
- }
- histogram_ = input.readMessage(io.prometheus.client.Metrics.Histogram.PARSER, extensionRegistry);
- if (subBuilder != null) {
- subBuilder.mergeFrom(histogram_);
- histogram_ = subBuilder.buildPartial();
- }
- bitField0_ |= 0x00000010;
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
- label_ = java.util.Collections.unmodifiableList(label_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
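// Two things worth noting in the parse loop that closes above. First, placing the
// default arm before the numbered cases is legal Java: switch arms match by value,
// not by position. Second, the case values follow the protobuf tag formula
// tag = (field_number << 3) | wire_type, with wire type 2 (length-delimited) for
// the message fields and wire type 0 (varint) for timestamp_ms:
//   label     (1) -> (1 << 3) | 2 = 10       summary      (4) -> 34
//   gauge     (2) -> (2 << 3) | 2 = 18       untyped      (5) -> 42
//   counter   (3) -> (3 << 3) | 2 = 26       timestamp_ms (6) -> (6 << 3) | 0 = 48
//                                            histogram    (7) -> (7 << 3) | 2 = 58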
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Metric.class, io.prometheus.client.Metrics.Metric.Builder.class);
- }
-
- public static com.google.protobuf.Parser<Metric> PARSER =
- new com.google.protobuf.AbstractParser<Metric>() {
- public Metric parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new Metric(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<Metric> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- public static final int LABEL_FIELD_NUMBER = 1;
- private java.util.List<io.prometheus.client.Metrics.LabelPair> label_;
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public java.util.List<io.prometheus.client.Metrics.LabelPair> getLabelList() {
- return label_;
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public java.util.List<? extends io.prometheus.client.Metrics.LabelPairOrBuilder>
- getLabelOrBuilderList() {
- return label_;
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public int getLabelCount() {
- return label_.size();
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public io.prometheus.client.Metrics.LabelPair getLabel(int index) {
- return label_.get(index);
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public io.prometheus.client.Metrics.LabelPairOrBuilder getLabelOrBuilder(
- int index) {
- return label_.get(index);
- }
-
- public static final int GAUGE_FIELD_NUMBER = 2;
- private io.prometheus.client.Metrics.Gauge gauge_;
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- public boolean hasGauge() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- public io.prometheus.client.Metrics.Gauge getGauge() {
- return gauge_;
- }
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- public io.prometheus.client.Metrics.GaugeOrBuilder getGaugeOrBuilder() {
- return gauge_;
- }
-
- public static final int COUNTER_FIELD_NUMBER = 3;
- private io.prometheus.client.Metrics.Counter counter_;
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- public boolean hasCounter() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- public io.prometheus.client.Metrics.Counter getCounter() {
- return counter_;
- }
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- public io.prometheus.client.Metrics.CounterOrBuilder getCounterOrBuilder() {
- return counter_;
- }
-
- public static final int SUMMARY_FIELD_NUMBER = 4;
- private io.prometheus.client.Metrics.Summary summary_;
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- public boolean hasSummary() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- public io.prometheus.client.Metrics.Summary getSummary() {
- return summary_;
- }
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- public io.prometheus.client.Metrics.SummaryOrBuilder getSummaryOrBuilder() {
- return summary_;
- }
-
- public static final int UNTYPED_FIELD_NUMBER = 5;
- private io.prometheus.client.Metrics.Untyped untyped_;
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- public boolean hasUntyped() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- public io.prometheus.client.Metrics.Untyped getUntyped() {
- return untyped_;
- }
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- public io.prometheus.client.Metrics.UntypedOrBuilder getUntypedOrBuilder() {
- return untyped_;
- }
-
- public static final int HISTOGRAM_FIELD_NUMBER = 7;
- private io.prometheus.client.Metrics.Histogram histogram_;
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- public boolean hasHistogram() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- public io.prometheus.client.Metrics.Histogram getHistogram() {
- return histogram_;
- }
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- public io.prometheus.client.Metrics.HistogramOrBuilder getHistogramOrBuilder() {
- return histogram_;
- }
-
- public static final int TIMESTAMP_MS_FIELD_NUMBER = 6;
- private long timestampMs_;
- /**
- * <code>optional int64 timestamp_ms = 6;</code>
- */
- public boolean hasTimestampMs() {
- return ((bitField0_ & 0x00000020) == 0x00000020);
- }
- /**
- * <code>optional int64 timestamp_ms = 6;</code>
- */
- public long getTimestampMs() {
- return timestampMs_;
- }
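// Note the mask values in the hazzers above: histogram (field 7) tests 0x10 while
// timestamp_ms (field 6) tests 0x20. bitField0_ bits are assigned in the order the
// fields are declared in metrics.proto (where histogram precedes timestamp_ms),
// not in field-number order, which is why the two appear swapped.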
-
- private void initFields() {
- label_ = java.util.Collections.emptyList();
- gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance();
- counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance();
- summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance();
- untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance();
- histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance();
- timestampMs_ = 0L;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized == 1) return true;
- if (isInitialized == 0) return false;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- for (int i = 0; i < label_.size(); i++) {
- output.writeMessage(1, label_.get(i));
- }
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeMessage(2, gauge_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeMessage(3, counter_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeMessage(4, summary_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- output.writeMessage(5, untyped_);
- }
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
- output.writeInt64(6, timestampMs_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- output.writeMessage(7, histogram_);
- }
- getUnknownFields().writeTo(output);
- }
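// The bare getSerializedSize() call at the top of writeTo() above is not dead
// code: it memoizes this message's size, and recursively the sizes of its nested
// messages, so the length prefix of each length-delimited field can be written
// without recomputing sizes mid-stream.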
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- for (int i = 0; i < label_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(1, label_.get(i));
- }
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(2, gauge_);
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(3, counter_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(4, summary_);
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(5, untyped_);
- }
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt64Size(6, timestampMs_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(7, histogram_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static io.prometheus.client.Metrics.Metric parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Metric parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Metric parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.Metric parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Metric parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Metric parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Metric parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static io.prometheus.client.Metrics.Metric parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.Metric parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.Metric parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
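// Together, writeTo()/getSerializedSize() and the parseFrom() overloads above give
// the usual protobuf round trip. A minimal sketch; toByteArray() is inherited from
// the runtime's AbstractMessageLite rather than declared in this file, and
// parseFrom(byte[]) throws InvalidProtocolBufferException:
io.prometheus.client.Metrics.Metric m =
    io.prometheus.client.Metrics.Metric.newBuilder()
        .setTimestampMs(1517263200000L)
        .build();
byte[] wire = m.toByteArray();
io.prometheus.client.Metrics.Metric parsed =
    io.prometheus.client.Metrics.Metric.parseFrom(wire);
assert parsed.getTimestampMs() == m.getTimestampMs();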
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(io.prometheus.client.Metrics.Metric prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code io.prometheus.client.Metric}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder> implements
- // @@protoc_insertion_point(builder_implements:io.prometheus.client.Metric)
- io.prometheus.client.Metrics.MetricOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.Metric.class, io.prometheus.client.Metrics.Metric.Builder.class);
- }
-
- // Construct using io.prometheus.client.Metrics.Metric.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getLabelFieldBuilder();
- getGaugeFieldBuilder();
- getCounterFieldBuilder();
- getSummaryFieldBuilder();
- getUntypedFieldBuilder();
- getHistogramFieldBuilder();
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- if (labelBuilder_ == null) {
- label_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000001);
- } else {
- labelBuilder_.clear();
- }
- if (gaugeBuilder_ == null) {
- gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance();
- } else {
- gaugeBuilder_.clear();
- }
- bitField0_ = (bitField0_ & ~0x00000002);
- if (counterBuilder_ == null) {
- counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance();
- } else {
- counterBuilder_.clear();
- }
- bitField0_ = (bitField0_ & ~0x00000004);
- if (summaryBuilder_ == null) {
- summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance();
- } else {
- summaryBuilder_.clear();
- }
- bitField0_ = (bitField0_ & ~0x00000008);
- if (untypedBuilder_ == null) {
- untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance();
- } else {
- untypedBuilder_.clear();
- }
- bitField0_ = (bitField0_ & ~0x00000010);
- if (histogramBuilder_ == null) {
- histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance();
- } else {
- histogramBuilder_.clear();
- }
- bitField0_ = (bitField0_ & ~0x00000020);
- timestampMs_ = 0L;
- bitField0_ = (bitField0_ & ~0x00000040);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_descriptor;
- }
-
- public io.prometheus.client.Metrics.Metric getDefaultInstanceForType() {
- return io.prometheus.client.Metrics.Metric.getDefaultInstance();
- }
-
- public io.prometheus.client.Metrics.Metric build() {
- io.prometheus.client.Metrics.Metric result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public io.prometheus.client.Metrics.Metric buildPartial() {
- io.prometheus.client.Metrics.Metric result = new io.prometheus.client.Metrics.Metric(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (labelBuilder_ == null) {
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- label_ = java.util.Collections.unmodifiableList(label_);
- bitField0_ = (bitField0_ & ~0x00000001);
- }
- result.label_ = label_;
- } else {
- result.label_ = labelBuilder_.build();
- }
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000001;
- }
- if (gaugeBuilder_ == null) {
- result.gauge_ = gauge_;
- } else {
- result.gauge_ = gaugeBuilder_.build();
- }
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
- to_bitField0_ |= 0x00000002;
- }
- if (counterBuilder_ == null) {
- result.counter_ = counter_;
- } else {
- result.counter_ = counterBuilder_.build();
- }
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
- to_bitField0_ |= 0x00000004;
- }
- if (summaryBuilder_ == null) {
- result.summary_ = summary_;
- } else {
- result.summary_ = summaryBuilder_.build();
- }
- if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
- to_bitField0_ |= 0x00000008;
- }
- if (untypedBuilder_ == null) {
- result.untyped_ = untyped_;
- } else {
- result.untyped_ = untypedBuilder_.build();
- }
- if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
- to_bitField0_ |= 0x00000010;
- }
- if (histogramBuilder_ == null) {
- result.histogram_ = histogram_;
- } else {
- result.histogram_ = histogramBuilder_.build();
- }
- if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
- to_bitField0_ |= 0x00000020;
- }
- result.timestampMs_ = timestampMs_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof io.prometheus.client.Metrics.Metric) {
- return mergeFrom((io.prometheus.client.Metrics.Metric)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(io.prometheus.client.Metrics.Metric other) {
- if (other == io.prometheus.client.Metrics.Metric.getDefaultInstance()) return this;
- if (labelBuilder_ == null) {
- if (!other.label_.isEmpty()) {
- if (label_.isEmpty()) {
- label_ = other.label_;
- bitField0_ = (bitField0_ & ~0x00000001);
- } else {
- ensureLabelIsMutable();
- label_.addAll(other.label_);
- }
- onChanged();
- }
- } else {
- if (!other.label_.isEmpty()) {
- if (labelBuilder_.isEmpty()) {
- labelBuilder_.dispose();
- labelBuilder_ = null;
- label_ = other.label_;
- bitField0_ = (bitField0_ & ~0x00000001);
- labelBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getLabelFieldBuilder() : null;
- } else {
- labelBuilder_.addAllMessages(other.label_);
- }
- }
- }
- if (other.hasGauge()) {
- mergeGauge(other.getGauge());
- }
- if (other.hasCounter()) {
- mergeCounter(other.getCounter());
- }
- if (other.hasSummary()) {
- mergeSummary(other.getSummary());
- }
- if (other.hasUntyped()) {
- mergeUntyped(other.getUntyped());
- }
- if (other.hasHistogram()) {
- mergeHistogram(other.getHistogram());
- }
- if (other.hasTimestampMs()) {
- setTimestampMs(other.getTimestampMs());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- io.prometheus.client.Metrics.Metric parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (io.prometheus.client.Metrics.Metric) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
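// Note the finally block above: even when parsing fails, any fields decoded before
// the exception are recovered via getUnfinishedMessage() and merged into this
// builder before the exception propagates, so a streaming merge is best-effort
// rather than all-or-nothing.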
- private int bitField0_;
-
- private java.util.List<io.prometheus.client.Metrics.LabelPair> label_ =
- java.util.Collections.emptyList();
- private void ensureLabelIsMutable() {
- if (!((bitField0_ & 0x00000001) == 0x00000001)) {
- label_ = new java.util.ArrayList<io.prometheus.client.Metrics.LabelPair>(label_);
- bitField0_ |= 0x00000001;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.LabelPair, io.prometheus.client.Metrics.LabelPair.Builder, io.prometheus.client.Metrics.LabelPairOrBuilder> labelBuilder_;
-
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public java.util.List<io.prometheus.client.Metrics.LabelPair> getLabelList() {
- if (labelBuilder_ == null) {
- return java.util.Collections.unmodifiableList(label_);
- } else {
- return labelBuilder_.getMessageList();
- }
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public int getLabelCount() {
- if (labelBuilder_ == null) {
- return label_.size();
- } else {
- return labelBuilder_.getCount();
- }
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public io.prometheus.client.Metrics.LabelPair getLabel(int index) {
- if (labelBuilder_ == null) {
- return label_.get(index);
- } else {
- return labelBuilder_.getMessage(index);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public Builder setLabel(
- int index, io.prometheus.client.Metrics.LabelPair value) {
- if (labelBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureLabelIsMutable();
- label_.set(index, value);
- onChanged();
- } else {
- labelBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public Builder setLabel(
- int index, io.prometheus.client.Metrics.LabelPair.Builder builderForValue) {
- if (labelBuilder_ == null) {
- ensureLabelIsMutable();
- label_.set(index, builderForValue.build());
- onChanged();
- } else {
- labelBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public Builder addLabel(io.prometheus.client.Metrics.LabelPair value) {
- if (labelBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureLabelIsMutable();
- label_.add(value);
- onChanged();
- } else {
- labelBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public Builder addLabel(
- int index, io.prometheus.client.Metrics.LabelPair value) {
- if (labelBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureLabelIsMutable();
- label_.add(index, value);
- onChanged();
- } else {
- labelBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public Builder addLabel(
- io.prometheus.client.Metrics.LabelPair.Builder builderForValue) {
- if (labelBuilder_ == null) {
- ensureLabelIsMutable();
- label_.add(builderForValue.build());
- onChanged();
- } else {
- labelBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public Builder addLabel(
- int index, io.prometheus.client.Metrics.LabelPair.Builder builderForValue) {
- if (labelBuilder_ == null) {
- ensureLabelIsMutable();
- label_.add(index, builderForValue.build());
- onChanged();
- } else {
- labelBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public Builder addAllLabel(
- java.lang.Iterable<? extends io.prometheus.client.Metrics.LabelPair> values) {
- if (labelBuilder_ == null) {
- ensureLabelIsMutable();
- com.google.protobuf.AbstractMessageLite.Builder.addAll(
- values, label_);
- onChanged();
- } else {
- labelBuilder_.addAllMessages(values);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public Builder clearLabel() {
- if (labelBuilder_ == null) {
- label_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000001);
- onChanged();
- } else {
- labelBuilder_.clear();
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public Builder removeLabel(int index) {
- if (labelBuilder_ == null) {
- ensureLabelIsMutable();
- label_.remove(index);
- onChanged();
- } else {
- labelBuilder_.remove(index);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public io.prometheus.client.Metrics.LabelPair.Builder getLabelBuilder(
- int index) {
- return getLabelFieldBuilder().getBuilder(index);
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public io.prometheus.client.Metrics.LabelPairOrBuilder getLabelOrBuilder(
- int index) {
- if (labelBuilder_ == null) {
-          return label_.get(index);
-        } else {
- return labelBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public java.util.List<? extends io.prometheus.client.Metrics.LabelPairOrBuilder>
- getLabelOrBuilderList() {
- if (labelBuilder_ != null) {
- return labelBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(label_);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public io.prometheus.client.Metrics.LabelPair.Builder addLabelBuilder() {
- return getLabelFieldBuilder().addBuilder(
- io.prometheus.client.Metrics.LabelPair.getDefaultInstance());
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public io.prometheus.client.Metrics.LabelPair.Builder addLabelBuilder(
- int index) {
- return getLabelFieldBuilder().addBuilder(
- index, io.prometheus.client.Metrics.LabelPair.getDefaultInstance());
- }
- /**
- * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
- */
- public java.util.List<io.prometheus.client.Metrics.LabelPair.Builder>
- getLabelBuilderList() {
- return getLabelFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.LabelPair, io.prometheus.client.Metrics.LabelPair.Builder, io.prometheus.client.Metrics.LabelPairOrBuilder>
- getLabelFieldBuilder() {
- if (labelBuilder_ == null) {
- labelBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.LabelPair, io.prometheus.client.Metrics.LabelPair.Builder, io.prometheus.client.Metrics.LabelPairOrBuilder>(
- label_,
- ((bitField0_ & 0x00000001) == 0x00000001),
- getParentForChildren(),
- isClean());
- label_ = null;
- }
- return labelBuilder_;
- }
-
- private io.prometheus.client.Metrics.Gauge gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance();
- private com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Gauge, io.prometheus.client.Metrics.Gauge.Builder, io.prometheus.client.Metrics.GaugeOrBuilder> gaugeBuilder_;
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- public boolean hasGauge() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- public io.prometheus.client.Metrics.Gauge getGauge() {
- if (gaugeBuilder_ == null) {
- return gauge_;
- } else {
- return gaugeBuilder_.getMessage();
- }
- }
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- public Builder setGauge(io.prometheus.client.Metrics.Gauge value) {
- if (gaugeBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- gauge_ = value;
- onChanged();
- } else {
- gaugeBuilder_.setMessage(value);
- }
- bitField0_ |= 0x00000002;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- public Builder setGauge(
- io.prometheus.client.Metrics.Gauge.Builder builderForValue) {
- if (gaugeBuilder_ == null) {
- gauge_ = builderForValue.build();
- onChanged();
- } else {
- gaugeBuilder_.setMessage(builderForValue.build());
- }
- bitField0_ |= 0x00000002;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- public Builder mergeGauge(io.prometheus.client.Metrics.Gauge value) {
- if (gaugeBuilder_ == null) {
- if (((bitField0_ & 0x00000002) == 0x00000002) &&
- gauge_ != io.prometheus.client.Metrics.Gauge.getDefaultInstance()) {
- gauge_ =
- io.prometheus.client.Metrics.Gauge.newBuilder(gauge_).mergeFrom(value).buildPartial();
- } else {
- gauge_ = value;
- }
- onChanged();
- } else {
- gaugeBuilder_.mergeFrom(value);
- }
- bitField0_ |= 0x00000002;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- public Builder clearGauge() {
- if (gaugeBuilder_ == null) {
- gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance();
- onChanged();
- } else {
- gaugeBuilder_.clear();
- }
- bitField0_ = (bitField0_ & ~0x00000002);
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- public io.prometheus.client.Metrics.Gauge.Builder getGaugeBuilder() {
- bitField0_ |= 0x00000002;
- onChanged();
- return getGaugeFieldBuilder().getBuilder();
- }
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- public io.prometheus.client.Metrics.GaugeOrBuilder getGaugeOrBuilder() {
- if (gaugeBuilder_ != null) {
- return gaugeBuilder_.getMessageOrBuilder();
- } else {
- return gauge_;
- }
- }
- /**
- * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
- */
- private com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Gauge, io.prometheus.client.Metrics.Gauge.Builder, io.prometheus.client.Metrics.GaugeOrBuilder>
- getGaugeFieldBuilder() {
- if (gaugeBuilder_ == null) {
- gaugeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Gauge, io.prometheus.client.Metrics.Gauge.Builder, io.prometheus.client.Metrics.GaugeOrBuilder>(
- getGauge(),
- getParentForChildren(),
- isClean());
- gauge_ = null;
- }
- return gaugeBuilder_;
- }
-
- private io.prometheus.client.Metrics.Counter counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance();
- private com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Counter, io.prometheus.client.Metrics.Counter.Builder, io.prometheus.client.Metrics.CounterOrBuilder> counterBuilder_;
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- public boolean hasCounter() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- public io.prometheus.client.Metrics.Counter getCounter() {
- if (counterBuilder_ == null) {
- return counter_;
- } else {
- return counterBuilder_.getMessage();
- }
- }
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- public Builder setCounter(io.prometheus.client.Metrics.Counter value) {
- if (counterBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- counter_ = value;
- onChanged();
- } else {
- counterBuilder_.setMessage(value);
- }
- bitField0_ |= 0x00000004;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- public Builder setCounter(
- io.prometheus.client.Metrics.Counter.Builder builderForValue) {
- if (counterBuilder_ == null) {
- counter_ = builderForValue.build();
- onChanged();
- } else {
- counterBuilder_.setMessage(builderForValue.build());
- }
- bitField0_ |= 0x00000004;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- public Builder mergeCounter(io.prometheus.client.Metrics.Counter value) {
- if (counterBuilder_ == null) {
- if (((bitField0_ & 0x00000004) == 0x00000004) &&
- counter_ != io.prometheus.client.Metrics.Counter.getDefaultInstance()) {
- counter_ =
- io.prometheus.client.Metrics.Counter.newBuilder(counter_).mergeFrom(value).buildPartial();
- } else {
- counter_ = value;
- }
- onChanged();
- } else {
- counterBuilder_.mergeFrom(value);
- }
- bitField0_ |= 0x00000004;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- public Builder clearCounter() {
- if (counterBuilder_ == null) {
- counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance();
- onChanged();
- } else {
- counterBuilder_.clear();
- }
- bitField0_ = (bitField0_ & ~0x00000004);
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- public io.prometheus.client.Metrics.Counter.Builder getCounterBuilder() {
- bitField0_ |= 0x00000004;
- onChanged();
- return getCounterFieldBuilder().getBuilder();
- }
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- public io.prometheus.client.Metrics.CounterOrBuilder getCounterOrBuilder() {
- if (counterBuilder_ != null) {
- return counterBuilder_.getMessageOrBuilder();
- } else {
- return counter_;
- }
- }
- /**
- * <code>optional .io.prometheus.client.Counter counter = 3;</code>
- */
- private com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Counter, io.prometheus.client.Metrics.Counter.Builder, io.prometheus.client.Metrics.CounterOrBuilder>
- getCounterFieldBuilder() {
- if (counterBuilder_ == null) {
- counterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Counter, io.prometheus.client.Metrics.Counter.Builder, io.prometheus.client.Metrics.CounterOrBuilder>(
- getCounter(),
- getParentForChildren(),
- isClean());
- counter_ = null;
- }
- return counterBuilder_;
- }
-
- private io.prometheus.client.Metrics.Summary summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance();
- private com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Summary, io.prometheus.client.Metrics.Summary.Builder, io.prometheus.client.Metrics.SummaryOrBuilder> summaryBuilder_;
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- public boolean hasSummary() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- public io.prometheus.client.Metrics.Summary getSummary() {
- if (summaryBuilder_ == null) {
- return summary_;
- } else {
- return summaryBuilder_.getMessage();
- }
- }
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- public Builder setSummary(io.prometheus.client.Metrics.Summary value) {
- if (summaryBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- summary_ = value;
- onChanged();
- } else {
- summaryBuilder_.setMessage(value);
- }
- bitField0_ |= 0x00000008;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- public Builder setSummary(
- io.prometheus.client.Metrics.Summary.Builder builderForValue) {
- if (summaryBuilder_ == null) {
- summary_ = builderForValue.build();
- onChanged();
- } else {
- summaryBuilder_.setMessage(builderForValue.build());
- }
- bitField0_ |= 0x00000008;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- public Builder mergeSummary(io.prometheus.client.Metrics.Summary value) {
- if (summaryBuilder_ == null) {
- if (((bitField0_ & 0x00000008) == 0x00000008) &&
- summary_ != io.prometheus.client.Metrics.Summary.getDefaultInstance()) {
- summary_ =
- io.prometheus.client.Metrics.Summary.newBuilder(summary_).mergeFrom(value).buildPartial();
- } else {
- summary_ = value;
- }
- onChanged();
- } else {
- summaryBuilder_.mergeFrom(value);
- }
- bitField0_ |= 0x00000008;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- public Builder clearSummary() {
- if (summaryBuilder_ == null) {
- summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance();
- onChanged();
- } else {
- summaryBuilder_.clear();
- }
- bitField0_ = (bitField0_ & ~0x00000008);
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- public io.prometheus.client.Metrics.Summary.Builder getSummaryBuilder() {
- bitField0_ |= 0x00000008;
- onChanged();
- return getSummaryFieldBuilder().getBuilder();
- }
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- public io.prometheus.client.Metrics.SummaryOrBuilder getSummaryOrBuilder() {
- if (summaryBuilder_ != null) {
- return summaryBuilder_.getMessageOrBuilder();
- } else {
- return summary_;
- }
- }
- /**
- * <code>optional .io.prometheus.client.Summary summary = 4;</code>
- */
- private com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Summary, io.prometheus.client.Metrics.Summary.Builder, io.prometheus.client.Metrics.SummaryOrBuilder>
- getSummaryFieldBuilder() {
- if (summaryBuilder_ == null) {
- summaryBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Summary, io.prometheus.client.Metrics.Summary.Builder, io.prometheus.client.Metrics.SummaryOrBuilder>(
- getSummary(),
- getParentForChildren(),
- isClean());
- summary_ = null;
- }
- return summaryBuilder_;
- }
-
- private io.prometheus.client.Metrics.Untyped untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance();
- private com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Untyped, io.prometheus.client.Metrics.Untyped.Builder, io.prometheus.client.Metrics.UntypedOrBuilder> untypedBuilder_;
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- public boolean hasUntyped() {
- return ((bitField0_ & 0x00000010) == 0x00000010);
- }
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- public io.prometheus.client.Metrics.Untyped getUntyped() {
- if (untypedBuilder_ == null) {
- return untyped_;
- } else {
- return untypedBuilder_.getMessage();
- }
- }
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- public Builder setUntyped(io.prometheus.client.Metrics.Untyped value) {
- if (untypedBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- untyped_ = value;
- onChanged();
- } else {
- untypedBuilder_.setMessage(value);
- }
- bitField0_ |= 0x00000010;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- public Builder setUntyped(
- io.prometheus.client.Metrics.Untyped.Builder builderForValue) {
- if (untypedBuilder_ == null) {
- untyped_ = builderForValue.build();
- onChanged();
- } else {
- untypedBuilder_.setMessage(builderForValue.build());
- }
- bitField0_ |= 0x00000010;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- public Builder mergeUntyped(io.prometheus.client.Metrics.Untyped value) {
- if (untypedBuilder_ == null) {
- if (((bitField0_ & 0x00000010) == 0x00000010) &&
- untyped_ != io.prometheus.client.Metrics.Untyped.getDefaultInstance()) {
- untyped_ =
- io.prometheus.client.Metrics.Untyped.newBuilder(untyped_).mergeFrom(value).buildPartial();
- } else {
- untyped_ = value;
- }
- onChanged();
- } else {
- untypedBuilder_.mergeFrom(value);
- }
- bitField0_ |= 0x00000010;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- public Builder clearUntyped() {
- if (untypedBuilder_ == null) {
- untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance();
- onChanged();
- } else {
- untypedBuilder_.clear();
- }
- bitField0_ = (bitField0_ & ~0x00000010);
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- public io.prometheus.client.Metrics.Untyped.Builder getUntypedBuilder() {
- bitField0_ |= 0x00000010;
- onChanged();
- return getUntypedFieldBuilder().getBuilder();
- }
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- public io.prometheus.client.Metrics.UntypedOrBuilder getUntypedOrBuilder() {
- if (untypedBuilder_ != null) {
- return untypedBuilder_.getMessageOrBuilder();
- } else {
- return untyped_;
- }
- }
- /**
- * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
- */
- private com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Untyped, io.prometheus.client.Metrics.Untyped.Builder, io.prometheus.client.Metrics.UntypedOrBuilder>
- getUntypedFieldBuilder() {
- if (untypedBuilder_ == null) {
- untypedBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Untyped, io.prometheus.client.Metrics.Untyped.Builder, io.prometheus.client.Metrics.UntypedOrBuilder>(
- getUntyped(),
- getParentForChildren(),
- isClean());
- untyped_ = null;
- }
- return untypedBuilder_;
- }
-
- private io.prometheus.client.Metrics.Histogram histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance();
- private com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Histogram, io.prometheus.client.Metrics.Histogram.Builder, io.prometheus.client.Metrics.HistogramOrBuilder> histogramBuilder_;
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- public boolean hasHistogram() {
- return ((bitField0_ & 0x00000020) == 0x00000020);
- }
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- public io.prometheus.client.Metrics.Histogram getHistogram() {
- if (histogramBuilder_ == null) {
- return histogram_;
- } else {
- return histogramBuilder_.getMessage();
- }
- }
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- public Builder setHistogram(io.prometheus.client.Metrics.Histogram value) {
- if (histogramBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- histogram_ = value;
- onChanged();
- } else {
- histogramBuilder_.setMessage(value);
- }
- bitField0_ |= 0x00000020;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- public Builder setHistogram(
- io.prometheus.client.Metrics.Histogram.Builder builderForValue) {
- if (histogramBuilder_ == null) {
- histogram_ = builderForValue.build();
- onChanged();
- } else {
- histogramBuilder_.setMessage(builderForValue.build());
- }
- bitField0_ |= 0x00000020;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- public Builder mergeHistogram(io.prometheus.client.Metrics.Histogram value) {
- if (histogramBuilder_ == null) {
- if (((bitField0_ & 0x00000020) == 0x00000020) &&
- histogram_ != io.prometheus.client.Metrics.Histogram.getDefaultInstance()) {
- histogram_ =
- io.prometheus.client.Metrics.Histogram.newBuilder(histogram_).mergeFrom(value).buildPartial();
- } else {
- histogram_ = value;
- }
- onChanged();
- } else {
- histogramBuilder_.mergeFrom(value);
- }
- bitField0_ |= 0x00000020;
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- public Builder clearHistogram() {
- if (histogramBuilder_ == null) {
- histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance();
- onChanged();
- } else {
- histogramBuilder_.clear();
- }
- bitField0_ = (bitField0_ & ~0x00000020);
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- public io.prometheus.client.Metrics.Histogram.Builder getHistogramBuilder() {
- bitField0_ |= 0x00000020;
- onChanged();
- return getHistogramFieldBuilder().getBuilder();
- }
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- public io.prometheus.client.Metrics.HistogramOrBuilder getHistogramOrBuilder() {
- if (histogramBuilder_ != null) {
- return histogramBuilder_.getMessageOrBuilder();
- } else {
- return histogram_;
- }
- }
- /**
- * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
- */
- private com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Histogram, io.prometheus.client.Metrics.Histogram.Builder, io.prometheus.client.Metrics.HistogramOrBuilder>
- getHistogramFieldBuilder() {
- if (histogramBuilder_ == null) {
- histogramBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- io.prometheus.client.Metrics.Histogram, io.prometheus.client.Metrics.Histogram.Builder, io.prometheus.client.Metrics.HistogramOrBuilder>(
- getHistogram(),
- getParentForChildren(),
- isClean());
- histogram_ = null;
- }
- return histogramBuilder_;
- }
-
-      private long timestampMs_;

- /**
- * <code>optional int64 timestamp_ms = 6;</code>
- */
- public boolean hasTimestampMs() {
- return ((bitField0_ & 0x00000040) == 0x00000040);
- }
- /**
- * <code>optional int64 timestamp_ms = 6;</code>
- */
- public long getTimestampMs() {
- return timestampMs_;
- }
- /**
- * <code>optional int64 timestamp_ms = 6;</code>
- */
- public Builder setTimestampMs(long value) {
- bitField0_ |= 0x00000040;
- timestampMs_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional int64 timestamp_ms = 6;</code>
- */
- public Builder clearTimestampMs() {
- bitField0_ = (bitField0_ & ~0x00000040);
- timestampMs_ = 0L;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:io.prometheus.client.Metric)
- }
-
- static {
- defaultInstance = new Metric(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:io.prometheus.client.Metric)
- }
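// End of the generated Metric class. A hedged sketch of its builder API as
// declared above; LabelPair and Gauge are sibling messages in this same generated
// file, and their setName/setValue setters are assumed from metrics.proto rather
// than shown in this hunk:
io.prometheus.client.Metrics.Metric metric =
    io.prometheus.client.Metrics.Metric.newBuilder()
        .addLabel(io.prometheus.client.Metrics.LabelPair.newBuilder()
            .setName("handler")                 // assumed LabelPair setter
            .setValue("/api/v4/posts"))         // assumed LabelPair setter
        .setGauge(io.prometheus.client.Metrics.Gauge.newBuilder()
            .setValue(42.0))                    // assumed Gauge setter
        .setTimestampMs(System.currentTimeMillis())
        .build();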
-
- public interface MetricFamilyOrBuilder extends
- // @@protoc_insertion_point(interface_extends:io.prometheus.client.MetricFamily)
- com.google.protobuf.MessageOrBuilder {
-
- /**
- * <code>optional string name = 1;</code>
- */
- boolean hasName();
- /**
- * <code>optional string name = 1;</code>
- */
- java.lang.String getName();
- /**
- * <code>optional string name = 1;</code>
- */
- com.google.protobuf.ByteString
- getNameBytes();
-
- /**
- * <code>optional string help = 2;</code>
- */
- boolean hasHelp();
- /**
- * <code>optional string help = 2;</code>
- */
- java.lang.String getHelp();
- /**
- * <code>optional string help = 2;</code>
- */
- com.google.protobuf.ByteString
- getHelpBytes();
-
- /**
- * <code>optional .io.prometheus.client.MetricType type = 3;</code>
- */
- boolean hasType();
- /**
- * <code>optional .io.prometheus.client.MetricType type = 3;</code>
- */
- io.prometheus.client.Metrics.MetricType getType();
-
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- java.util.List<io.prometheus.client.Metrics.Metric>
- getMetricList();
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- io.prometheus.client.Metrics.Metric getMetric(int index);
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- int getMetricCount();
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- java.util.List<? extends io.prometheus.client.Metrics.MetricOrBuilder>
- getMetricOrBuilderList();
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- io.prometheus.client.Metrics.MetricOrBuilder getMetricOrBuilder(
- int index);
- }
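// MetricFamilyOrBuilder mirrors the MetricOrBuilder pattern above: one family
// carries a shared name, help string and type for a repeated list of Metric
// messages. A small sketch of consuming a family through the accessors just
// declared (printFamily is a hypothetical name):
static void printFamily(io.prometheus.client.Metrics.MetricFamily family) {
  System.out.println(family.getName() + " (" + family.getType() + ")");
  for (io.prometheus.client.Metrics.Metric m : family.getMetricList()) {
    System.out.println("  " + m.getLabelList());
  }
}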
- /**
- * Protobuf type {@code io.prometheus.client.MetricFamily}
- */
- public static final class MetricFamily extends
- com.google.protobuf.GeneratedMessage implements
- // @@protoc_insertion_point(message_implements:io.prometheus.client.MetricFamily)
- MetricFamilyOrBuilder {
- // Use MetricFamily.newBuilder() to construct.
- private MetricFamily(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private MetricFamily(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final MetricFamily defaultInstance;
- public static MetricFamily getDefaultInstance() {
- return defaultInstance;
- }
-
- public MetricFamily getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private MetricFamily(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- com.google.protobuf.ByteString bs = input.readBytes();
- bitField0_ |= 0x00000001;
- name_ = bs;
- break;
- }
- case 18: {
- com.google.protobuf.ByteString bs = input.readBytes();
- bitField0_ |= 0x00000002;
- help_ = bs;
- break;
- }
- case 24: {
- int rawValue = input.readEnum();
- io.prometheus.client.Metrics.MetricType value = io.prometheus.client.Metrics.MetricType.valueOf(rawValue);
- if (value == null) {
- unknownFields.mergeVarintField(3, rawValue);
- } else {
- bitField0_ |= 0x00000004;
- type_ = value;
- }
- break;
- }
- case 34: {
- if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
- metric_ = new java.util.ArrayList<io.prometheus.client.Metrics.Metric>();
- mutable_bitField0_ |= 0x00000008;
- }
- metric_.add(input.readMessage(io.prometheus.client.Metrics.Metric.PARSER, extensionRegistry));
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
- metric_ = java.util.Collections.unmodifiableList(metric_);
- }
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
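// One wrinkle in the parse loop above relative to Metric's: an unrecognized
// MetricType ordinal is not an error. MetricType.valueOf(rawValue) returns null
// for values this schema version does not know, and the raw varint is kept in
// unknownFields via mergeVarintField(3, rawValue), so data written by a newer
// schema survives a round trip.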
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.MetricFamily.class, io.prometheus.client.Metrics.MetricFamily.Builder.class);
- }
-
- public static com.google.protobuf.Parser<MetricFamily> PARSER =
- new com.google.protobuf.AbstractParser<MetricFamily>() {
- public MetricFamily parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new MetricFamily(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<MetricFamily> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- public static final int NAME_FIELD_NUMBER = 1;
- private java.lang.Object name_;
- /**
- * <code>optional string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- name_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
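// The name_ field above is deliberately typed java.lang.Object: it holds the raw
// ByteString off the wire until getName() first decodes it, then caches the
// java.lang.String (only when the bytes are valid UTF-8) so later reads skip the
// decode; getNameBytes() performs the inverse caching for serialization. The
// help_ accessors below follow the same pattern.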
-
- public static final int HELP_FIELD_NUMBER = 2;
- private java.lang.Object help_;
- /**
- * <code>optional string help = 2;</code>
- */
- public boolean hasHelp() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional string help = 2;</code>
- */
- public java.lang.String getHelp() {
- java.lang.Object ref = help_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- help_ = s;
- }
- return s;
- }
- }
- /**
- * <code>optional string help = 2;</code>
- */
- public com.google.protobuf.ByteString
- getHelpBytes() {
- java.lang.Object ref = help_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- help_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- public static final int TYPE_FIELD_NUMBER = 3;
- private io.prometheus.client.Metrics.MetricType type_;
- /**
- * <code>optional .io.prometheus.client.MetricType type = 3;</code>
- */
- public boolean hasType() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional .io.prometheus.client.MetricType type = 3;</code>
- */
- public io.prometheus.client.Metrics.MetricType getType() {
- return type_;
- }
-
- public static final int METRIC_FIELD_NUMBER = 4;
- private java.util.List<io.prometheus.client.Metrics.Metric> metric_;
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public java.util.List<io.prometheus.client.Metrics.Metric> getMetricList() {
- return metric_;
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public java.util.List<? extends io.prometheus.client.Metrics.MetricOrBuilder>
- getMetricOrBuilderList() {
- return metric_;
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public int getMetricCount() {
- return metric_.size();
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public io.prometheus.client.Metrics.Metric getMetric(int index) {
- return metric_.get(index);
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public io.prometheus.client.Metrics.MetricOrBuilder getMetricOrBuilder(
- int index) {
- return metric_.get(index);
- }
-
- private void initFields() {
- name_ = "";
- help_ = "";
- type_ = io.prometheus.client.Metrics.MetricType.COUNTER;
- metric_ = java.util.Collections.emptyList();
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized == 1) return true;
- if (isInitialized == 0) return false;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeBytes(2, getHelpBytes());
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeEnum(3, type_.getNumber());
- }
- for (int i = 0; i < metric_.size(); i++) {
- output.writeMessage(4, metric_.get(i));
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(2, getHelpBytes());
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeEnumSize(3, type_.getNumber());
- }
- for (int i = 0; i < metric_.size(); i++) {
- size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(4, metric_.get(i));
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static io.prometheus.client.Metrics.MetricFamily parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.MetricFamily parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.MetricFamily parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static io.prometheus.client.Metrics.MetricFamily parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.MetricFamily parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.MetricFamily parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.MetricFamily parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static io.prometheus.client.Metrics.MetricFamily parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static io.prometheus.client.Metrics.MetricFamily parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static io.prometheus.client.Metrics.MetricFamily parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(io.prometheus.client.Metrics.MetricFamily prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code io.prometheus.client.MetricFamily}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder> implements
- // @@protoc_insertion_point(builder_implements:io.prometheus.client.MetricFamily)
- io.prometheus.client.Metrics.MetricFamilyOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- io.prometheus.client.Metrics.MetricFamily.class, io.prometheus.client.Metrics.MetricFamily.Builder.class);
- }
-
- // Construct using io.prometheus.client.Metrics.MetricFamily.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- getMetricFieldBuilder();
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- name_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- help_ = "";
- bitField0_ = (bitField0_ & ~0x00000002);
- type_ = io.prometheus.client.Metrics.MetricType.COUNTER;
- bitField0_ = (bitField0_ & ~0x00000004);
- if (metricBuilder_ == null) {
- metric_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000008);
- } else {
- metricBuilder_.clear();
- }
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_descriptor;
- }
-
- public io.prometheus.client.Metrics.MetricFamily getDefaultInstanceForType() {
- return io.prometheus.client.Metrics.MetricFamily.getDefaultInstance();
- }
-
- public io.prometheus.client.Metrics.MetricFamily build() {
- io.prometheus.client.Metrics.MetricFamily result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public io.prometheus.client.Metrics.MetricFamily buildPartial() {
- io.prometheus.client.Metrics.MetricFamily result = new io.prometheus.client.Metrics.MetricFamily(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.name_ = name_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.help_ = help_;
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
- to_bitField0_ |= 0x00000004;
- }
- result.type_ = type_;
- if (metricBuilder_ == null) {
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- metric_ = java.util.Collections.unmodifiableList(metric_);
- bitField0_ = (bitField0_ & ~0x00000008);
- }
- result.metric_ = metric_;
- } else {
- result.metric_ = metricBuilder_.build();
- }
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof io.prometheus.client.Metrics.MetricFamily) {
- return mergeFrom((io.prometheus.client.Metrics.MetricFamily)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(io.prometheus.client.Metrics.MetricFamily other) {
- if (other == io.prometheus.client.Metrics.MetricFamily.getDefaultInstance()) return this;
- if (other.hasName()) {
- bitField0_ |= 0x00000001;
- name_ = other.name_;
- onChanged();
- }
- if (other.hasHelp()) {
- bitField0_ |= 0x00000002;
- help_ = other.help_;
- onChanged();
- }
- if (other.hasType()) {
- setType(other.getType());
- }
- if (metricBuilder_ == null) {
- if (!other.metric_.isEmpty()) {
- if (metric_.isEmpty()) {
- metric_ = other.metric_;
- bitField0_ = (bitField0_ & ~0x00000008);
- } else {
- ensureMetricIsMutable();
- metric_.addAll(other.metric_);
- }
- onChanged();
- }
- } else {
- if (!other.metric_.isEmpty()) {
- if (metricBuilder_.isEmpty()) {
- metricBuilder_.dispose();
- metricBuilder_ = null;
- metric_ = other.metric_;
- bitField0_ = (bitField0_ & ~0x00000008);
- metricBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getMetricFieldBuilder() : null;
- } else {
- metricBuilder_.addAllMessages(other.metric_);
- }
- }
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- io.prometheus.client.Metrics.MetricFamily parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (io.prometheus.client.Metrics.MetricFamily) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- private java.lang.Object name_ = "";
- /**
- * <code>optional string name = 1;</code>
- */
- public boolean hasName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public java.lang.String getName() {
- java.lang.Object ref = name_;
- if (!(ref instanceof java.lang.String)) {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- name_ = s;
- }
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public com.google.protobuf.ByteString
- getNameBytes() {
- java.lang.Object ref = name_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- name_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder setName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder clearName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- name_ = getDefaultInstance().getName();
- onChanged();
- return this;
- }
- /**
- * <code>optional string name = 1;</code>
- */
- public Builder setNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- name_ = value;
- onChanged();
- return this;
- }
-
- private java.lang.Object help_ = "";
- /**
- * <code>optional string help = 2;</code>
- */
- public boolean hasHelp() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>optional string help = 2;</code>
- */
- public java.lang.String getHelp() {
- java.lang.Object ref = help_;
- if (!(ref instanceof java.lang.String)) {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- help_ = s;
- }
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>optional string help = 2;</code>
- */
- public com.google.protobuf.ByteString
- getHelpBytes() {
- java.lang.Object ref = help_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- help_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>optional string help = 2;</code>
- */
- public Builder setHelp(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- help_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional string help = 2;</code>
- */
- public Builder clearHelp() {
- bitField0_ = (bitField0_ & ~0x00000002);
- help_ = getDefaultInstance().getHelp();
- onChanged();
- return this;
- }
- /**
- * <code>optional string help = 2;</code>
- */
- public Builder setHelpBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000002;
- help_ = value;
- onChanged();
- return this;
- }
-
- private io.prometheus.client.Metrics.MetricType type_ = io.prometheus.client.Metrics.MetricType.COUNTER;
- /**
- * <code>optional .io.prometheus.client.MetricType type = 3;</code>
- */
- public boolean hasType() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * <code>optional .io.prometheus.client.MetricType type = 3;</code>
- */
- public io.prometheus.client.Metrics.MetricType getType() {
- return type_;
- }
- /**
- * <code>optional .io.prometheus.client.MetricType type = 3;</code>
- */
- public Builder setType(io.prometheus.client.Metrics.MetricType value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000004;
- type_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>optional .io.prometheus.client.MetricType type = 3;</code>
- */
- public Builder clearType() {
- bitField0_ = (bitField0_ & ~0x00000004);
- type_ = io.prometheus.client.Metrics.MetricType.COUNTER;
- onChanged();
- return this;
- }
-
- private java.util.List<io.prometheus.client.Metrics.Metric> metric_ =
- java.util.Collections.emptyList();
- private void ensureMetricIsMutable() {
- if (!((bitField0_ & 0x00000008) == 0x00000008)) {
- metric_ = new java.util.ArrayList<io.prometheus.client.Metrics.Metric>(metric_);
- bitField0_ |= 0x00000008;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.Metric, io.prometheus.client.Metrics.Metric.Builder, io.prometheus.client.Metrics.MetricOrBuilder> metricBuilder_;
-
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public java.util.List<io.prometheus.client.Metrics.Metric> getMetricList() {
- if (metricBuilder_ == null) {
- return java.util.Collections.unmodifiableList(metric_);
- } else {
- return metricBuilder_.getMessageList();
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public int getMetricCount() {
- if (metricBuilder_ == null) {
- return metric_.size();
- } else {
- return metricBuilder_.getCount();
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public io.prometheus.client.Metrics.Metric getMetric(int index) {
- if (metricBuilder_ == null) {
- return metric_.get(index);
- } else {
- return metricBuilder_.getMessage(index);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public Builder setMetric(
- int index, io.prometheus.client.Metrics.Metric value) {
- if (metricBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureMetricIsMutable();
- metric_.set(index, value);
- onChanged();
- } else {
- metricBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public Builder setMetric(
- int index, io.prometheus.client.Metrics.Metric.Builder builderForValue) {
- if (metricBuilder_ == null) {
- ensureMetricIsMutable();
- metric_.set(index, builderForValue.build());
- onChanged();
- } else {
- metricBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public Builder addMetric(io.prometheus.client.Metrics.Metric value) {
- if (metricBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureMetricIsMutable();
- metric_.add(value);
- onChanged();
- } else {
- metricBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public Builder addMetric(
- int index, io.prometheus.client.Metrics.Metric value) {
- if (metricBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureMetricIsMutable();
- metric_.add(index, value);
- onChanged();
- } else {
- metricBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public Builder addMetric(
- io.prometheus.client.Metrics.Metric.Builder builderForValue) {
- if (metricBuilder_ == null) {
- ensureMetricIsMutable();
- metric_.add(builderForValue.build());
- onChanged();
- } else {
- metricBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public Builder addMetric(
- int index, io.prometheus.client.Metrics.Metric.Builder builderForValue) {
- if (metricBuilder_ == null) {
- ensureMetricIsMutable();
- metric_.add(index, builderForValue.build());
- onChanged();
- } else {
- metricBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public Builder addAllMetric(
- java.lang.Iterable<? extends io.prometheus.client.Metrics.Metric> values) {
- if (metricBuilder_ == null) {
- ensureMetricIsMutable();
- com.google.protobuf.AbstractMessageLite.Builder.addAll(
- values, metric_);
- onChanged();
- } else {
- metricBuilder_.addAllMessages(values);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public Builder clearMetric() {
- if (metricBuilder_ == null) {
- metric_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000008);
- onChanged();
- } else {
- metricBuilder_.clear();
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public Builder removeMetric(int index) {
- if (metricBuilder_ == null) {
- ensureMetricIsMutable();
- metric_.remove(index);
- onChanged();
- } else {
- metricBuilder_.remove(index);
- }
- return this;
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public io.prometheus.client.Metrics.Metric.Builder getMetricBuilder(
- int index) {
- return getMetricFieldBuilder().getBuilder(index);
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public io.prometheus.client.Metrics.MetricOrBuilder getMetricOrBuilder(
- int index) {
- if (metricBuilder_ == null) {
- return metric_.get(index); } else {
- return metricBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public java.util.List<? extends io.prometheus.client.Metrics.MetricOrBuilder>
- getMetricOrBuilderList() {
- if (metricBuilder_ != null) {
- return metricBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(metric_);
- }
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public io.prometheus.client.Metrics.Metric.Builder addMetricBuilder() {
- return getMetricFieldBuilder().addBuilder(
- io.prometheus.client.Metrics.Metric.getDefaultInstance());
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public io.prometheus.client.Metrics.Metric.Builder addMetricBuilder(
- int index) {
- return getMetricFieldBuilder().addBuilder(
- index, io.prometheus.client.Metrics.Metric.getDefaultInstance());
- }
- /**
- * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
- */
- public java.util.List<io.prometheus.client.Metrics.Metric.Builder>
- getMetricBuilderList() {
- return getMetricFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.Metric, io.prometheus.client.Metrics.Metric.Builder, io.prometheus.client.Metrics.MetricOrBuilder>
- getMetricFieldBuilder() {
- if (metricBuilder_ == null) {
- metricBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- io.prometheus.client.Metrics.Metric, io.prometheus.client.Metrics.Metric.Builder, io.prometheus.client.Metrics.MetricOrBuilder>(
- metric_,
- ((bitField0_ & 0x00000008) == 0x00000008),
- getParentForChildren(),
- isClean());
- metric_ = null;
- }
- return metricBuilder_;
- }
-
- // @@protoc_insertion_point(builder_scope:io.prometheus.client.MetricFamily)
- }
-
- static {
- defaultInstance = new MetricFamily(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:io.prometheus.client.MetricFamily)
- }
-
- private static final com.google.protobuf.Descriptors.Descriptor
- internal_static_io_prometheus_client_LabelPair_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_io_prometheus_client_LabelPair_fieldAccessorTable;
- private static final com.google.protobuf.Descriptors.Descriptor
- internal_static_io_prometheus_client_Gauge_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_io_prometheus_client_Gauge_fieldAccessorTable;
- private static final com.google.protobuf.Descriptors.Descriptor
- internal_static_io_prometheus_client_Counter_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_io_prometheus_client_Counter_fieldAccessorTable;
- private static final com.google.protobuf.Descriptors.Descriptor
- internal_static_io_prometheus_client_Quantile_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_io_prometheus_client_Quantile_fieldAccessorTable;
- private static final com.google.protobuf.Descriptors.Descriptor
- internal_static_io_prometheus_client_Summary_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_io_prometheus_client_Summary_fieldAccessorTable;
- private static final com.google.protobuf.Descriptors.Descriptor
- internal_static_io_prometheus_client_Untyped_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_io_prometheus_client_Untyped_fieldAccessorTable;
- private static final com.google.protobuf.Descriptors.Descriptor
- internal_static_io_prometheus_client_Histogram_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_io_prometheus_client_Histogram_fieldAccessorTable;
- private static final com.google.protobuf.Descriptors.Descriptor
- internal_static_io_prometheus_client_Bucket_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_io_prometheus_client_Bucket_fieldAccessorTable;
- private static final com.google.protobuf.Descriptors.Descriptor
- internal_static_io_prometheus_client_Metric_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_io_prometheus_client_Metric_fieldAccessorTable;
- private static final com.google.protobuf.Descriptors.Descriptor
- internal_static_io_prometheus_client_MetricFamily_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable;
-
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
- }
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\rmetrics.proto\022\024io.prometheus.client\"(\n" +
- "\tLabelPair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\t\"" +
- "\026\n\005Gauge\022\r\n\005value\030\001 \001(\001\"\030\n\007Counter\022\r\n\005va" +
- "lue\030\001 \001(\001\"+\n\010Quantile\022\020\n\010quantile\030\001 \001(\001\022" +
- "\r\n\005value\030\002 \001(\001\"e\n\007Summary\022\024\n\014sample_coun" +
- "t\030\001 \001(\004\022\022\n\nsample_sum\030\002 \001(\001\0220\n\010quantile\030" +
- "\003 \003(\0132\036.io.prometheus.client.Quantile\"\030\n" +
- "\007Untyped\022\r\n\005value\030\001 \001(\001\"c\n\tHistogram\022\024\n\014" +
- "sample_count\030\001 \001(\004\022\022\n\nsample_sum\030\002 \001(\001\022," +
- "\n\006bucket\030\003 \003(\0132\034.io.prometheus.client.Bu",
- "cket\"7\n\006Bucket\022\030\n\020cumulative_count\030\001 \001(\004" +
- "\022\023\n\013upper_bound\030\002 \001(\001\"\276\002\n\006Metric\022.\n\005labe" +
- "l\030\001 \003(\0132\037.io.prometheus.client.LabelPair" +
- "\022*\n\005gauge\030\002 \001(\0132\033.io.prometheus.client.G" +
- "auge\022.\n\007counter\030\003 \001(\0132\035.io.prometheus.cl" +
- "ient.Counter\022.\n\007summary\030\004 \001(\0132\035.io.prome" +
- "theus.client.Summary\022.\n\007untyped\030\005 \001(\0132\035." +
- "io.prometheus.client.Untyped\0222\n\thistogra" +
- "m\030\007 \001(\0132\037.io.prometheus.client.Histogram" +
- "\022\024\n\014timestamp_ms\030\006 \001(\003\"\210\001\n\014MetricFamily\022",
- "\014\n\004name\030\001 \001(\t\022\014\n\004help\030\002 \001(\t\022.\n\004type\030\003 \001(" +
- "\0162 .io.prometheus.client.MetricType\022,\n\006m" +
- "etric\030\004 \003(\0132\034.io.prometheus.client.Metri" +
- "c*M\n\nMetricType\022\013\n\007COUNTER\020\000\022\t\n\005GAUGE\020\001\022" +
- "\013\n\007SUMMARY\020\002\022\013\n\007UNTYPED\020\003\022\r\n\tHISTOGRAM\020\004" +
- "B\026\n\024io.prometheus.client"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- return null;
- }
- };
- com.google.protobuf.Descriptors.FileDescriptor
- .internalBuildGeneratedFileFrom(descriptorData,
- new com.google.protobuf.Descriptors.FileDescriptor[] {
- }, assigner);
- internal_static_io_prometheus_client_LabelPair_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_io_prometheus_client_LabelPair_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_io_prometheus_client_LabelPair_descriptor,
- new java.lang.String[] { "Name", "Value", });
- internal_static_io_prometheus_client_Gauge_descriptor =
- getDescriptor().getMessageTypes().get(1);
- internal_static_io_prometheus_client_Gauge_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_io_prometheus_client_Gauge_descriptor,
- new java.lang.String[] { "Value", });
- internal_static_io_prometheus_client_Counter_descriptor =
- getDescriptor().getMessageTypes().get(2);
- internal_static_io_prometheus_client_Counter_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_io_prometheus_client_Counter_descriptor,
- new java.lang.String[] { "Value", });
- internal_static_io_prometheus_client_Quantile_descriptor =
- getDescriptor().getMessageTypes().get(3);
- internal_static_io_prometheus_client_Quantile_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_io_prometheus_client_Quantile_descriptor,
- new java.lang.String[] { "Quantile", "Value", });
- internal_static_io_prometheus_client_Summary_descriptor =
- getDescriptor().getMessageTypes().get(4);
- internal_static_io_prometheus_client_Summary_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_io_prometheus_client_Summary_descriptor,
- new java.lang.String[] { "SampleCount", "SampleSum", "Quantile", });
- internal_static_io_prometheus_client_Untyped_descriptor =
- getDescriptor().getMessageTypes().get(5);
- internal_static_io_prometheus_client_Untyped_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_io_prometheus_client_Untyped_descriptor,
- new java.lang.String[] { "Value", });
- internal_static_io_prometheus_client_Histogram_descriptor =
- getDescriptor().getMessageTypes().get(6);
- internal_static_io_prometheus_client_Histogram_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_io_prometheus_client_Histogram_descriptor,
- new java.lang.String[] { "SampleCount", "SampleSum", "Bucket", });
- internal_static_io_prometheus_client_Bucket_descriptor =
- getDescriptor().getMessageTypes().get(7);
- internal_static_io_prometheus_client_Bucket_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_io_prometheus_client_Bucket_descriptor,
- new java.lang.String[] { "CumulativeCount", "UpperBound", });
- internal_static_io_prometheus_client_Metric_descriptor =
- getDescriptor().getMessageTypes().get(8);
- internal_static_io_prometheus_client_Metric_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_io_prometheus_client_Metric_descriptor,
- new java.lang.String[] { "Label", "Gauge", "Counter", "Summary", "Untyped", "Histogram", "TimestampMs", });
- internal_static_io_prometheus_client_MetricFamily_descriptor =
- getDescriptor().getMessageTypes().get(9);
- internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_io_prometheus_client_MetricFamily_descriptor,
- new java.lang.String[] { "Name", "Help", "Type", "Metric", });
- }
-
- // @@protoc_insertion_point(outer_class_scope)
-}
diff --git a/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfs b/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfs
new file mode 100644
index 000000000..2e58e0544
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfs
@@ -0,0 +1,5 @@
+net 18628 0 18628 6
+rpc 4329785 0 4338291
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
+proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
diff --git a/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfsd b/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfsd
new file mode 100644
index 000000000..4e8565f41
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/net/rpc/nfsd
@@ -0,0 +1,11 @@
+rc 0 6 18622
+fh 0 0 0 0 0
+io 157286400 0
+th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
+ra 32 0 0 0 0 0 0 0 0 0 0 0
+net 18628 0 18628 6
+rpc 18628 0 0 0 0
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
+proc4 2 2 10853
+proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 17546756b..36c1586a1 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -5,6 +5,7 @@ import (
"os"
"path"
+ "github.com/prometheus/procfs/nfs"
"github.com/prometheus/procfs/xfs"
)
@@ -44,3 +45,25 @@ func (fs FS) XFSStats() (*xfs.Stats, error) {
return xfs.ParseStats(f)
}
+
+// NFSdClientRPCStats retrieves NFS client RPC statistics from /proc/net/rpc/nfs.
+func (fs FS) NFSdClientRPCStats() (*nfs.ClientRPCStats, error) {
+ f, err := os.Open(fs.Path("net/rpc/nfs"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return nfs.ParseClientRPCStats(f)
+}
+
+// NFSdServerRPCStats retrieves NFS daemon (server) RPC statistics from /proc/net/rpc/nfsd.
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
+ f, err := os.Open(fs.Path("net/rpc/nfsd"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return nfs.ParseServerRPCStats(f)
+}
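
The two accessors above give a collector one call per stat file. A minimal usage sketch, assuming a standard /proc mount and the package's existing NewFS constructor (illustrative only, not part of this change):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Point the library at the usual procfs mount.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	// Client-side counters come from /proc/net/rpc/nfs.
	client, err := fs.NFSdClientRPCStats()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("client RPCs: %d\n", client.ClientRPC.RPCCount)

	// Server-side counters come from /proc/net/rpc/nfsd.
	server, err := fs.NFSdServerRPCStats()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("nfsd threads: %d\n", server.Threads.Threads)
}
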
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
new file mode 100644
index 000000000..1ad21c91a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import "strconv"
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+ us := make([]uint32, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, uint32(u))
+ }
+
+ return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+ us := make([]uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, u)
+ }
+
+ return us, nil
+}
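
Both helpers exist to shrink per-line parsing boilerplate in the NFS parsers below. A sketch of the intended call pattern, written as a hypothetical Go example test; note internal/ packages are only importable from inside the procfs module itself:

package util_test

import (
	"fmt"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

func ExampleParseUint64s() {
	// A typical procfs stat line: a label followed by numeric columns.
	fields := strings.Fields("net 18628 0 18628 6")

	// Drop the label and parse every remaining column in one call.
	values, err := util.ParseUint64s(fields[1:])
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(values)
	// Output: [18628 0 18628 6]
}
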
diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go
new file mode 100644
index 000000000..e2185b782
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go
@@ -0,0 +1,263 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package nfs implements parsing of /proc/net/rpc/nfsd and /proc/net/rpc/nfs.
+// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
+package nfs
+
+// ReplyCache models the "rc" line.
+type ReplyCache struct {
+ Hits uint64
+ Misses uint64
+ NoCache uint64
+}
+
+// FileHandles models the "fh" line.
+type FileHandles struct {
+ Stale uint64
+ TotalLookups uint64
+ AnonLookups uint64
+ DirNoCache uint64
+ NoDirNoCache uint64
+}
+
+// InputOutput models the "io" line.
+type InputOutput struct {
+ Read uint64
+ Write uint64
+}
+
+// Threads models the "th" line.
+type Threads struct {
+ Threads uint64
+ FullCnt uint64
+}
+
+// ReadAheadCache models the "ra" line.
+type ReadAheadCache struct {
+ CacheSize uint64
+ CacheHistogram []uint64
+ NotFound uint64
+}
+
+// Network models the "net" line.
+type Network struct {
+ NetCount uint64
+ UDPCount uint64
+ TCPCount uint64
+ TCPConnect uint64
+}
+
+// ClientRPC models the nfs "rpc" line.
+type ClientRPC struct {
+ RPCCount uint64
+ Retransmissions uint64
+ AuthRefreshes uint64
+}
+
+// ServerRPC models the nfsd "rpc" line.
+type ServerRPC struct {
+ RPCCount uint64
+ BadCnt uint64
+ BadFmt uint64
+ BadAuth uint64
+ BadcInt uint64
+}
+
+// V2Stats models the "proc2" line.
+type V2Stats struct {
+ Null uint64
+ GetAttr uint64
+ SetAttr uint64
+ Root uint64
+ Lookup uint64
+ ReadLink uint64
+ Read uint64
+ WrCache uint64
+ Write uint64
+ Create uint64
+ Remove uint64
+ Rename uint64
+ Link uint64
+ SymLink uint64
+ MkDir uint64
+ RmDir uint64
+ ReadDir uint64
+ FsStat uint64
+}
+
+// V3Stats models the "proc3" line.
+type V3Stats struct {
+ Null uint64
+ GetAttr uint64
+ SetAttr uint64
+ Lookup uint64
+ Access uint64
+ ReadLink uint64
+ Read uint64
+ Write uint64
+ Create uint64
+ MkDir uint64
+ SymLink uint64
+ MkNod uint64
+ Remove uint64
+ RmDir uint64
+ Rename uint64
+ Link uint64
+ ReadDir uint64
+ ReadDirPlus uint64
+ FsStat uint64
+ FsInfo uint64
+ PathConf uint64
+ Commit uint64
+}
+
+// ClientV4Stats models the nfs "proc4" line.
+type ClientV4Stats struct {
+ Null uint64
+ Read uint64
+ Write uint64
+ Commit uint64
+ Open uint64
+ OpenConfirm uint64
+ OpenNoattr uint64
+ OpenDowngrade uint64
+ Close uint64
+ Setattr uint64
+ FsInfo uint64
+ Renew uint64
+ SetClientId uint64
+ SetClientIdConfirm uint64
+ Lock uint64
+ Lockt uint64
+ Locku uint64
+ Access uint64
+ Getattr uint64
+ Lookup uint64
+ LookupRoot uint64
+ Remove uint64
+ Rename uint64
+ Link uint64
+ Symlink uint64
+ Create uint64
+ Pathconf uint64
+ StatFs uint64
+ ReadLink uint64
+ ReadDir uint64
+ ServerCaps uint64
+ DelegReturn uint64
+ GetAcl uint64
+ SetAcl uint64
+ FsLocations uint64
+ ReleaseLockowner uint64
+ Secinfo uint64
+ FsidPresent uint64
+ ExchangeId uint64
+ CreateSession uint64
+ DestroySession uint64
+ Sequence uint64
+ GetLeaseTime uint64
+ ReclaimComplete uint64
+ LayoutGet uint64
+ GetDeviceInfo uint64
+ LayoutCommit uint64
+ LayoutReturn uint64
+ SecinfoNoName uint64
+ TestStateId uint64
+ FreeStateId uint64
+ GetDeviceList uint64
+ BindConnToSession uint64
+ DestroyClientId uint64
+ Seek uint64
+ Allocate uint64
+ DeAllocate uint64
+ LayoutStats uint64
+ Clone uint64
+}
+
+// ServerV4Stats models the nfsd "proc4" line.
+type ServerV4Stats struct {
+ Null uint64
+ Compound uint64
+}
+
+// V4Ops models the "proc4ops" line: NFSv4 operations
+// Variable list, see:
+// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
+// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
+// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
+type V4Ops struct {
+	// Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
+ Op0Unused uint64
+ Op1Unused uint64
+ Op2Future uint64
+ Access uint64
+ Close uint64
+ Commit uint64
+ Create uint64
+ DelegPurge uint64
+ DelegReturn uint64
+ GetAttr uint64
+ GetFH uint64
+ Link uint64
+ Lock uint64
+ Lockt uint64
+ Locku uint64
+ Lookup uint64
+ LookupRoot uint64
+ Nverify uint64
+ Open uint64
+ OpenAttr uint64
+ OpenConfirm uint64
+ OpenDgrd uint64
+ PutFH uint64
+ PutPubFH uint64
+ PutRootFH uint64
+ Read uint64
+ ReadDir uint64
+ ReadLink uint64
+ Remove uint64
+ Rename uint64
+ Renew uint64
+ RestoreFH uint64
+ SaveFH uint64
+ SecInfo uint64
+ SetAttr uint64
+ Verify uint64
+ Write uint64
+ RelLockOwner uint64
+}
+
+// ClientRPCStats models all stats from /proc/net/rpc/nfs.
+type ClientRPCStats struct {
+ Network Network
+ ClientRPC ClientRPC
+ V2Stats V2Stats
+ V3Stats V3Stats
+ ClientV4Stats ClientV4Stats
+}
+
+// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
+type ServerRPCStats struct {
+ ReplyCache ReplyCache
+ FileHandles FileHandles
+ InputOutput InputOutput
+ Threads Threads
+ ReadAheadCache ReadAheadCache
+ Network Network
+ ServerRPC ServerRPC
+ V2Stats V2Stats
+ V3Stats V3Stats
+ ServerV4Stats ServerV4Stats
+ V4Ops V4Ops
+}
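
Each struct mirrors one line of the two stat files positionally. For instance, the nfsd fixture's "rc 0 6 18622" line corresponds to the following value, hand-built here purely for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	// "rc 0 6 18622" maps column-for-column onto ReplyCache.
	rc := nfs.ReplyCache{
		Hits:    0,
		Misses:  6,
		NoCache: 18622,
	}
	fmt.Printf("%+v\n", rc)
}
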
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go
new file mode 100644
index 000000000..3aa32563a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse.go
@@ -0,0 +1,308 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "fmt"
+)
+
+func parseReplyCache(v []uint64) (ReplyCache, error) {
+ if len(v) != 3 {
+ return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
+ }
+
+ return ReplyCache{
+ Hits: v[0],
+ Misses: v[1],
+ NoCache: v[2],
+ }, nil
+}
+
+func parseFileHandles(v []uint64) (FileHandles, error) {
+ if len(v) != 5 {
+		return FileHandles{}, fmt.Errorf("invalid FileHandles line %q", v)
+ }
+
+ return FileHandles{
+ Stale: v[0],
+ TotalLookups: v[1],
+ AnonLookups: v[2],
+ DirNoCache: v[3],
+ NoDirNoCache: v[4],
+ }, nil
+}
+
+func parseInputOutput(v []uint64) (InputOutput, error) {
+ if len(v) != 2 {
+ return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
+ }
+
+ return InputOutput{
+ Read: v[0],
+ Write: v[1],
+ }, nil
+}
+
+func parseThreads(v []uint64) (Threads, error) {
+ if len(v) != 2 {
+ return Threads{}, fmt.Errorf("invalid Threads line %q", v)
+ }
+
+ return Threads{
+ Threads: v[0],
+ FullCnt: v[1],
+ }, nil
+}
+
+func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
+ if len(v) != 12 {
+ return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
+ }
+
+ return ReadAheadCache{
+ CacheSize: v[0],
+ CacheHistogram: v[1:11],
+ NotFound: v[11],
+ }, nil
+}
+
+func parseNetwork(v []uint64) (Network, error) {
+ if len(v) != 4 {
+ return Network{}, fmt.Errorf("invalid Network line %q", v)
+ }
+
+ return Network{
+ NetCount: v[0],
+ UDPCount: v[1],
+ TCPCount: v[2],
+ TCPConnect: v[3],
+ }, nil
+}
+
+func parseServerRPC(v []uint64) (ServerRPC, error) {
+ if len(v) != 5 {
+ return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
+ }
+
+ return ServerRPC{
+ RPCCount: v[0],
+ BadCnt: v[1],
+ BadFmt: v[2],
+ BadAuth: v[3],
+ BadcInt: v[4],
+ }, nil
+}
+
+func parseClientRPC(v []uint64) (ClientRPC, error) {
+ if len(v) != 3 {
+ return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
+ }
+
+ return ClientRPC{
+ RPCCount: v[0],
+ Retransmissions: v[1],
+ AuthRefreshes: v[2],
+ }, nil
+}
+
+func parseV2Stats(v []uint64) (V2Stats, error) {
+	// Like the other procN parsers, the line is length-prefixed: v[0]
+	// announces how many counters follow it.
+	values := int(v[0])
+ if len(v[1:]) != values || values != 18 {
+ return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
+ }
+
+ return V2Stats{
+ Null: v[1],
+ GetAttr: v[2],
+ SetAttr: v[3],
+ Root: v[4],
+ Lookup: v[5],
+ ReadLink: v[6],
+ Read: v[7],
+ WrCache: v[8],
+ Write: v[9],
+ Create: v[10],
+ Remove: v[11],
+ Rename: v[12],
+ Link: v[13],
+ SymLink: v[14],
+ MkDir: v[15],
+ RmDir: v[16],
+ ReadDir: v[17],
+ FsStat: v[18],
+ }, nil
+}
+
+func parseV3Stats(v []uint64) (V3Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values != 22 {
+ return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
+ }
+
+ return V3Stats{
+ Null: v[1],
+ GetAttr: v[2],
+ SetAttr: v[3],
+ Lookup: v[4],
+ Access: v[5],
+ ReadLink: v[6],
+ Read: v[7],
+ Write: v[8],
+ Create: v[9],
+ MkDir: v[10],
+ SymLink: v[11],
+ MkNod: v[12],
+ Remove: v[13],
+ RmDir: v[14],
+ Rename: v[15],
+ Link: v[16],
+ ReadDir: v[17],
+ ReadDirPlus: v[18],
+ FsStat: v[19],
+ FsInfo: v[20],
+ PathConf: v[21],
+ Commit: v[22],
+ }, nil
+}
+
+func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values < 59 {
+ return ClientV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
+ }
+
+ return ClientV4Stats{
+ Null: v[1],
+ Read: v[2],
+ Write: v[3],
+ Commit: v[4],
+ Open: v[5],
+ OpenConfirm: v[6],
+ OpenNoattr: v[7],
+ OpenDowngrade: v[8],
+ Close: v[9],
+ Setattr: v[10],
+ FsInfo: v[11],
+ Renew: v[12],
+ SetClientId: v[13],
+ SetClientIdConfirm: v[14],
+ Lock: v[15],
+ Lockt: v[16],
+ Locku: v[17],
+ Access: v[18],
+ Getattr: v[19],
+ Lookup: v[20],
+ LookupRoot: v[21],
+ Remove: v[22],
+ Rename: v[23],
+ Link: v[24],
+ Symlink: v[25],
+ Create: v[26],
+ Pathconf: v[27],
+ StatFs: v[28],
+ ReadLink: v[29],
+ ReadDir: v[30],
+ ServerCaps: v[31],
+ DelegReturn: v[32],
+ GetAcl: v[33],
+ SetAcl: v[34],
+ FsLocations: v[35],
+ ReleaseLockowner: v[36],
+ Secinfo: v[37],
+ FsidPresent: v[38],
+ ExchangeId: v[39],
+ CreateSession: v[40],
+ DestroySession: v[41],
+ Sequence: v[42],
+ GetLeaseTime: v[43],
+ ReclaimComplete: v[44],
+ LayoutGet: v[45],
+ GetDeviceInfo: v[46],
+ LayoutCommit: v[47],
+ LayoutReturn: v[48],
+ SecinfoNoName: v[49],
+ TestStateId: v[50],
+ FreeStateId: v[51],
+ GetDeviceList: v[52],
+ BindConnToSession: v[53],
+ DestroyClientId: v[54],
+ Seek: v[55],
+ Allocate: v[56],
+ DeAllocate: v[57],
+ LayoutStats: v[58],
+ Clone: v[59],
+ }, nil
+}
+
+func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values != 2 {
+ return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
+ }
+
+ return ServerV4Stats{
+ Null: v[1],
+ Compound: v[2],
+ }, nil
+}
+
+func parseV4Ops(v []uint64) (V4Ops, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values < 39 {
+ return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
+ }
+
+ stats := V4Ops{
+ Op0Unused: v[1],
+ Op1Unused: v[2],
+ Op2Future: v[3],
+ Access: v[4],
+ Close: v[5],
+ Commit: v[6],
+ Create: v[7],
+ DelegPurge: v[8],
+ DelegReturn: v[9],
+ GetAttr: v[10],
+ GetFH: v[11],
+ Link: v[12],
+ Lock: v[13],
+ Lockt: v[14],
+ Locku: v[15],
+ Lookup: v[16],
+ LookupRoot: v[17],
+ Nverify: v[18],
+ Open: v[19],
+ OpenAttr: v[20],
+ OpenConfirm: v[21],
+ OpenDgrd: v[22],
+ PutFH: v[23],
+ PutPubFH: v[24],
+ PutRootFH: v[25],
+ Read: v[26],
+ ReadDir: v[27],
+ ReadLink: v[28],
+ Remove: v[29],
+ Rename: v[30],
+ Renew: v[31],
+ RestoreFH: v[32],
+ SaveFH: v[33],
+ SecInfo: v[34],
+ SetAttr: v[35],
+ Verify: v[36],
+ Write: v[37],
+ RelLockOwner: v[38],
+ }
+
+ return stats, nil
+}
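
All of the procN parsers above share one convention: the line is length-prefixed, so the first value announces how many counters follow, and the guard len(v[1:]) != values rejects truncated lines. A standalone illustration of that convention; checkCountPrefix is a hypothetical helper, not part of the package:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// checkCountPrefix validates a length-prefixed procfs line: the first
// numeric field states how many counters follow it.
func checkCountPrefix(line string, want int) error {
	fields := strings.Fields(line)[1:] // drop the "proc2"/"proc3" label
	n, err := strconv.Atoi(fields[0])
	if err != nil {
		return err
	}
	if len(fields[1:]) != n || n != want {
		return fmt.Errorf("invalid line %q", line)
	}
	return nil
}

func main() {
	line := "proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2"
	fmt.Println(checkCountPrefix(line, 18)) // <nil>
}
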
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
new file mode 100644
index 000000000..b5c0b15f3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs.
+func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
+ stats := &ClientRPCStats{}
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+		parts := strings.Fields(line)
+ // require at least <key> <value>
+ if len(parts) < 2 {
+			return nil, fmt.Errorf("invalid NFS metric line %q", line)
+ }
+
+ values, err := util.ParseUint64s(parts[1:])
+ if err != nil {
+			return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
+ }
+
+ switch metricLine := parts[0]; metricLine {
+ case "net":
+ stats.Network, err = parseNetwork(values)
+ case "rpc":
+ stats.ClientRPC, err = parseClientRPC(values)
+ case "proc2":
+ stats.V2Stats, err = parseV2Stats(values)
+ case "proc3":
+ stats.V3Stats, err = parseV3Stats(values)
+ case "proc4":
+ stats.ClientV4Stats, err = parseClientV4Stats(values)
+ default:
+			return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
+ }
+ if err != nil {
+			return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning NFS file: %s", err)
+ }
+
+ return stats, nil
+}
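
Because the parser takes any io.Reader, it is testable without a live /proc, which is exactly how the table-driven test below exercises it. A quick sketch feeding it two fixture-style lines:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	// Two lines in /proc/net/rpc/nfs format.
	input := "net 18628 0 18628 6\nrpc 4329785 0 4338291\n"

	stats, err := nfs.ParseClientRPCStats(strings.NewReader(input))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stats.ClientRPC.RPCCount) // 4329785
}
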
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go
new file mode 100644
index 000000000..d821f1b4c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go
@@ -0,0 +1,180 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs_test
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/procfs/nfs"
+)
+
+func TestNewNFSdClientRPCStats(t *testing.T) {
+ tests := []struct {
+ name string
+ content string
+ stats *nfs.ClientRPCStats
+ invalid bool
+ }{
+ {
+ name: "invalid file",
+ content: "invalid",
+ invalid: true,
+ }, {
+ name: "good file",
+ content: `net 18628 0 18628 6
+rpc 4329785 0 4338291
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
+proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+`,
+ stats: &nfs.ClientRPCStats{
+ Network: nfs.Network{
+ NetCount: 18628,
+ UDPCount: 0,
+ TCPCount: 18628,
+ TCPConnect: 6,
+ },
+ ClientRPC: nfs.ClientRPC{
+ RPCCount: 4329785,
+ Retransmissions: 0,
+ AuthRefreshes: 4338291,
+ },
+ V2Stats: nfs.V2Stats{
+ Null: 2,
+ GetAttr: 69,
+ SetAttr: 0,
+ Root: 0,
+ Lookup: 4410,
+ ReadLink: 0,
+ Read: 0,
+ WrCache: 0,
+ Write: 0,
+ Create: 0,
+ Remove: 0,
+ Rename: 0,
+ Link: 0,
+ SymLink: 0,
+ MkDir: 0,
+ RmDir: 0,
+ ReadDir: 99,
+ FsStat: 2,
+ },
+ V3Stats: nfs.V3Stats{
+ Null: 1,
+ GetAttr: 4084749,
+ SetAttr: 29200,
+ Lookup: 94754,
+ Access: 32580,
+ ReadLink: 186,
+ Read: 47747,
+ Write: 7981,
+ Create: 8639,
+ MkDir: 0,
+ SymLink: 6356,
+ MkNod: 0,
+ Remove: 6962,
+ RmDir: 0,
+ Rename: 7958,
+ Link: 0,
+ ReadDir: 0,
+ ReadDirPlus: 241,
+ FsStat: 4,
+ FsInfo: 4,
+ PathConf: 2,
+ Commit: 39,
+ },
+ ClientV4Stats: nfs.ClientV4Stats{
+ Null: 1,
+ Read: 0,
+ Write: 0,
+ Commit: 0,
+ Open: 0,
+ OpenConfirm: 0,
+ OpenNoattr: 0,
+ OpenDowngrade: 0,
+ Close: 0,
+ Setattr: 0,
+ FsInfo: 0,
+ Renew: 0,
+ SetClientId: 1,
+ SetClientIdConfirm: 1,
+ Lock: 0,
+ Lockt: 0,
+ Locku: 0,
+ Access: 0,
+ Getattr: 0,
+ Lookup: 0,
+ LookupRoot: 0,
+ Remove: 2,
+ Rename: 0,
+ Link: 0,
+ Symlink: 0,
+ Create: 0,
+ Pathconf: 0,
+ StatFs: 0,
+ ReadLink: 0,
+ ReadDir: 0,
+ ServerCaps: 0,
+ DelegReturn: 0,
+ GetAcl: 0,
+ SetAcl: 0,
+ FsLocations: 0,
+ ReleaseLockowner: 0,
+ Secinfo: 0,
+ FsidPresent: 0,
+ ExchangeId: 0,
+ CreateSession: 0,
+ DestroySession: 0,
+ Sequence: 0,
+ GetLeaseTime: 0,
+ ReclaimComplete: 0,
+ LayoutGet: 0,
+ GetDeviceInfo: 0,
+ LayoutCommit: 0,
+ LayoutReturn: 0,
+ SecinfoNoName: 0,
+ TestStateId: 0,
+ FreeStateId: 0,
+ GetDeviceList: 0,
+ BindConnToSession: 0,
+ DestroyClientId: 0,
+ Seek: 0,
+ Allocate: 0,
+ DeAllocate: 0,
+ LayoutStats: 0,
+ Clone: 0,
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ stats, err := nfs.ParseClientRPCStats(strings.NewReader(tt.content))
+
+ if tt.invalid && err == nil {
+ t.Fatal("expected an error, but none occurred")
+ }
+ if !tt.invalid && err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ if want, have := tt.stats, stats; !reflect.DeepEqual(want, have) {
+ t.Fatalf("unexpected NFS stats:\nwant:\n%v\nhave:\n%v", want, have)
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
new file mode 100644
index 000000000..57bb4a358
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd.
+func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
+ stats := &ServerRPCStats{}
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+		parts := strings.Fields(line)
+ // require at least <key> <value>
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid NFSd metric line %q", line)
+ }
+ label := parts[0]
+
+ var values []uint64
+ var err error
+		// The "th" line mixes two leading integer counts with ten
+		// floating-point idle-ratio buckets; only the integers are parsed.
+		if label == "th" {
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
+ }
+ values, err = util.ParseUint64s(parts[1:3])
+ } else {
+ values, err = util.ParseUint64s(parts[1:])
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
+ }
+
+ switch metricLine := parts[0]; metricLine {
+ case "rc":
+ stats.ReplyCache, err = parseReplyCache(values)
+ case "fh":
+ stats.FileHandles, err = parseFileHandles(values)
+ case "io":
+ stats.InputOutput, err = parseInputOutput(values)
+ case "th":
+ stats.Threads, err = parseThreads(values)
+ case "ra":
+ stats.ReadAheadCache, err = parseReadAheadCache(values)
+ case "net":
+ stats.Network, err = parseNetwork(values)
+ case "rpc":
+ stats.ServerRPC, err = parseServerRPC(values)
+ case "proc2":
+ stats.V2Stats, err = parseV2Stats(values)
+ case "proc3":
+ stats.V3Stats, err = parseV3Stats(values)
+ case "proc4":
+ stats.ServerV4Stats, err = parseServerV4Stats(values)
+ case "proc4ops":
+ stats.V4Ops, err = parseV4Ops(values)
+ default:
+ return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
+ }
+ if err != nil {
+			return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning NFSd file: %s", err)
+ }
+
+ return stats, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd_test.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd_test.go
new file mode 100644
index 000000000..b09b3b580
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd_test.go
@@ -0,0 +1,196 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs_test
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/procfs/nfs"
+)
+
+func TestNewNFSdServerRPCStats(t *testing.T) {
+ tests := []struct {
+ name string
+ content string
+ stats *nfs.ServerRPCStats
+ invalid bool
+ }{
+ {
+ name: "invalid file",
+ content: "invalid",
+ invalid: true,
+ }, {
+ name: "good file",
+ content: `rc 0 6 18622
+fh 0 0 0 0 0
+io 157286400 0
+th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
+ra 32 0 0 0 0 0 0 0 0 0 0 0
+net 18628 0 18628 6
+rpc 18628 0 0 0 0
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
+proc4 2 2 10853
+proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+`,
+ stats: &nfs.ServerRPCStats{
+ ReplyCache: nfs.ReplyCache{
+ Hits: 0,
+ Misses: 6,
+ NoCache: 18622,
+ },
+ FileHandles: nfs.FileHandles{
+ Stale: 0,
+ TotalLookups: 0,
+ AnonLookups: 0,
+ DirNoCache: 0,
+ NoDirNoCache: 0,
+ },
+ InputOutput: nfs.InputOutput{
+ Read: 157286400,
+ Write: 0,
+ },
+ Threads: nfs.Threads{
+ Threads: 8,
+ FullCnt: 0,
+ },
+ ReadAheadCache: nfs.ReadAheadCache{
+ CacheSize: 32,
+ CacheHistogram: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ NotFound: 0,
+ },
+ Network: nfs.Network{
+ NetCount: 18628,
+ UDPCount: 0,
+ TCPCount: 18628,
+ TCPConnect: 6,
+ },
+ ServerRPC: nfs.ServerRPC{
+ RPCCount: 18628,
+ BadCnt: 0,
+ BadFmt: 0,
+ BadAuth: 0,
+ BadcInt: 0,
+ },
+ V2Stats: nfs.V2Stats{
+ Null: 2,
+ GetAttr: 69,
+ SetAttr: 0,
+ Root: 0,
+ Lookup: 4410,
+ ReadLink: 0,
+ Read: 0,
+ WrCache: 0,
+ Write: 0,
+ Create: 0,
+ Remove: 0,
+ Rename: 0,
+ Link: 0,
+ SymLink: 0,
+ MkDir: 0,
+ RmDir: 0,
+ ReadDir: 99,
+ FsStat: 2,
+ },
+ V3Stats: nfs.V3Stats{
+ Null: 2,
+ GetAttr: 112,
+ SetAttr: 0,
+ Lookup: 2719,
+ Access: 111,
+ ReadLink: 0,
+ Read: 0,
+ Write: 0,
+ Create: 0,
+ MkDir: 0,
+ SymLink: 0,
+ MkNod: 0,
+ Remove: 0,
+ RmDir: 0,
+ Rename: 0,
+ Link: 0,
+ ReadDir: 27,
+ ReadDirPlus: 216,
+ FsStat: 0,
+ FsInfo: 2,
+ PathConf: 1,
+ Commit: 0,
+ },
+ ServerV4Stats: nfs.ServerV4Stats{
+ Null: 2,
+ Compound: 10853,
+ },
+ V4Ops: nfs.V4Ops{
+ Op0Unused: 0,
+ Op1Unused: 0,
+ Op2Future: 0,
+ Access: 1098,
+ Close: 2,
+ Commit: 0,
+ Create: 0,
+ DelegPurge: 0,
+ DelegReturn: 0,
+ GetAttr: 8179,
+ GetFH: 5896,
+ Link: 0,
+ Lock: 0,
+ Lockt: 0,
+ Locku: 0,
+ Lookup: 5900,
+ LookupRoot: 0,
+ Nverify: 0,
+ Open: 2,
+ OpenAttr: 0,
+ OpenConfirm: 2,
+ OpenDgrd: 0,
+ PutFH: 9609,
+ PutPubFH: 0,
+ PutRootFH: 2,
+ Read: 150,
+ ReadDir: 1272,
+ ReadLink: 0,
+ Remove: 0,
+ Rename: 0,
+ Renew: 1236,
+ RestoreFH: 0,
+ SaveFH: 0,
+ SecInfo: 0,
+ SetAttr: 0,
+ Verify: 3,
+ Write: 3,
+ RelLockOwner: 0,
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ stats, err := nfs.ParseServerRPCStats(strings.NewReader(tt.content))
+
+ if tt.invalid && err == nil {
+ t.Fatal("expected an error, but none occurred")
+ }
+ if !tt.invalid && err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ if want, have := tt.stats, stats; !reflect.DeepEqual(want, have) {
+ t.Fatalf("unexpected NFS stats:\nwant:\n%v\nhave:\n%v", want, have)
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go
index c8f6279f3..2bc0ef342 100644
--- a/vendor/github.com/prometheus/procfs/xfs/parse.go
+++ b/vendor/github.com/prometheus/procfs/xfs/parse.go
@@ -17,8 +17,9 @@ import (
"bufio"
"fmt"
"io"
- "strconv"
"strings"
+
+ "github.com/prometheus/procfs/internal/util"
)
// ParseStats parses a Stats from an input io.Reader, using the format
@@ -68,7 +69,7 @@ func ParseStats(r io.Reader) (*Stats, error) {
// Extended precision counters are uint64 values.
if label == fieldXpc {
- us, err := parseUint64s(ss[1:])
+ us, err := util.ParseUint64s(ss[1:])
if err != nil {
return nil, err
}
@@ -82,7 +83,7 @@ func ParseStats(r io.Reader) (*Stats, error) {
}
// All other counters are uint32 values.
- us, err := parseUint32s(ss[1:])
+ us, err := util.ParseUint32s(ss[1:])
if err != nil {
return nil, err
}
@@ -327,33 +328,3 @@ func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
ReadBytes: us[2],
}, nil
}
-
-// parseUint32s parses a slice of strings into a slice of uint32s.
-func parseUint32s(ss []string) ([]uint32, error) {
- us := make([]uint32, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- return nil, err
- }
-
- us = append(us, uint32(u))
- }
-
- return us, nil
-}
-
-// parseUint64s parses a slice of strings into a slice of uint64s.
-func parseUint64s(ss []string) ([]uint64, error) {
- us := make([]uint64, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- us = append(us, u)
- }
-
- return us, nil
-}
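The two string-parsing helpers deleted here were not dropped; they moved into the shared `github.com/prometheus/procfs/internal/util` package so the xfs and nfs parsers can reuse them. A sketch of the shared 64-bit variant, assumed to mirror the code removed above (`util.ParseUint32s` is the analogous 32-bit version):

```go
package util

import "strconv"

// ParseUint64s parses a slice of strings into a slice of uint64s.
func ParseUint64s(ss []string) ([]uint64, error) {
	us := make([]uint64, 0, len(ss))
	for _, s := range ss {
		u, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}
		us = append(us, u)
	}
	return us, nil
}
```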
diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go
index 7463887fd..4f253f481 100644
--- a/vendor/github.com/spf13/afero/util.go
+++ b/vendor/github.com/spf13/afero/util.go
@@ -20,7 +20,6 @@ import (
"bytes"
"fmt"
"io"
- "log"
"os"
"path/filepath"
"strings"
@@ -46,7 +45,7 @@ func WriteReader(fs Fs, path string, r io.Reader) (err error) {
err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
if err != nil {
if err != os.ErrExist {
- log.Panicln(err)
+ return err
}
}
}
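With this change a failure to create the parent directory is reported through `WriteReader`'s error return rather than panicking the whole process. A hedged sketch of calling code (hypothetical demo, using afero's in-memory filesystem):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// Parent directories are created as needed; a MkdirAll failure now
	// comes back as an error instead of a panic.
	err := afero.WriteReader(fs, "/tmp/demo/file.txt", strings.NewReader("hello"))
	if err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Println("written")
}
```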
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
index 52bd39ddb..44d777c18 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.md
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -173,9 +173,9 @@ hello.yml test.json
So while there are many other files in the CWD it only shows me subdirs and those with valid extensions.
-# Specifiy custom flag completion
+# Specify custom flag completion
-Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specifiy
+Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify
a custom flag completion function with cobra.BashCompCustom:
```go
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/helpers.go b/vendor/github.com/spf13/cobra/cobra/cmd/helpers.go
index e5b37ec72..cd94b3e31 100644
--- a/vendor/github.com/spf13/cobra/cobra/cmd/helpers.go
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/helpers.go
@@ -24,7 +24,6 @@ import (
"text/template"
)
-var cmdDirs = [...]string{"cmd", "cmds", "command", "commands"}
var srcPaths []string
func init() {
@@ -128,8 +127,6 @@ func writeStringToFile(path string, s string) error {
// writeToFile writes r to file with path only
// if file/directory on given path doesn't exist.
-// If file/directory exists on given path, then
-// it terminates app and prints an appropriate error.
func writeToFile(path string, r io.Reader) error {
if exists(path) {
return fmt.Errorf("%v already exists", path)
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/project.go b/vendor/github.com/spf13/cobra/cobra/cmd/project.go
index de1168a13..7ddb82585 100644
--- a/vendor/github.com/spf13/cobra/cobra/cmd/project.go
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/project.go
@@ -17,10 +17,9 @@ type Project struct {
}
// NewProject returns Project with specified project name.
-// If projectName is blank string, it returns nil.
func NewProject(projectName string) *Project {
if projectName == "" {
- return nil
+ er("can't create project with blank name")
}
p := new(Project)
@@ -54,8 +53,6 @@ func NewProject(projectName string) *Project {
}
// findPackage returns full path to existing go package in GOPATHs.
-// findPackage returns "", if it can't find path.
-// If packageName is "", findPackage returns "".
func findPackage(packageName string) string {
if packageName == "" {
return ""
@@ -73,16 +70,29 @@ func findPackage(packageName string) string {
// NewProjectFromPath returns Project with specified absolute path to
// package.
-// If absPath is blank string or if absPath is not actually absolute,
-// it returns nil.
func NewProjectFromPath(absPath string) *Project {
- if absPath == "" || !filepath.IsAbs(absPath) {
- return nil
+ if absPath == "" {
+ er("can't create project: absPath can't be blank")
+ }
+ if !filepath.IsAbs(absPath) {
+ er("can't create project: absPath is not absolute")
+ }
+
+ // If absPath is symlink, use its destination.
+ fi, err := os.Lstat(absPath)
+ if err != nil {
+ er("can't read path info: " + err.Error())
+ }
+ if fi.Mode()&os.ModeSymlink != 0 {
+ path, err := os.Readlink(absPath)
+ if err != nil {
+ er("can't read the destination of symlink: " + err.Error())
+ }
+ absPath = path
}
p := new(Project)
- p.absPath = absPath
- p.absPath = strings.TrimSuffix(p.absPath, findCmdDir(p.absPath))
+ p.absPath = strings.TrimSuffix(absPath, findCmdDir(absPath))
p.name = filepath.ToSlash(trimSrcPath(p.absPath, p.SrcPath()))
return p
}
@@ -91,7 +101,7 @@ func NewProjectFromPath(absPath string) *Project {
func trimSrcPath(absPath, srcPath string) string {
relPath, err := filepath.Rel(srcPath, absPath)
if err != nil {
- er("Cobra supports project only within $GOPATH: " + err.Error())
+ er(err)
}
return relPath
}
@@ -101,7 +111,6 @@ func (p *Project) License() License {
if p.license.Text == "" && p.license.Name != "None" {
p.license = getLicense()
}
-
return p.license
}
@@ -111,8 +120,6 @@ func (p Project) Name() string {
}
// CmdPath returns absolute path to directory, where all commands are located.
-//
-// CmdPath returns blank string, only if p.AbsPath() is a blank string.
func (p *Project) CmdPath() string {
if p.absPath == "" {
return ""
@@ -125,8 +132,6 @@ func (p *Project) CmdPath() string {
// findCmdDir checks if base of absPath is cmd dir and returns it or
// looks for existing cmd dir in absPath.
-// If the cmd dir doesn't exist, empty, or cannot be found,
-// it returns "cmd".
func findCmdDir(absPath string) string {
if !exists(absPath) || isEmpty(absPath) {
return "cmd"
@@ -149,7 +154,7 @@ func findCmdDir(absPath string) string {
// isCmdDir checks if base of name is one of cmdDir.
func isCmdDir(name string) bool {
name = filepath.Base(name)
- for _, cmdDir := range cmdDirs {
+ for _, cmdDir := range []string{"cmd", "cmds", "command", "commands"} {
if name == cmdDir {
return true
}
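The symlink handling added to `NewProjectFromPath` follows the usual two-step pattern: `os.Lstat` inspects the path without following links, and `os.Readlink` resolves the destination when the mode bit is set. A standalone sketch of that pattern (the `resolveSymlink` helper is hypothetical, not part of cobra):

```go
package main

import (
	"fmt"
	"os"
)

// resolveSymlink returns the destination of path if it is a symlink,
// or path unchanged otherwise.
func resolveSymlink(path string) (string, error) {
	fi, err := os.Lstat(path) // does not follow the link
	if err != nil {
		return "", err
	}
	if fi.Mode()&os.ModeSymlink != 0 {
		return os.Readlink(path)
	}
	return path, nil
}

func main() {
	p, err := resolveSymlink("/usr/local/go")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(p)
}
```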
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
index 250a43814..aa126e44d 100644
--- a/vendor/github.com/spf13/pflag/count.go
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -11,13 +11,13 @@ func newCountValue(val int, p *int) *countValue {
}
func (i *countValue) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, 64)
- // -1 means that no specific value was passed, so increment
- if v == -1 {
+ // "+1" means that no specific value was passed, so increment
+ if s == "+1" {
*i = countValue(*i + 1)
- } else {
- *i = countValue(v)
+ return nil
}
+ v, err := strconv.ParseInt(s, 0, 0)
+ *i = countValue(v)
return err
}
@@ -54,7 +54,7 @@ func (f *FlagSet) CountVar(p *int, name string, usage string) {
// CountVarP is like CountVar, but takes a shorthand for the flag name.
func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
- flag.NoOptDefVal = "-1"
+ flag.NoOptDefVal = "+1"
}
// CountVar is like a FlagSet's CountVar, except the flag is placed on the CommandLine instead of a given flag set
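Under the new scheme the count flag's `NoOptDefVal` is the sentinel string `"+1"`: a bare `-v` increments the counter, while any explicit value (including `0`, which the new test cases cover) is parsed and assigned directly. A minimal sketch of the behavior (a hypothetical demo program, not part of the vendored code):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	verbose := fs.CountP("verbose", "v", "verbosity")

	// Each bare occurrence increments by one, matching the {"-vvv"} -> 3
	// case in the updated test table.
	if err := fs.Parse([]string{"-vvv"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*verbose) // 3
}
```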
diff --git a/vendor/github.com/spf13/pflag/count_test.go b/vendor/github.com/spf13/pflag/count_test.go
index 460d96a6f..3785d375a 100644
--- a/vendor/github.com/spf13/pflag/count_test.go
+++ b/vendor/github.com/spf13/pflag/count_test.go
@@ -17,10 +17,14 @@ func TestCount(t *testing.T) {
success bool
expected int
}{
+ {[]string{}, true, 0},
+ {[]string{"-v"}, true, 1},
{[]string{"-vvv"}, true, 3},
{[]string{"-v", "-v", "-v"}, true, 3},
{[]string{"-v", "--verbose", "-v"}, true, 3},
{[]string{"-v=3", "-v"}, true, 4},
+ {[]string{"--verbose=0"}, true, 0},
+ {[]string{"-v=0"}, true, 0},
{[]string{"-v=a"}, false, 0},
}
@@ -45,7 +49,7 @@ func TestCount(t *testing.T) {
t.Errorf("Got error trying to fetch the counter flag")
}
if c != tc.expected {
- t.Errorf("expected %q, got %q", tc.expected, c)
+ t.Errorf("expected %d, got %d", tc.expected, c)
}
}
}
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
index 6f1fc3007..28538c075 100644
--- a/vendor/github.com/spf13/pflag/flag.go
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -202,12 +202,18 @@ func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
f.normalizeNameFunc = n
f.sortedFormal = f.sortedFormal[:0]
- for k, v := range f.orderedFormal {
- delete(f.formal, NormalizedName(v.Name))
- nname := f.normalizeFlagName(v.Name)
- v.Name = string(nname)
- f.formal[nname] = v
- f.orderedFormal[k] = v
+ for fname, flag := range f.formal {
+ nname := f.normalizeFlagName(flag.Name)
+ if fname == nname {
+ continue
+ }
+ flag.Name = string(nname)
+ delete(f.formal, fname)
+ f.formal[nname] = flag
+ if _, set := f.actual[fname]; set {
+ delete(f.actual, fname)
+ f.actual[nname] = flag
+ }
}
}
@@ -440,13 +446,15 @@ func (f *FlagSet) Set(name, value string) error {
return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
}
- if f.actual == nil {
- f.actual = make(map[NormalizedName]*Flag)
- }
- f.actual[normalName] = flag
- f.orderedActual = append(f.orderedActual, flag)
+ if !flag.Changed {
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[normalName] = flag
+ f.orderedActual = append(f.orderedActual, flag)
- flag.Changed = true
+ flag.Changed = true
+ }
if flag.Deprecated != "" {
fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
@@ -556,6 +564,14 @@ func UnquoteUsage(flag *Flag) (name string, usage string) {
name = "int"
case "uint64":
name = "uint"
+ case "stringSlice":
+ name = "strings"
+ case "intSlice":
+ name = "ints"
+ case "uintSlice":
+ name = "uints"
+ case "boolSlice":
+ name = "bools"
}
return
@@ -660,6 +676,10 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string {
if flag.NoOptDefVal != "true" {
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
}
+ case "count":
+ if flag.NoOptDefVal != "+1" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
default:
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
}
@@ -857,8 +877,10 @@ func VarP(value Value, name, shorthand, usage string) {
// returns the error.
func (f *FlagSet) failf(format string, a ...interface{}) error {
err := fmt.Errorf(format, a...)
- fmt.Fprintln(f.out(), err)
- f.usage()
+ if f.errorHandling != ContinueOnError {
+ fmt.Fprintln(f.out(), err)
+ f.usage()
+ }
return err
}
@@ -912,6 +934,9 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
}
err = fn(flag, value)
+ if err != nil {
+ f.failf(err.Error())
+ }
return
}
@@ -962,6 +987,9 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse
}
err = fn(flag, value)
+ if err != nil {
+ f.failf(err.Error())
+ }
return
}
@@ -1034,6 +1062,7 @@ func (f *FlagSet) Parse(arguments []string) error {
case ContinueOnError:
return err
case ExitOnError:
+ fmt.Println(err)
os.Exit(2)
case PanicOnError:
panic(err)
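Taken together, the `failf` and `Parse` changes move error reporting to the caller when `ContinueOnError` is used: the error is returned without printing usage, exactly as the updated `TestUsage` below asserts. A hedged sketch of calling code that takes over the reporting:

```go
package main

import (
	"fmt"
	"os"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Bool("dry-run", false, "do not write anything")

	if err := fs.Parse([]string{"--no-such-flag"}); err != nil {
		// With ContinueOnError nothing has been printed yet; the caller
		// owns both the error message and the usage output.
		fmt.Fprintln(os.Stderr, err)
		fs.PrintDefaults()
		os.Exit(2)
	}
}
```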
diff --git a/vendor/github.com/spf13/pflag/flag_test.go b/vendor/github.com/spf13/pflag/flag_test.go
index c3def0fd4..d587752f3 100644
--- a/vendor/github.com/spf13/pflag/flag_test.go
+++ b/vendor/github.com/spf13/pflag/flag_test.go
@@ -106,8 +106,8 @@ func TestUsage(t *testing.T) {
if GetCommandLine().Parse([]string{"--x"}) == nil {
t.Error("parse did not fail for unknown flag")
}
- if !called {
- t.Error("did not call Usage for unknown flag")
+ if called {
+ t.Error("did call Usage while using ContinueOnError")
}
}
@@ -168,6 +168,7 @@ func testParse(f *FlagSet, t *testing.T) {
bool3Flag := f.Bool("bool3", false, "bool3 value")
intFlag := f.Int("int", 0, "int value")
int8Flag := f.Int8("int8", 0, "int value")
+ int16Flag := f.Int16("int16", 0, "int value")
int32Flag := f.Int32("int32", 0, "int value")
int64Flag := f.Int64("int64", 0, "int64 value")
uintFlag := f.Uint("uint", 0, "uint value")
@@ -192,6 +193,7 @@ func testParse(f *FlagSet, t *testing.T) {
"--bool3=false",
"--int=22",
"--int8=-8",
+ "--int16=-16",
"--int32=-32",
"--int64=0x23",
"--uint", "24",
@@ -236,9 +238,15 @@ func testParse(f *FlagSet, t *testing.T) {
if *int8Flag != -8 {
t.Error("int8 flag should be 0x23, is ", *int8Flag)
}
+ if *int16Flag != -16 {
+ t.Error("int16 flag should be -16, is ", *int16Flag)
+ }
if v, err := f.GetInt8("int8"); err != nil || v != *int8Flag {
t.Error("GetInt8 does not work.")
}
+ if v, err := f.GetInt16("int16"); err != nil || v != *int16Flag {
+ t.Error("GetInt16 does not work.")
+ }
if *int32Flag != -32 {
t.Error("int32 flag should be 0x23, is ", *int32Flag)
}
@@ -604,7 +612,6 @@ func aliasAndWordSepFlagNames(f *FlagSet, name string) NormalizedName {
switch name {
case oldName:
name = newName
- break
}
return NormalizedName(name)
@@ -658,6 +665,70 @@ func TestNormalizationFuncShouldChangeFlagName(t *testing.T) {
}
}
+// Related to https://github.com/spf13/cobra/issues/521.
+func TestNormalizationSharedFlags(t *testing.T) {
+ f := NewFlagSet("set f", ContinueOnError)
+ g := NewFlagSet("set g", ContinueOnError)
+ nfunc := wordSepNormalizeFunc
+ testName := "valid_flag"
+ normName := nfunc(nil, testName)
+ if testName == string(normName) {
+ t.Error("TestNormalizationSharedFlags meaningless: the original and normalized flag names are identical:", testName)
+ }
+
+ f.Bool(testName, false, "bool value")
+ g.AddFlagSet(f)
+
+ f.SetNormalizeFunc(nfunc)
+ g.SetNormalizeFunc(nfunc)
+
+ if len(f.formal) != 1 {
+ t.Error("Normalizing flags should not result in duplications in the flag set:", f.formal)
+ }
+ if f.orderedFormal[0].Name != string(normName) {
+ t.Error("Flag name not normalized")
+ }
+ for k := range f.formal {
+ if k != "valid.flag" {
+ t.Errorf("The key in the flag map should have been normalized: wanted \"%s\", got \"%s\" instead", normName, k)
+ }
+ }
+
+ if !reflect.DeepEqual(f.formal, g.formal) || !reflect.DeepEqual(f.orderedFormal, g.orderedFormal) {
+ t.Error("Two flag sets sharing the same flags should stay consistent after being normalized. Original set:", f.formal, "Duplicate set:", g.formal)
+ }
+}
+
+func TestNormalizationSetFlags(t *testing.T) {
+ f := NewFlagSet("normalized", ContinueOnError)
+ nfunc := wordSepNormalizeFunc
+ testName := "valid_flag"
+ normName := nfunc(nil, testName)
+ if testName == string(normName) {
+ t.Error("TestNormalizationSetFlags meaningless: the original and normalized flag names are identical:", testName)
+ }
+
+ f.Bool(testName, false, "bool value")
+ f.Set(testName, "true")
+ f.SetNormalizeFunc(nfunc)
+
+ if len(f.formal) != 1 {
+ t.Error("Normalizing flags should not result in duplications in the flag set:", f.formal)
+ }
+ if f.orderedFormal[0].Name != string(normName) {
+ t.Error("Flag name not normalized")
+ }
+ for k := range f.formal {
+ if k != "valid.flag" {
+ t.Errorf("The key in the flag map should have been normalized: wanted \"%s\", got \"%s\" instead", normName, k)
+ }
+ }
+
+ if !reflect.DeepEqual(f.formal, f.actual) {
+ t.Error("The map of set flags should get normalized. Formal:", f.formal, "Actual:", f.actual)
+ }
+}
+
// Declare a user-defined flag type.
type flagVar []string
@@ -978,16 +1049,17 @@ const defaultOutput = ` --A for bootstrapping, allo
--IP ip IP address with no default
--IPMask ipMask Netmask address with no default
--IPNet ipNet IP network with no default
- --Ints intSlice int slice with zero default
+ --Ints ints int slice with zero default
--N int a non-zero int (default 27)
--ND1 string[="bar"] a string with NoOptDefVal (default "foo")
--ND2 num[=4321] a num with NoOptDefVal (default 1234)
--StringArray stringArray string array with zero default
- --StringSlice stringSlice string slice with zero default
+ --StringSlice strings string slice with zero default
--Z int an int that defaults to zero
--custom custom custom Value implementation
--customP custom a VarP with default (default 10)
--maxT timeout set timeout for dial
+ -v, --verbose count verbosity
`
// Custom value that satisfies the Value interface.
@@ -1028,6 +1100,7 @@ func TestPrintDefaults(t *testing.T) {
fs.ShorthandLookup("E").NoOptDefVal = "1234"
fs.StringSlice("StringSlice", []string{}, "string slice with zero default")
fs.StringArray("StringArray", []string{}, "string array with zero default")
+ fs.CountP("verbose", "v", "verbosity")
var cv customValue
fs.Var(&cv, "custom", "custom Value implementation")
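The normalization fixes above keep `formal`, `actual`, and the ordered slices consistent when a normalize function is installed after flags are defined or set. A sketch of how a caller might rely on that (the separator choice mirrors the tests' `wordSepNormalizeFunc` and is an assumption here):

```go
package main

import (
	"fmt"
	"strings"

	flag "github.com/spf13/pflag"
)

// wordSepNormalize maps "_" and "-" to ".", so --valid_flag, --valid-flag,
// and --valid.flag all address the same flag.
func wordSepNormalize(f *flag.FlagSet, name string) flag.NormalizedName {
	return flag.NormalizedName(strings.NewReplacer("_", ".", "-", ".").Replace(name))
}

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	v := fs.Bool("valid_flag", false, "bool value")
	fs.SetNormalizeFunc(wordSepNormalize) // installed after the flag is defined

	if err := fs.Parse([]string{"--valid.flag"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*v) // true
}
```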
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
new file mode 100644
index 000000000..f1a01d05e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int16 Value
+type int16Value int16
+
+func newInt16Value(val int16, p *int16) *int16Value {
+ *p = val
+ return (*int16Value)(p)
+}
+
+func (i *int16Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 16)
+ *i = int16Value(v)
+ return err
+}
+
+func (i *int16Value) Type() string {
+ return "int16"
+}
+
+func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int16(v), nil
+}
+
+// GetInt16 returns the int16 value of a flag with the given name
+func (f *FlagSet) GetInt16(name string) (int16, error) {
+ val, err := f.getFlagType(name, "int16", int16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int16), nil
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func Int16Var(p *int16, name string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func Int16(name string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, "", value, usage)
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func Int16P(name, shorthand string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, shorthand, value, usage)
+}
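The new type plugs into the existing flag machinery, so usage is identical to the other integer widths. A minimal sketch (hypothetical demo, not part of the vendored code):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	port := fs.Int16("port", 8080, "port to listen on")

	if err := fs.Parse([]string{"--port=9090"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*port) // 9090, parsed with a 16-bit range check
}
```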
diff --git a/vendor/github.com/stretchr/objx/.codeclimate.yml b/vendor/github.com/stretchr/objx/.codeclimate.yml
new file mode 100644
index 000000000..010d4ccd5
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/.codeclimate.yml
@@ -0,0 +1,13 @@
+engines:
+ gofmt:
+ enabled: true
+ golint:
+ enabled: true
+ govet:
+ enabled: true
+
+exclude_patterns:
+- ".github/"
+- "vendor/"
+- "codegen/"
+- "doc.go"
diff --git a/vendor/github.com/stretchr/objx/.github/CODE_OF_CONDUCT.md b/vendor/github.com/stretchr/objx/.github/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..5099d59c9
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/.github/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at hanzei@mailbox.org. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/stretchr/objx/.gitignore b/vendor/github.com/stretchr/objx/.gitignore
index 00268614f..ea58090bd 100644
--- a/vendor/github.com/stretchr/objx/.gitignore
+++ b/vendor/github.com/stretchr/objx/.gitignore
@@ -1,22 +1,11 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
+# Binaries for programs and plugins
+*.exe
+*.dll
*.so
+*.dylib
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
+# Test binary, build with `go test -c`
+*.test
-_testmain.go
-
-*.exe
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/vendor/github.com/stretchr/objx/.travis.yml b/vendor/github.com/stretchr/objx/.travis.yml
new file mode 100644
index 000000000..a63efa59d
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/.travis.yml
@@ -0,0 +1,25 @@
+language: go
+go:
+ - 1.8
+ - 1.9
+ - tip
+
+env:
+ global:
+ - CC_TEST_REPORTER_ID=68feaa3410049ce73e145287acbcdacc525087a30627f96f04e579e75bd71c00
+
+before_script:
+ - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter
+ - chmod +x ./cc-test-reporter
+ - ./cc-test-reporter before-build
+
+install:
+- go get github.com/go-task/task/cmd/task
+
+script:
+- task dl-deps
+- task lint
+- task test-coverage
+
+after_script:
+ - ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT
diff --git a/vendor/github.com/stretchr/objx/Gopkg.lock b/vendor/github.com/stretchr/objx/Gopkg.lock
new file mode 100644
index 000000000..eebe342a9
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/Gopkg.lock
@@ -0,0 +1,30 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/davecgh/go-spew"
+ packages = ["spew"]
+ revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "github.com/pmezard/go-difflib"
+ packages = ["difflib"]
+ revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+ version = "v1.0.0"
+
+[[projects]]
+ name = "github.com/stretchr/testify"
+ packages = [
+ "assert",
+ "require"
+ ]
+ revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c"
+ version = "v1.2.0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "2d160a7dea4ffd13c6c31dab40373822f9d78c73beba016d662bef8f7a998876"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/stretchr/objx/Gopkg.toml b/vendor/github.com/stretchr/objx/Gopkg.toml
new file mode 100644
index 000000000..d70f1570b
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/Gopkg.toml
@@ -0,0 +1,8 @@
+[prune]
+ unused-packages = true
+ non-go = true
+ go-tests = true
+
+[[constraint]]
+ name = "github.com/stretchr/testify"
+ version = "~1.2.0"
diff --git a/vendor/github.com/stretchr/objx/LICENSE.md b/vendor/github.com/stretchr/objx/LICENSE
index 219994581..44d4d9d5a 100644
--- a/vendor/github.com/stretchr/objx/LICENSE.md
+++ b/vendor/github.com/stretchr/objx/LICENSE
@@ -1,8 +1,7 @@
-objx - by Mat Ryer and Tyler Bunnell
-
-The MIT License (MIT)
+The MIT License
Copyright (c) 2014 Stretchr, Inc.
+Copyright (c) 2017-2018 objx contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md
index 4aa180687..be5750c94 100644
--- a/vendor/github.com/stretchr/objx/README.md
+++ b/vendor/github.com/stretchr/objx/README.md
@@ -1,3 +1,80 @@
-# objx
+# Objx
+[![Build Status](https://travis-ci.org/stretchr/objx.svg?branch=master)](https://travis-ci.org/stretchr/objx)
+[![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/objx)](https://goreportcard.com/report/github.com/stretchr/objx)
+[![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability)
+[![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage)
+[![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx)
+[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx)
- * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx)
+Objx - Go package for dealing with maps, slices, JSON and other data.
+
+Get started:
+
+- Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date)
+- Check out the API Documentation http://godoc.org/github.com/stretchr/objx
+
+## Overview
+Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc.
+
+### Pattern
+Objx uses a predictable pattern to make accessing data from within a `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` and get going:
+
+ m, err := objx.FromJSON(json)
+
+NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong; the rest are optimistic and try to figure things out without panicking.
+
+Use `Get` to access the value you're interested in. You can use dot and array
+notation too:
+
+ m.Get("places[0].latlng")
+
+Once you have the `Value` you're interested in, you can use the `Is*` methods to determine its type.
+
+ if m.Get("code").IsStr() { // Your code... }
+
+Or you can just assume the type, and use one of the strong type methods to extract the real value:
+
+ m.Get("code").Int()
+
+If there's no value there (or if it's the wrong type), a default value is returned; you can also pass an explicit default:
+
+    m.Get("code").Int(-1)
+
+If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below.
+
+### Reading data
+A simple example of how to use Objx:
+
+ // Use MustFromJSON to make an objx.Map from some JSON
+ m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
+
+ // Get the details
+ name := m.Get("name").Str()
+ age := m.Get("age").Int()
+
+ // Get their nickname (or use their name if they don't have one)
+ nickname := m.Get("nickname").Str(name)
+
+### Ranging
+Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect:
+
+ m := objx.MustFromJSON(json)
+ for key, value := range m {
+ // Your code...
+ }
+
+## Installation
+To install Objx, use go get:
+
+ go get github.com/stretchr/objx
+
+### Staying up to date
+To update Objx to the latest version, run:
+
+ go get -u github.com/stretchr/objx
+
+### Supported go versions
+We support the latest two major Go versions, which are 1.8 and 1.9 at the moment.
+
+## Contributing
+Please feel free to submit issues, fork the repository and send pull requests!
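For completeness, a compilable variant of the README's reading-data snippet, assuming only the API shown above (`MustFromJSON`, `Get`, `Str`, `Int`):

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)

	name := m.Get("name").Str()
	age := m.Get("age").Int()

	// Str(name) supplies a default for the missing "nickname" key.
	nickname := m.Get("nickname").Str(name)

	fmt.Println(name, age, nickname)
}
```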
diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml
new file mode 100644
index 000000000..f8035641f
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/Taskfile.yml
@@ -0,0 +1,32 @@
+default:
+ deps: [test]
+
+dl-deps:
+ desc: Downloads cli dependencies
+ cmds:
+ - go get -u github.com/golang/lint/golint
+ - go get -u github.com/golang/dep/cmd/dep
+
+update-deps:
+ desc: Updates dependencies
+ cmds:
+ - dep ensure
+ - dep ensure -update
+
+lint:
+ desc: Runs golint
+ cmds:
+ - go fmt $(go list ./... | grep -v /vendor/)
+ - go vet $(go list ./... | grep -v /vendor/)
+ - golint $(ls *.go | grep -v "doc.go")
+ silent: true
+
+test:
+ desc: Runs go tests
+ cmds:
+ - go test -race .
+
+test-coverage:
+ desc: Runs go tests and calculates test coverage
+ cmds:
+ - go test -coverprofile=c.out .
diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go
index 721bcac79..204356a22 100644
--- a/vendor/github.com/stretchr/objx/accessors.go
+++ b/vendor/github.com/stretchr/objx/accessors.go
@@ -1,7 +1,6 @@
package objx
import (
- "fmt"
"regexp"
"strconv"
"strings"
@@ -28,7 +27,7 @@ var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString)
//
// o.Get("books[1].chapters[2].title")
func (m Map) Get(selector string) *Value {
- rawObj := access(m, selector, nil, false, false)
+ rawObj := access(m, selector, nil, false)
return &Value{data: rawObj}
}
@@ -43,47 +42,34 @@ func (m Map) Get(selector string) *Value {
//
// o.Set("books[1].chapters[2].title","Time to Go")
func (m Map) Set(selector string, value interface{}) Map {
- access(m, selector, value, true, false)
+ access(m, selector, value, true)
return m
}
// access accesses the object using the selector and performs the
// appropriate action.
-func access(current, selector, value interface{}, isSet, panics bool) interface{} {
-
+func access(current, selector, value interface{}, isSet bool) interface{} {
switch selector.(type) {
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
-
if array, ok := current.([]interface{}); ok {
index := intFromInterface(selector)
-
if index >= len(array) {
- if panics {
- panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array)))
- }
return nil
}
-
return array[index]
}
-
return nil
case string:
-
selStr := selector.(string)
selSegs := strings.SplitN(selStr, PathSeparator, 2)
thisSel := selSegs[0]
index := -1
var err error
- // https://github.com/stretchr/objx/issues/12
if strings.Contains(thisSel, "[") {
-
arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel)
-
if len(arrayMatches) > 0 {
-
// Get the key into the map
thisSel = arrayMatches[1]
@@ -95,14 +81,11 @@ func access(current, selector, value interface{}, isSet, panics bool) interface{
// seriously wrong. Panic.
panic("objx: Array index is not an integer. Must use array[int].")
}
-
}
}
-
if curMap, ok := current.(Map); ok {
current = map[string]interface{}(curMap)
}
-
// get the object in question
switch current.(type) {
case map[string]interface{}:
@@ -110,39 +93,26 @@ func access(current, selector, value interface{}, isSet, panics bool) interface{
if len(selSegs) <= 1 && isSet {
curMSI[thisSel] = value
return nil
- } else {
- current = curMSI[thisSel]
}
+ current = curMSI[thisSel]
default:
current = nil
}
-
- if current == nil && panics {
- panic(fmt.Sprintf("objx: '%v' invalid on object.", selector))
- }
-
// do we need to access the item of an array?
if index > -1 {
if array, ok := current.([]interface{}); ok {
if index < len(array) {
current = array[index]
} else {
- if panics {
- panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array)))
- }
current = nil
}
}
}
-
if len(selSegs) > 1 {
- current = access(current, selSegs[1], value, isSet, panics)
+ current = access(current, selSegs[1], value, isSet)
}
-
}
-
return current
-
}
// intFromInterface converts an interface object to the largest
@@ -172,8 +142,7 @@ func intFromInterface(selector interface{}) int {
case uint64:
value = int(selector.(uint64))
default:
- panic("objx: array access argument is not an integer type (this should never happen)")
+ return 0
}
-
return value
}
diff --git a/vendor/github.com/stretchr/objx/accessors_test.go b/vendor/github.com/stretchr/objx/accessors_test.go
index ce5d8e4aa..f6be310f7 100644
--- a/vendor/github.com/stretchr/objx/accessors_test.go
+++ b/vendor/github.com/stretchr/objx/accessors_test.go
@@ -1,145 +1,238 @@
package objx
import (
- "github.com/stretchr/testify/assert"
"testing"
+
+ "github.com/stretchr/testify/assert"
)
func TestAccessorsAccessGetSingleField(t *testing.T) {
+ current := Map{"name": "Tyler"}
- current := map[string]interface{}{"name": "Tyler"}
- assert.Equal(t, "Tyler", access(current, "name", nil, false, true))
-
+ assert.Equal(t, "Tyler", current.Get("name").Data())
}
-func TestAccessorsAccessGetDeep(t *testing.T) {
- current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}}
- assert.Equal(t, "Tyler", access(current, "name.first", nil, false, true))
- assert.Equal(t, "Bunnell", access(current, "name.last", nil, false, true))
+func TestAccessorsAccessGetSingleFieldInt(t *testing.T) {
+ current := Map{"name": 10}
+ assert.Equal(t, 10, current.Get("name").Data())
}
-func TestAccessorsAccessGetDeepDeep(t *testing.T) {
-
- current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}}
- assert.Equal(t, 4, access(current, "one.two.three.four", nil, false, true))
+func TestAccessorsAccessGetDeep(t *testing.T) {
+ current := Map{
+ "name": Map{
+ "first": "Tyler",
+ "last": "Bunnell",
+ },
+ }
+
+ assert.Equal(t, "Tyler", current.Get("name.first").Data())
+ assert.Equal(t, "Bunnell", current.Get("name.last").Data())
}
-func TestAccessorsAccessGetInsideArray(t *testing.T) {
- current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}}
- assert.Equal(t, "Tyler", access(current, "names[0].first", nil, false, true))
- assert.Equal(t, "Bunnell", access(current, "names[0].last", nil, false, true))
- assert.Equal(t, "Capitol", access(current, "names[1].first", nil, false, true))
- assert.Equal(t, "Bollocks", access(current, "names[1].last", nil, false, true))
-
- assert.Panics(t, func() {
- access(current, "names[2]", nil, false, true)
- })
- assert.Nil(t, access(current, "names[2]", nil, false, false))
+func TestAccessorsAccessGetDeepDeep(t *testing.T) {
+ current := Map{
+ "one": Map{
+ "two": Map{
+ "three": Map{
+ "four": 4,
+ },
+ },
+ },
+ }
+
+ assert.Equal(t, 4, current.Get("one.two.three.four").Data())
+}
+func TestAccessorsAccessGetInsideArray(t *testing.T) {
+ current := Map{
+ "names": []interface{}{
+ Map{
+ "first": "Tyler",
+ "last": "Bunnell",
+ },
+ Map{
+ "first": "Capitol",
+ "last": "Bollocks",
+ },
+ },
+ }
+
+ assert.Equal(t, "Tyler", current.Get("names[0].first").Data())
+ assert.Equal(t, "Bunnell", current.Get("names[0].last").Data())
+ assert.Equal(t, "Capitol", current.Get("names[1].first").Data())
+ assert.Equal(t, "Bollocks", current.Get("names[1].last").Data())
+
+ assert.Nil(t, current.Get("names[2]").Data())
}
func TestAccessorsAccessGetFromArrayWithInt(t *testing.T) {
-
- current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}
- one := access(current, 0, nil, false, false)
- two := access(current, 1, nil, false, false)
- three := access(current, 2, nil, false, false)
+ current := []interface{}{
+ map[string]interface{}{
+ "first": "Tyler",
+ "last": "Bunnell",
+ },
+ map[string]interface{}{
+ "first": "Capitol",
+ "last": "Bollocks",
+ },
+ }
+ one := access(current, 0, nil, false)
+ two := access(current, 1, nil, false)
+ three := access(current, 2, nil, false)
assert.Equal(t, "Tyler", one.(map[string]interface{})["first"])
assert.Equal(t, "Capitol", two.(map[string]interface{})["first"])
assert.Nil(t, three)
+}
+func TestAccessorsAccessGetFromArrayWithIntTypes(t *testing.T) {
+ current := []interface{}{
+ "abc",
+ "def",
+ }
+ assert.Equal(t, "abc", access(current, 0, nil, false))
+ assert.Equal(t, "def", access(current, 1, nil, false))
+ assert.Nil(t, access(current, 2, nil, false))
+
+ assert.Equal(t, "abc", access(current, int8(0), nil, false))
+ assert.Equal(t, "def", access(current, int8(1), nil, false))
+ assert.Nil(t, access(current, int8(2), nil, false))
+
+ assert.Equal(t, "abc", access(current, int16(0), nil, false))
+ assert.Equal(t, "def", access(current, int16(1), nil, false))
+ assert.Nil(t, access(current, int16(2), nil, false))
+
+ assert.Equal(t, "abc", access(current, int32(0), nil, false))
+ assert.Equal(t, "def", access(current, int32(1), nil, false))
+ assert.Nil(t, access(current, int32(2), nil, false))
+
+ assert.Equal(t, "abc", access(current, int64(0), nil, false))
+ assert.Equal(t, "def", access(current, int64(1), nil, false))
+ assert.Nil(t, access(current, int64(2), nil, false))
+
+ assert.Equal(t, "abc", access(current, uint(0), nil, false))
+ assert.Equal(t, "def", access(current, uint(1), nil, false))
+ assert.Nil(t, access(current, uint(2), nil, false))
+
+ assert.Equal(t, "abc", access(current, uint8(0), nil, false))
+ assert.Equal(t, "def", access(current, uint8(1), nil, false))
+ assert.Nil(t, access(current, uint8(2), nil, false))
+
+ assert.Equal(t, "abc", access(current, uint16(0), nil, false))
+ assert.Equal(t, "def", access(current, uint16(1), nil, false))
+ assert.Nil(t, access(current, uint16(2), nil, false))
+
+ assert.Equal(t, "abc", access(current, uint32(0), nil, false))
+ assert.Equal(t, "def", access(current, uint32(1), nil, false))
+ assert.Nil(t, access(current, uint32(2), nil, false))
+
+ assert.Equal(t, "abc", access(current, uint64(0), nil, false))
+ assert.Equal(t, "def", access(current, uint64(1), nil, false))
+ assert.Nil(t, access(current, uint64(2), nil, false))
}
-func TestAccessorsGet(t *testing.T) {
+func TestAccessorsAccessGetFromArrayWithIntError(t *testing.T) {
+ current := Map{"name": "Tyler"}
- current := New(map[string]interface{}{"name": "Tyler"})
- assert.Equal(t, "Tyler", current.Get("name").data)
+ assert.Nil(t, access(current, 0, nil, false))
+}
+func TestAccessorsGet(t *testing.T) {
+ current := Map{"name": "Tyler"}
+
+ assert.Equal(t, "Tyler", current.Get("name").Data())
}
func TestAccessorsAccessSetSingleField(t *testing.T) {
+ current := Map{"name": "Tyler"}
- current := map[string]interface{}{"name": "Tyler"}
- access(current, "name", "Mat", true, false)
- assert.Equal(t, current["name"], "Mat")
+ current.Set("name", "Mat")
+ current.Set("age", 29)
- access(current, "age", 29, true, true)
+ assert.Equal(t, current["name"], "Mat")
assert.Equal(t, current["age"], 29)
-
}
func TestAccessorsAccessSetSingleFieldNotExisting(t *testing.T) {
+ current := Map{
+ "first": "Tyler",
+ "last": "Bunnell",
+ }
- current := map[string]interface{}{}
- access(current, "name", "Mat", true, false)
- assert.Equal(t, current["name"], "Mat")
+ current.Set("name", "Mat")
+ assert.Equal(t, current["name"], "Mat")
}
func TestAccessorsAccessSetDeep(t *testing.T) {
-
- current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}}
-
- access(current, "name.first", "Mat", true, true)
- access(current, "name.last", "Ryer", true, true)
-
- assert.Equal(t, "Mat", access(current, "name.first", nil, false, true))
- assert.Equal(t, "Ryer", access(current, "name.last", nil, false, true))
-
+ current := Map{
+ "name": Map{
+ "first": "Tyler",
+ "last": "Bunnell",
+ },
+ }
+
+ current.Set("name.first", "Mat")
+ current.Set("name.last", "Ryer")
+
+ assert.Equal(t, "Mat", current.Get("name.first").Data())
+ assert.Equal(t, "Ryer", current.Get("name.last").Data())
}
-func TestAccessorsAccessSetDeepDeep(t *testing.T) {
-
- current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}}
-
- access(current, "one.two.three.four", 5, true, true)
-
- assert.Equal(t, 5, access(current, "one.two.three.four", nil, false, true))
+func TestAccessorsAccessSetDeepDeep(t *testing.T) {
+ current := Map{
+ "one": Map{
+ "two": Map{
+ "three": Map{
+ "four": 4},
+ },
+ },
+ }
+
+ current.Set("one.two.three.four", 5)
+
+ assert.Equal(t, 5, current.Get("one.two.three.four").Data())
}
-func TestAccessorsAccessSetArray(t *testing.T) {
-
- current := map[string]interface{}{"names": []interface{}{"Tyler"}}
-
- access(current, "names[0]", "Mat", true, true)
- assert.Equal(t, "Mat", access(current, "names[0]", nil, false, true))
-
-}
-func TestAccessorsAccessSetInsideArray(t *testing.T) {
-
- current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}}
-
- access(current, "names[0].first", "Mat", true, true)
- access(current, "names[0].last", "Ryer", true, true)
- access(current, "names[1].first", "Captain", true, true)
- access(current, "names[1].last", "Underpants", true, true)
-
- assert.Equal(t, "Mat", access(current, "names[0].first", nil, false, true))
- assert.Equal(t, "Ryer", access(current, "names[0].last", nil, false, true))
- assert.Equal(t, "Captain", access(current, "names[1].first", nil, false, true))
- assert.Equal(t, "Underpants", access(current, "names[1].last", nil, false, true))
+func TestAccessorsAccessSetArray(t *testing.T) {
+ current := Map{
+ "names": []interface{}{"Tyler"},
+ }
+ current.Set("names[0]", "Mat")
+ assert.Equal(t, "Mat", current.Get("names[0]").Data())
}
-func TestAccessorsAccessSetFromArrayWithInt(t *testing.T) {
-
- current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}
- one := access(current, 0, nil, false, false)
- two := access(current, 1, nil, false, false)
- three := access(current, 2, nil, false, false)
-
- assert.Equal(t, "Tyler", one.(map[string]interface{})["first"])
- assert.Equal(t, "Capitol", two.(map[string]interface{})["first"])
- assert.Nil(t, three)
-
+func TestAccessorsAccessSetInsideArray(t *testing.T) {
+ current := Map{
+ "names": []interface{}{
+ Map{
+ "first": "Tyler",
+ "last": "Bunnell",
+ },
+ Map{
+ "first": "Capitol",
+ "last": "Bollocks",
+ },
+ },
+ }
+
+ current.Set("names[0].first", "Mat")
+ current.Set("names[0].last", "Ryer")
+ current.Set("names[1].first", "Captain")
+ current.Set("names[1].last", "Underpants")
+
+ assert.Equal(t, "Mat", current.Get("names[0].first").Data())
+ assert.Equal(t, "Ryer", current.Get("names[0].last").Data())
+ assert.Equal(t, "Captain", current.Get("names[1].first").Data())
+ assert.Equal(t, "Underpants", current.Get("names[1].last").Data())
}
func TestAccessorsSet(t *testing.T) {
+ current := Map{"name": "Tyler"}
- current := New(map[string]interface{}{"name": "Tyler"})
current.Set("name", "Mat")
- assert.Equal(t, "Mat", current.Get("name").data)
+ assert.Equal(t, "Mat", current.Get("name").data)
}
diff --git a/vendor/github.com/stretchr/objx/codegen/template.txt b/vendor/github.com/stretchr/objx/codegen/template.txt
index b396900b8..047bfc13f 100644
--- a/vendor/github.com/stretchr/objx/codegen/template.txt
+++ b/vendor/github.com/stretchr/objx/codegen/template.txt
@@ -1,6 +1,5 @@
/*
{4} ({1} and []{1})
- --------------------------------------------------
*/
// {4} gets the value as a {1}, returns the optionalDefault
@@ -58,44 +57,35 @@ func (v *Value) Is{4}Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) Each{4}(callback func(int, {1}) bool) *Value {
-
for index, val := range v.Must{4}Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// Where{4} uses the specified decider function to select items
// from the []{1}. The object contained in the result will contain
// only the selected items.
func (v *Value) Where{4}(decider func(int, {1}) bool) *Value {
-
var selected []{1}
-
v.Each{4}(func(index int, val {1}) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data:selected}
-
}
// Group{4} uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]{1}.
func (v *Value) Group{4}(grouper func(int, {1}) string) *Value {
-
groups := make(map[string][]{1})
-
v.Each{4}(func(index int, val {1}) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -104,183 +94,31 @@ func (v *Value) Group{4}(grouper func(int, {1}) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data:groups}
-
}
// Replace{4} uses the specified function to replace each {1}s
// by iterating each item. The data in the returned result will be a
// []{1} containing the replaced items.
func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value {
-
arr := v.Must{4}Slice()
replaced := make([]{1}, len(arr))
-
v.Each{4}(func(index int, val {1}) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data:replaced}
-
}
// Collect{4} uses the specified collector function to collect a value
// for each of the {1}s in the slice. The data returned will be a
// []interface{}.
func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value {
-
arr := v.Must{4}Slice()
collected := make([]interface{}, len(arr))
-
v.Each{4}(func(index int, val {1}) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data:collected}
}
-
-// ************************************************************
-// TESTS
-// ************************************************************
-
-func Test{4}(t *testing.T) {
-
- val := {1}( {2} )
- m := map[string]interface{}{"value": val, "nothing": nil}
- assert.Equal(t, val, New(m).Get("value").{4}())
- assert.Equal(t, val, New(m).Get("value").Must{4}())
- assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}())
- assert.Equal(t, val, New(m).Get("nothing").{4}({2}))
-
- assert.Panics(t, func() {
- New(m).Get("age").Must{4}()
- })
-
-}
-
-func Test{4}Slice(t *testing.T) {
-
- val := {1}( {2} )
- m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil}
- assert.Equal(t, val, New(m).Get("value").{4}Slice()[0])
- assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0])
- assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice())
- assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0])
-
- assert.Panics(t, func() {
- New(m).Get("nothing").Must{4}Slice()
- })
-
-}
-
-func TestIs{4}(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: {1}({2})}
- assert.True(t, v.Is{4}())
-
- v = &Value{data: []{1}{ {1}({2}) }}
- assert.True(t, v.Is{4}Slice())
-
-}
-
-func TestEach{4}(t *testing.T) {
-
- v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
- count := 0
- replacedVals := make([]{1}, 0)
- assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool {
-
- count++
- replacedVals = append(replacedVals, val)
-
- // abort early
- if i == 2 {
- return false
- }
-
- return true
-
- }))
-
- assert.Equal(t, count, 3)
- assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0])
- assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1])
- assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2])
-
-}
-
-func TestWhere{4}(t *testing.T) {
-
- v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
-
- selected := v.Where{4}(func(i int, val {1}) bool {
- return i%2==0
- }).Must{4}Slice()
-
- assert.Equal(t, 3, len(selected))
-
-}
-
-func TestGroup{4}(t *testing.T) {
-
- v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
-
- grouped := v.Group{4}(func(i int, val {1}) string {
- return fmt.Sprintf("%v", i%2==0)
- }).data.(map[string][]{1})
-
- assert.Equal(t, 2, len(grouped))
- assert.Equal(t, 3, len(grouped["true"]))
- assert.Equal(t, 3, len(grouped["false"]))
-
-}
-
-func TestReplace{4}(t *testing.T) {
-
- v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
-
- rawArr := v.Must{4}Slice()
-
- replaced := v.Replace{4}(func(index int, val {1}) {1} {
- if index < len(rawArr)-1 {
- return rawArr[index+1]
- }
- return rawArr[0]
- })
-
- replacedArr := replaced.Must{4}Slice()
- if assert.Equal(t, 6, len(replacedArr)) {
- assert.Equal(t, replacedArr[0], rawArr[1])
- assert.Equal(t, replacedArr[1], rawArr[2])
- assert.Equal(t, replacedArr[2], rawArr[3])
- assert.Equal(t, replacedArr[3], rawArr[4])
- assert.Equal(t, replacedArr[4], rawArr[5])
- assert.Equal(t, replacedArr[5], rawArr[0])
- }
-
-}
-
-func TestCollect{4}(t *testing.T) {
-
- v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
-
- collected := v.Collect{4}(func(index int, val {1}) interface{} {
- return index
- })
-
- collectedArr := collected.MustInterSlice()
- if assert.Equal(t, 6, len(collectedArr)) {
- assert.Equal(t, collectedArr[0], 0)
- assert.Equal(t, collectedArr[1], 1)
- assert.Equal(t, collectedArr[2], 2)
- assert.Equal(t, collectedArr[3], 3)
- assert.Equal(t, collectedArr[4], 4)
- assert.Equal(t, collectedArr[5], 5)
- }
-
-}
diff --git a/vendor/github.com/stretchr/objx/codegen/template_test.txt b/vendor/github.com/stretchr/objx/codegen/template_test.txt
new file mode 100644
index 000000000..7643af390
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/codegen/template_test.txt
@@ -0,0 +1,121 @@
+/*
+ Tests for {4} ({1} and []{1})
+*/
+
+func Test{4}(t *testing.T) {
+ val := {1}( {2} )
+
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").{4}())
+ assert.Equal(t, val, New(m).Get("value").Must{4}())
+ assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}())
+ assert.Equal(t, val, New(m).Get("nothing").{4}({2}))
+ assert.Panics(t, func() {
+ New(m).Get("age").Must{4}()
+ })
+}
+
+func Test{4}Slice(t *testing.T) {
+ val := {1}( {2} )
+
+ m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").{4}Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0])
+ assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice())
+ assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0])
+ assert.Panics(t, func() {
+ New(m).Get("nothing").Must{4}Slice()
+ })
+}
+
+func TestIs{4}(t *testing.T) {
+ v := &Value{data: {1}({2})}
+ assert.True(t, v.Is{4}())
+
+}
+
+func TestIs{4}Slice(t *testing.T) {
+ v := &Value{data: []{1}{ {1}({2}) }}
+ assert.True(t, v.Is{4}Slice())
+}
+
+func TestEach{4}(t *testing.T) {
+ v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
+ count := 0
+ replacedVals := make([]{1}, 0)
+ assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool {
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ return i != 2
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0])
+ assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1])
+ assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2])
+}
+
+func TestWhere{4}(t *testing.T) {
+ v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
+
+ selected := v.Where{4}(func(i int, val {1}) bool {
+ return i%2==0
+ }).Must{4}Slice()
+
+ assert.Equal(t, 3, len(selected))
+}
+
+func TestGroup{4}(t *testing.T) {
+ v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
+
+ grouped := v.Group{4}(func(i int, val {1}) string {
+ return fmt.Sprintf("%v", i%2==0)
+ }).data.(map[string][]{1})
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+}
+
+func TestReplace{4}(t *testing.T) {
+ v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
+
+ rawArr := v.Must{4}Slice()
+
+ replaced := v.Replace{4}(func(index int, val {1}) {1} {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.Must{4}Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+}
+
+func TestCollect{4}(t *testing.T) {
+ v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
+
+ collected := v.Collect{4}(func(index int, val {1}) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+}
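
Editor's note: the placeholders in this template appear to map as follows: {1} is the Go type, {2} a sample literal, {3} the type's default/zero value, and {4} the accessor-name suffix used throughout (Str, Int, Bool, ...). Under that assumption, the first template function plausibly expands for the string type roughly as below (written here as an external test for self-containment; the generated file itself lives in package objx):

    package objx_test

    import (
        "testing"

        "github.com/stretchr/objx"
        "github.com/stretchr/testify/assert"
    )

    // Plausible expansion of Test{4} with {1}=string, {2}="hello",
    // {3}="" and {4}=Str.
    func TestStrExpanded(t *testing.T) {
        val := string("hello")

        m := map[string]interface{}{"value": val, "nothing": nil}
        assert.Equal(t, val, objx.New(m).Get("value").Str())
        assert.Equal(t, val, objx.New(m).Get("value").MustStr())
        assert.Equal(t, string(""), objx.New(m).Get("nothing").Str())
        assert.Equal(t, val, objx.New(m).Get("nothing").Str("hello"))
        assert.Panics(t, func() {
            // "age" is absent, so the Must variant panics.
            objx.New(m).Get("age").MustStr()
        })
    }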
diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go
index 9cdfa9f9f..5e020f310 100644
--- a/vendor/github.com/stretchr/objx/conversions.go
+++ b/vendor/github.com/stretchr/objx/conversions.go
@@ -12,15 +12,11 @@ import (
// JSON converts the contained object to a JSON string
// representation
func (m Map) JSON() (string, error) {
-
result, err := json.Marshal(m)
-
if err != nil {
err = errors.New("objx: JSON encode failed with: " + err.Error())
}
-
return string(result), err
-
}
// MustJSON converts the contained object to a JSON string
@@ -36,7 +32,6 @@ func (m Map) MustJSON() string {
// Base64 converts the contained object to a Base64 string
// representation of the JSON string representation
func (m Map) Base64() (string, error) {
-
var buf bytes.Buffer
jsonData, err := m.JSON()
@@ -45,11 +40,13 @@ func (m Map) Base64() (string, error) {
}
encoder := base64.NewEncoder(base64.StdEncoding, &buf)
- encoder.Write([]byte(jsonData))
- encoder.Close()
+ _, err = encoder.Write([]byte(jsonData))
+ if err != nil {
+ return "", err
+ }
+ _ = encoder.Close()
return buf.String(), nil
-
}
// MustBase64 converts the contained object to a Base64 string
@@ -67,16 +64,13 @@ func (m Map) MustBase64() string {
// representation of the JSON string representation and signs it
// using the provided key.
func (m Map) SignedBase64(key string) (string, error) {
-
base64, err := m.Base64()
if err != nil {
return "", err
}
sig := HashWithKey(base64, key)
-
return base64 + SignatureSeparator + sig, nil
-
}
// MustSignedBase64 converts the contained object to a Base64 string
@@ -98,14 +92,11 @@ func (m Map) MustSignedBase64(key string) string {
// URLValues creates a url.Values object from an Obj. This
// function requires that the wrapped object be a map[string]interface{}
func (m Map) URLValues() url.Values {
-
vals := make(url.Values)
-
for k, v := range m {
//TODO: can this be done without sprintf?
vals.Set(k, fmt.Sprintf("%v", v))
}
-
return vals
}
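
Editor's note: the hunk above makes Base64 propagate encoder write errors instead of discarding them. A minimal round-trip sketch of the two halves of that API (FromBase64 is defined in map.go, further down in this diff); the expected encoded value comes straight from the tests:

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        m := objx.Map{"name": "Mat"}

        // Encode the map's JSON representation as Base64.
        enc, err := m.Base64()
        if err != nil {
            panic(err)
        }
        fmt.Println(enc) // eyJuYW1lIjoiTWF0In0=

        // Decode it back into a Map.
        decoded, err := objx.FromBase64(enc)
        if err != nil {
            panic(err)
        }
        fmt.Println(decoded.Get("name").Str()) // Mat
    }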
diff --git a/vendor/github.com/stretchr/objx/conversions_test.go b/vendor/github.com/stretchr/objx/conversions_test.go
index e9ccd2987..4584208d8 100644
--- a/vendor/github.com/stretchr/objx/conversions_test.go
+++ b/vendor/github.com/stretchr/objx/conversions_test.go
@@ -1,28 +1,27 @@
-package objx
+package objx_test
import (
- "github.com/stretchr/testify/assert"
+ "net/url"
"testing"
+
+ "github.com/stretchr/objx"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestConversionJSON(t *testing.T) {
-
jsonString := `{"name":"Mat"}`
- o := MustFromJSON(jsonString)
+ o := objx.MustFromJSON(jsonString)
result, err := o.JSON()
- if assert.NoError(t, err) {
- assert.Equal(t, jsonString, result)
- }
-
+ require.NoError(t, err)
+ assert.Equal(t, jsonString, result)
assert.Equal(t, jsonString, o.MustJSON())
-
}
func TestConversionJSONWithError(t *testing.T) {
-
- o := MSI()
+ o := objx.MSI()
o["test"] = func() {}
assert.Panics(t, func() {
@@ -32,26 +31,20 @@ func TestConversionJSONWithError(t *testing.T) {
_, err := o.JSON()
assert.Error(t, err)
-
}
func TestConversionBase64(t *testing.T) {
-
- o := New(map[string]interface{}{"name": "Mat"})
+ o := objx.Map{"name": "Mat"}
result, err := o.Base64()
- if assert.NoError(t, err) {
- assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result)
- }
-
+ require.NoError(t, err)
+ assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result)
assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", o.MustBase64())
-
}
func TestConversionBase64WithError(t *testing.T) {
-
- o := MSI()
+ o := objx.MSI()
o["test"] = func() {}
assert.Panics(t, func() {
@@ -61,26 +54,20 @@ func TestConversionBase64WithError(t *testing.T) {
_, err := o.Base64()
assert.Error(t, err)
-
}
func TestConversionSignedBase64(t *testing.T) {
-
- o := New(map[string]interface{}{"name": "Mat"})
+ o := objx.Map{"name": "Mat"}
result, err := o.SignedBase64("key")
- if assert.NoError(t, err) {
- assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result)
- }
-
+ require.NoError(t, err)
+ assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result)
assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", o.MustSignedBase64("key"))
-
}
func TestConversionSignedBase64WithError(t *testing.T) {
-
- o := MSI()
+ o := objx.MSI()
o["test"] = func() {}
assert.Panics(t, func() {
@@ -90,5 +77,20 @@ func TestConversionSignedBase64WithError(t *testing.T) {
_, err := o.SignedBase64("key")
assert.Error(t, err)
+}
+
+func TestConversionURLValues(t *testing.T) {
+ m := objx.Map{"abc": 123, "name": "Mat"}
+ u := m.URLValues()
+
+ assert.Equal(t, url.Values{"abc": []string{"123"}, "name": []string{"Mat"}}, u)
+}
+
+func TestConversionURLQuery(t *testing.T) {
+ m := objx.Map{"abc": 123, "name": "Mat"}
+ u, err := m.URLQuery()
+ assert.Nil(t, err)
+ require.NotNil(t, u)
+ assert.Equal(t, "abc=123&name=Mat", u)
}
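
Editor's note: the new TestConversionURLValues exercises URLValues from conversions.go above. A small usage sketch; url.Values.Encode sorts by key, which is why the query string asserted in TestConversionURLQuery is deterministic:

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        m := objx.Map{"abc": 123, "name": "Mat"}

        // Every value is stringified with fmt.Sprintf("%v", ...).
        vals := m.URLValues()

        // Encode sorts by key, so the output is stable.
        fmt.Println(vals.Encode()) // abc=123&name=Mat
    }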
diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go
index 47bf85e46..6d6af1a83 100644
--- a/vendor/github.com/stretchr/objx/doc.go
+++ b/vendor/github.com/stretchr/objx/doc.go
@@ -1,72 +1,66 @@
-// objx - Go package for dealing with maps, slices, JSON and other data.
-//
-// Overview
-//
-// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes
-// a powerful `Get` method (among others) that allows you to easily and quickly get
-// access to data within the map, without having to worry too much about type assertions,
-// missing data, default values etc.
-//
-// Pattern
-//
-// Objx uses a preditable pattern to make access data from within `map[string]interface{}'s
-// easy.
-//
-// Call one of the `objx.` functions to create your `objx.Map` to get going:
-//
-// m, err := objx.FromJSON(json)
-//
-// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong,
-// the rest will be optimistic and try to figure things out without panicking.
-//
-// Use `Get` to access the value you're interested in. You can use dot and array
-// notation too:
-//
-// m.Get("places[0].latlng")
-//
-// Once you have saught the `Value` you're interested in, you can use the `Is*` methods
-// to determine its type.
-//
-// if m.Get("code").IsStr() { /* ... */ }
-//
-// Or you can just assume the type, and use one of the strong type methods to
-// extract the real value:
-//
-// m.Get("code").Int()
-//
-// If there's no value there (or if it's the wrong type) then a default value
-// will be returned, or you can be explicit about the default value.
-//
-// Get("code").Int(-1)
-//
-// If you're dealing with a slice of data as a value, Objx provides many useful
-// methods for iterating, manipulating and selecting that data. You can find out more
-// by exploring the index below.
-//
-// Reading data
-//
-// A simple example of how to use Objx:
-//
-// // use MustFromJSON to make an objx.Map from some JSON
-// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
-//
-// // get the details
-// name := m.Get("name").Str()
-// age := m.Get("age").Int()
-//
-// // get their nickname (or use their name if they
-// // don't have one)
-// nickname := m.Get("nickname").Str(name)
-//
-// Ranging
-//
-// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For
-// example, to `range` the data, do what you would expect:
-//
-// m := objx.MustFromJSON(json)
-// for key, value := range m {
-//
-// /* ... do your magic ... */
-//
-// }
+/*
+Objx - Go package for dealing with maps, slices, JSON and other data.
+
+Overview
+
+Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes
+a powerful `Get` method (among others) that allows you to easily and quickly get
+access to data within the map, without having to worry too much about type assertions,
+missing data, default values etc.
+
+Pattern
+
+Objx uses a predictable pattern to make accessing data from within `map[string]interface{}` easy.
+Call one of the `objx.` functions to create your `objx.Map` to get going:
+
+ m, err := objx.FromJSON(json)
+
+NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong,
+the rest will be optimistic and try to figure things out without panicking.
+
+Use `Get` to access the value you're interested in. You can use dot and array
+notation too:
+
+ m.Get("places[0].latlng")
+
+Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type.
+
+ if m.Get("code").IsStr() { // Your code... }
+
+Or you can just assume the type, and use one of the strong type methods to extract the real value:
+
+ m.Get("code").Int()
+
+If there's no value there (or if it's the wrong type) then a default value will be returned,
+or you can be explicit about the default value.
+
+ Get("code").Int(-1)
+
+If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating,
+manipulating and selecting that data. You can find out more by exploring the index below.
+
+Reading data
+
+A simple example of how to use Objx:
+
+ // Use MustFromJSON to make an objx.Map from some JSON
+ m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
+
+ // Get the details
+ name := m.Get("name").Str()
+ age := m.Get("age").Int()
+
+ // Get their nickname (or use their name if they don't have one)
+ nickname := m.Get("nickname").Str(name)
+
+Ranging
+
+Since `objx.Map` is a `map[string]interface{}` you can treat it as such.
+For example, to `range` the data, do what you would expect:
+
+ m := objx.MustFromJSON(json)
+ for key, value := range m {
+ // Your code...
+ }
+*/
package objx
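
Editor's note: a runnable version of the patterns the rewritten doc comment describes, as a minimal sketch. One subtlety the doc glosses over: encoding/json decodes numbers as float64, so the strongly-typed Int accessor would fall back to its default on JSON input and Float64 is the safer read:

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        m := objx.MustFromJSON(`{"name": "Mat", "age": 30, "tags": ["go", "json"]}`)

        // Dot and array notation both work with Get.
        fmt.Println(m.Get("tags[0]").Str()) // go

        // JSON numbers arrive as float64; a missing key falls back
        // to the supplied default.
        fmt.Println(m.Get("age").Float64())     // 30
        fmt.Println(m.Get("score").Float64(-1)) // -1

        // Must* variants panic instead of returning a default.
        fmt.Println(m.Get("name").MustStr()) // Mat
    }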
diff --git a/vendor/github.com/stretchr/objx/fixture_test.go b/vendor/github.com/stretchr/objx/fixture_test.go
index 27f7d9049..cefe8cdc6 100644
--- a/vendor/github.com/stretchr/objx/fixture_test.go
+++ b/vendor/github.com/stretchr/objx/fixture_test.go
@@ -1,8 +1,10 @@
-package objx
+package objx_test
import (
- "github.com/stretchr/testify/assert"
"testing"
+
+ "github.com/stretchr/objx"
+ "github.com/stretchr/testify/assert"
)
var fixtures = []struct {
@@ -79,20 +81,16 @@ var fixtures = []struct {
}
func TestFixtures(t *testing.T) {
-
for _, fixture := range fixtures {
-
- m := MustFromJSON(fixture.data)
+ m := objx.MustFromJSON(fixture.data)
// get the value
t.Logf("Running get fixture: \"%s\" (%v)", fixture.name, fixture)
value := m.Get(fixture.get.(string))
// make sure it matches
- assert.Equal(t, fixture.output, value.data,
+ assert.Equal(t, fixture.output, value.Data(),
"Get fixture \"%s\" failed: %v", fixture.name, fixture,
)
-
}
-
}
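
Editor's note: fixture_test.go now lives in the external objx_test package, so it reads the wrapped value through the exported Data() accessor rather than the unexported data field. For example:

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        m := objx.MustFromJSON(`{"name": "Mat"}`)

        // Data returns the raw value wrapped by *Value, which is all
        // an external package can see.
        v := m.Get("name")
        fmt.Printf("%T %v\n", v.Data(), v.Data()) // string Mat
    }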
diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go
index eb6ed8e28..406bc8926 100644
--- a/vendor/github.com/stretchr/objx/map.go
+++ b/vendor/github.com/stretchr/objx/map.go
@@ -27,7 +27,7 @@ func (m Map) Value() *Value {
}
// Nil represents a nil Map.
-var Nil Map = New(nil)
+var Nil = New(nil)
// New creates a new Map containing the map[string]interface{} in the data argument.
// If the data argument is not a map[string]interface, New attempts to call the
@@ -47,9 +47,8 @@ func New(data interface{}) Map {
//
// The arguments follow a key, value pattern.
//
-// Panics
//
-// Panics if any key arugment is non-string or if there are an odd number of arguments.
+// Returns nil if any key argument is non-string or if there is an odd number of arguments.
//
// Example
//
@@ -58,32 +57,25 @@ func New(data interface{}) Map {
// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true))
//
// // creates an Map equivalent to
-// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}})
+// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}}
func MSI(keyAndValuePairs ...interface{}) Map {
-
- newMap := make(map[string]interface{})
+ newMap := Map{}
keyAndValuePairsLen := len(keyAndValuePairs)
-
if keyAndValuePairsLen%2 != 0 {
- panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.")
+ return nil
}
-
for i := 0; i < keyAndValuePairsLen; i = i + 2 {
-
key := keyAndValuePairs[i]
value := keyAndValuePairs[i+1]
// make sure the key is a string
keyString, keyStringOK := key.(string)
if !keyStringOK {
- panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.")
+ return nil
}
-
newMap[keyString] = value
-
}
-
- return New(newMap)
+ return newMap
}
// ****** Conversion Constructors
@@ -94,11 +86,9 @@ func MSI(keyAndValuePairs ...interface{}) Map {
// Panics if the JSON is invalid.
func MustFromJSON(jsonString string) Map {
o, err := FromJSON(jsonString)
-
if err != nil {
panic("objx: MustFromJSON failed with error: " + err.Error())
}
-
return o
}
@@ -107,16 +97,12 @@ func MustFromJSON(jsonString string) Map {
//
// Returns an error if the JSON is invalid.
func FromJSON(jsonString string) (Map, error) {
-
var data interface{}
err := json.Unmarshal([]byte(jsonString), &data)
-
if err != nil {
return Nil, err
}
-
return New(data), nil
-
}
// FromBase64 creates a new Obj containing the data specified
@@ -124,14 +110,11 @@ func FromJSON(jsonString string) (Map, error) {
//
// The string is an encoded JSON string returned by Base64
func FromBase64(base64String string) (Map, error) {
-
decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String))
-
decoded, err := ioutil.ReadAll(decoder)
if err != nil {
return nil, err
}
-
return FromJSON(string(decoded))
}
@@ -140,13 +123,10 @@ func FromBase64(base64String string) (Map, error) {
//
// The string is an encoded JSON string returned by Base64
func MustFromBase64(base64String string) Map {
-
result, err := FromBase64(base64String)
-
if err != nil {
panic("objx: MustFromBase64 failed with error: " + err.Error())
}
-
return result
}
@@ -157,14 +137,13 @@ func MustFromBase64(base64String string) Map {
func FromSignedBase64(base64String, key string) (Map, error) {
parts := strings.Split(base64String, SignatureSeparator)
if len(parts) != 2 {
- return nil, errors.New("objx: Signed base64 string is malformed.")
+ return nil, errors.New("objx: Signed base64 string is malformed")
}
sig := HashWithKey(parts[0], key)
if parts[1] != sig {
- return nil, errors.New("objx: Signature for base64 data does not match.")
+ return nil, errors.New("objx: Signature for base64 data does not match")
}
-
return FromBase64(parts[0])
}
@@ -173,13 +152,10 @@ func FromSignedBase64(base64String, key string) (Map, error) {
//
// The string is an encoded JSON string returned by Base64
func MustFromSignedBase64(base64String, key string) Map {
-
result, err := FromSignedBase64(base64String, key)
-
if err != nil {
panic("objx: MustFromSignedBase64 failed with error: " + err.Error())
}
-
return result
}
@@ -188,19 +164,15 @@ func MustFromSignedBase64(base64String, key string) Map {
//
// For queries with multiple values, the first value is selected.
func FromURLQuery(query string) (Map, error) {
-
vals, err := url.ParseQuery(query)
-
if err != nil {
return nil, err
}
-
- m := make(map[string]interface{})
+ m := Map{}
for k, vals := range vals {
m[k] = vals[0]
}
-
- return New(m), nil
+ return m, nil
}
// MustFromURLQuery generates a new Obj by parsing the specified
@@ -210,13 +182,9 @@ func FromURLQuery(query string) (Map, error) {
//
// Panics if it encounters an error
func MustFromURLQuery(query string) Map {
-
o, err := FromURLQuery(query)
-
if err != nil {
panic("objx: MustFromURLQuery failed with error: " + err.Error())
}
-
return o
-
}
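
Editor's note: the behavioural change worth flagging in this hunk is that MSI now returns nil on malformed input instead of panicking, so callers that relied on the panic should check for nil. A sketch:

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        // Well-formed key/value pairs build a Map.
        m := objx.MSI("name", "Mat", "age", 29)
        fmt.Println(m.Get("name").Str()) // Mat

        // Malformed arguments now yield nil instead of panicking:
        // an odd number of arguments...
        fmt.Println(objx.MSI("name") == nil) // true
        // ...or a non-string key.
        fmt.Println(objx.MSI(1, "a") == nil) // true
    }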
diff --git a/vendor/github.com/stretchr/objx/map_test.go b/vendor/github.com/stretchr/objx/map_test.go
index 1f8b45c61..aa8e536b8 100644
--- a/vendor/github.com/stretchr/objx/map_test.go
+++ b/vendor/github.com/stretchr/objx/map_test.go
@@ -1,147 +1,172 @@
-package objx
+package objx_test
import (
- "github.com/stretchr/testify/assert"
"testing"
+
+ "github.com/stretchr/objx"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
-type Convertable struct {
- name string
+var TestMap = objx.Map{
+ "name": "Tyler",
+ "address": objx.Map{
+ "city": "Salt Lake City",
+ "state": "UT",
+ },
+ "numbers": []interface{}{"one", "two", "three", "four", "five"},
}
-func (c *Convertable) MSI() map[string]interface{} {
- return map[string]interface{}{"name": c.name}
+type Convertable struct {
+ name string
}
type Unconvertable struct {
name string
}
-func TestMapCreation(t *testing.T) {
+func (c *Convertable) MSI() map[string]interface{} {
+ return objx.Map{"name": c.name}
+}
- o := New(nil)
+func TestMapCreation(t *testing.T) {
+ o := objx.New(nil)
assert.Nil(t, o)
- o = New("Tyler")
+ o = objx.New("Tyler")
assert.Nil(t, o)
unconvertable := &Unconvertable{name: "Tyler"}
- o = New(unconvertable)
+ o = objx.New(unconvertable)
assert.Nil(t, o)
convertable := &Convertable{name: "Tyler"}
- o = New(convertable)
- if assert.NotNil(t, convertable) {
- assert.Equal(t, "Tyler", o["name"], "Tyler")
- }
+ o = objx.New(convertable)
+ require.NotNil(t, convertable)
+ assert.Equal(t, "Tyler", o["name"])
- o = MSI()
- if assert.NotNil(t, o) {
- assert.NotNil(t, o)
- }
+ o = objx.MSI()
+ assert.NotNil(t, o)
- o = MSI("name", "Tyler")
- if assert.NotNil(t, o) {
- if assert.NotNil(t, o) {
- assert.Equal(t, o["name"], "Tyler")
- }
+ o = objx.MSI("name", "Tyler")
+ require.NotNil(t, o)
+ assert.Equal(t, o["name"], "Tyler")
+
+ o = objx.MSI(1, "a")
+ assert.Nil(t, o)
+
+ o = objx.MSI("a")
+ assert.Nil(t, o)
+
+ o = objx.MSI("a", "b", "c")
+ assert.Nil(t, o)
+}
+
+func TestMapValue(t *testing.T) {
+ m := objx.Map{
+ "a": 1,
}
+ v := m.Value()
+ assert.Equal(t, m, v.ObjxMap())
}
func TestMapMustFromJSONWithError(t *testing.T) {
-
- _, err := FromJSON(`"name":"Mat"}`)
+ _, err := objx.FromJSON(`"name":"Mat"}`)
assert.Error(t, err)
-
}
func TestMapFromJSON(t *testing.T) {
+ o := objx.MustFromJSON(`{"name":"Mat"}`)
- o := MustFromJSON(`{"name":"Mat"}`)
-
- if assert.NotNil(t, o) {
- if assert.NotNil(t, o) {
- assert.Equal(t, "Mat", o["name"])
- }
- }
-
+ require.NotNil(t, o)
+ assert.Equal(t, "Mat", o["name"])
}
func TestMapFromJSONWithError(t *testing.T) {
-
- var m Map
+ var m objx.Map
assert.Panics(t, func() {
- m = MustFromJSON(`"name":"Mat"}`)
+ m = objx.MustFromJSON(`"name":"Mat"}`)
})
-
assert.Nil(t, m)
-
}
func TestMapFromBase64String(t *testing.T) {
-
base64String := "eyJuYW1lIjoiTWF0In0="
+ o, err := objx.FromBase64(base64String)
- o, err := FromBase64(base64String)
-
- if assert.NoError(t, err) {
- assert.Equal(t, o.Get("name").Str(), "Mat")
- }
-
- assert.Equal(t, MustFromBase64(base64String).Get("name").Str(), "Mat")
-
+ require.NoError(t, err)
+ assert.Equal(t, o.Get("name").Str(), "Mat")
+ assert.Equal(t, objx.MustFromBase64(base64String).Get("name").Str(), "Mat")
}
func TestMapFromBase64StringWithError(t *testing.T) {
-
base64String := "eyJuYW1lIjoiTWFasd0In0="
-
- _, err := FromBase64(base64String)
+ _, err := objx.FromBase64(base64String)
assert.Error(t, err)
-
assert.Panics(t, func() {
- MustFromBase64(base64String)
+ objx.MustFromBase64(base64String)
})
-
}
func TestMapFromSignedBase64String(t *testing.T) {
-
base64String := "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6"
- o, err := FromSignedBase64(base64String, "key")
-
- if assert.NoError(t, err) {
- assert.Equal(t, o.Get("name").Str(), "Mat")
- }
-
- assert.Equal(t, MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat")
+ o, err := objx.FromSignedBase64(base64String, "key")
+ require.NoError(t, err)
+ assert.Equal(t, o.Get("name").Str(), "Mat")
+ assert.Equal(t, objx.MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat")
}
func TestMapFromSignedBase64StringWithError(t *testing.T) {
-
base64String := "eyJuYW1lasdIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6"
-
- _, err := FromSignedBase64(base64String, "key")
-
+ _, err := objx.FromSignedBase64(base64String, "key")
assert.Error(t, err)
+ assert.Panics(t, func() {
+ objx.MustFromSignedBase64(base64String, "key")
+ })
+ base64String = "eyJuYW1lasdIjoiTWF0In0=67ee82916f90b2c0d68c903266e8998c9ef0c3d6"
+ _, err = objx.FromSignedBase64(base64String, "key")
+ assert.Error(t, err)
assert.Panics(t, func() {
- MustFromSignedBase64(base64String, "key")
+ objx.MustFromSignedBase64(base64String, "key")
})
+ base64String = "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6_junk"
+ _, err = objx.FromSignedBase64(base64String, "key")
+ assert.Error(t, err)
+ assert.Panics(t, func() {
+ objx.MustFromSignedBase64(base64String, "key")
+ })
}
func TestMapFromURLQuery(t *testing.T) {
+ m, err := objx.FromURLQuery("name=tyler&state=UT")
- m, err := FromURLQuery("name=tyler&state=UT")
- if assert.NoError(t, err) && assert.NotNil(t, m) {
- assert.Equal(t, "tyler", m.Get("name").Str())
- assert.Equal(t, "UT", m.Get("state").Str())
- }
+ assert.NoError(t, err)
+ require.NotNil(t, m)
+ assert.Equal(t, "tyler", m.Get("name").Str())
+ assert.Equal(t, "UT", m.Get("state").Str())
+}
+
+func TestMapMustFromURLQuery(t *testing.T) {
+ m := objx.MustFromURLQuery("name=tyler&state=UT")
+ require.NotNil(t, m)
+ assert.Equal(t, "tyler", m.Get("name").Str())
+ assert.Equal(t, "UT", m.Get("state").Str())
+}
+
+func TestMapFromURLQueryWithError(t *testing.T) {
+ m, err := objx.FromURLQuery("%")
+
+ assert.Error(t, err)
+ assert.Nil(t, m)
+ assert.Panics(t, func() {
+ objx.MustFromURLQuery("%")
+ })
}
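
Editor's note: the signed-string fixtures above follow the format FromSignedBase64 expects: the Base64 payload, then the signature separator ("_" in these fixtures), then HashWithKey(payload, key). A sketch of producing and verifying one; the expected output below is the value asserted in the tests:

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        m := objx.Map{"name": "Mat"}

        // SignedBase64 appends HashWithKey(base64, key) after the
        // separator, producing "<base64>_<sha1-hex>".
        signed, err := m.SignedBase64("key")
        if err != nil {
            panic(err)
        }
        fmt.Println(signed)
        // eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6

        // FromSignedBase64 verifies the signature before decoding;
        // a wrong key yields an error rather than a Map.
        if _, err := objx.FromSignedBase64(signed, "wrong-key"); err != nil {
            fmt.Println("signature mismatch:", err)
        }
    }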
diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go
index b35c86392..c3400a3f7 100644
--- a/vendor/github.com/stretchr/objx/mutations.go
+++ b/vendor/github.com/stretchr/objx/mutations.go
@@ -2,32 +2,23 @@ package objx
// Exclude returns a new Map with the keys in the specified []string
// excluded.
-func (d Map) Exclude(exclude []string) Map {
-
+func (m Map) Exclude(exclude []string) Map {
excluded := make(Map)
- for k, v := range d {
- var shouldInclude bool = true
- for _, toExclude := range exclude {
- if k == toExclude {
- shouldInclude = false
- break
- }
- }
- if shouldInclude {
+ for k, v := range m {
+ if !contains(exclude, k) {
excluded[k] = v
}
}
-
return excluded
}
// Copy creates a shallow copy of the Obj.
func (m Map) Copy() Map {
- copied := make(map[string]interface{})
+ copied := Map{}
for k, v := range m {
copied[k] = v
}
- return New(copied)
+ return copied
}
// Merge blends the specified map with a copy of this map and returns the result.
@@ -38,31 +29,28 @@ func (m Map) Merge(merge Map) Map {
return m.Copy().MergeHere(merge)
}
-// Merge blends the specified map with this map and returns the current map.
+// MergeHere blends the specified map with this map and returns the current map.
//
-// Keys that appear in both will be selected from the specified map. The original map
+// Keys that appear in both will be selected from the specified map. The original map
// will be modified. This method requires that
// the wrapped object be a map[string]interface{}
func (m Map) MergeHere(merge Map) Map {
-
for k, v := range merge {
m[k] = v
}
-
return m
-
}
// Transform builds a new Obj giving the transformer a chance
// to change the keys and values as it goes. This method requires that
// the wrapped object be a map[string]interface{}
func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map {
- newMap := make(map[string]interface{})
+ newMap := Map{}
for k, v := range m {
modifiedKey, modifiedVal := transformer(k, v)
newMap[modifiedKey] = modifiedVal
}
- return New(newMap)
+ return newMap
}
// TransformKeys builds a new map using the specified key mapping.
@@ -71,11 +59,19 @@ func (m Map) Transform(transformer func(key string, value interface{}) (string,
// This method requires that the wrapped object be a map[string]interface{}
func (m Map) TransformKeys(mapping map[string]string) Map {
return m.Transform(func(key string, value interface{}) (string, interface{}) {
-
if newKey, ok := mapping[key]; ok {
return newKey, value
}
-
return key, value
})
}
+
+// contains reports whether the string slice s contains the string e.
+func contains(s []string, e string) bool {
+ for _, a := range s {
+ if a == e {
+ return true
+ }
+ }
+ return false
+}
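
Editor's note: since the comment fix above clarifies MergeHere, here is a short sketch of the difference between the copying Merge and the mutating MergeHere, matching what the rewritten tests below assert:

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        base := objx.Map{"name": "Mat"}
        extra := objx.Map{"name": "Tyler", "location": "UT"}

        // Merge works on a shallow copy: base is left untouched.
        merged := base.Merge(extra)
        fmt.Println(merged.Get("location").Str()) // UT
        fmt.Println(base.Has("location"))         // false

        // MergeHere mutates and returns the receiver itself.
        base.MergeHere(extra)
        fmt.Println(base.Get("location").Str()) // UT
    }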
diff --git a/vendor/github.com/stretchr/objx/mutations_test.go b/vendor/github.com/stretchr/objx/mutations_test.go
index e20ee23bc..40901ceba 100644
--- a/vendor/github.com/stretchr/objx/mutations_test.go
+++ b/vendor/github.com/stretchr/objx/mutations_test.go
@@ -1,77 +1,106 @@
-package objx
+package objx_test
import (
- "github.com/stretchr/testify/assert"
+ "strings"
"testing"
+
+ "github.com/stretchr/objx"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestExclude(t *testing.T) {
+ m := objx.Map{
+ "name": "Mat",
+ "age": 29,
+ "secret": "ABC",
+ }
- d := make(Map)
- d["name"] = "Mat"
- d["age"] = 29
- d["secret"] = "ABC"
+ excluded := m.Exclude([]string{"secret"})
- excluded := d.Exclude([]string{"secret"})
-
- assert.Equal(t, d["name"], excluded["name"])
- assert.Equal(t, d["age"], excluded["age"])
+ assert.Equal(t, m["name"], excluded["name"])
+ assert.Equal(t, m["age"], excluded["age"])
assert.False(t, excluded.Has("secret"), "secret should be excluded")
-
}
func TestCopy(t *testing.T) {
+ m1 := objx.Map{
+ "name": "Tyler",
+ "location": "UT",
+ }
- d1 := make(map[string]interface{})
- d1["name"] = "Tyler"
- d1["location"] = "UT"
+ m2 := m1.Copy()
+ require.NotNil(t, m2)
+ m2["name"] = "Mat"
- d1Obj := New(d1)
- d2Obj := d1Obj.Copy()
-
- d2Obj["name"] = "Mat"
-
- assert.Equal(t, d1Obj.Get("name").Str(), "Tyler")
- assert.Equal(t, d2Obj.Get("name").Str(), "Mat")
+ assert.Equal(t, m1.Get("name").Str(), "Tyler")
+ assert.Equal(t, m2.Get("name").Str(), "Mat")
}
func TestMerge(t *testing.T) {
-
- d := make(map[string]interface{})
- d["name"] = "Mat"
-
- d1 := make(map[string]interface{})
- d1["name"] = "Tyler"
- d1["location"] = "UT"
-
- dObj := New(d)
- d1Obj := New(d1)
-
- merged := dObj.Merge(d1Obj)
-
- assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str())
- assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str())
- assert.Empty(t, dObj.Get("location").Str())
-
+ m1 := objx.Map{
+ "name": "Mat",
+ }
+ m2 := objx.Map{
+ "name": "Tyler",
+ "location": "UT",
+ }
+
+ merged := m1.Merge(m2)
+
+ assert.Equal(t, merged.Get("name").Str(), m2.Get("name").Str())
+ assert.Equal(t, merged.Get("location").Str(), m2.Get("location").Str())
+ assert.Empty(t, m1.Get("location").Str())
}
func TestMergeHere(t *testing.T) {
+ m1 := objx.Map{
+ "name": "Mat",
+ }
+ m2 := objx.Map{
+ "name": "Tyler",
+ "location": "UT",
+ }
+
+ merged := m1.MergeHere(m2)
+
+	assert.Equal(t, m1, merged, "MergeHere should mutate and return the receiver map")
+ assert.Equal(t, merged.Get("name").Str(), m2.Get("name").Str())
+ assert.Equal(t, merged.Get("location").Str(), m2.Get("location").Str())
+ assert.Equal(t, merged.Get("location").Str(), m1.Get("location").Str())
+}
- d := make(map[string]interface{})
- d["name"] = "Mat"
-
- d1 := make(map[string]interface{})
- d1["name"] = "Tyler"
- d1["location"] = "UT"
-
- dObj := New(d)
- d1Obj := New(d1)
+func TestTransform(t *testing.T) {
+ m := objx.Map{
+ "name": "Mat",
+ "location": "UK",
+ }
+ r := m.Transform(keyToUpper)
+ assert.Equal(t, objx.Map{
+ "NAME": "Mat",
+ "LOCATION": "UK",
+ }, r)
+}
- merged := dObj.MergeHere(d1Obj)
+func TestTransformKeys(t *testing.T) {
+ m := objx.Map{
+ "a": "1",
+ "b": "2",
+ "c": "3",
+ }
+ mapping := map[string]string{
+ "a": "d",
+ "b": "e",
+ }
+ r := m.TransformKeys(mapping)
+ assert.Equal(t, objx.Map{
+ "c": "3",
+ "d": "1",
+ "e": "2",
+ }, r)
+}
- assert.Equal(t, dObj, merged, "With MergeHere, it should return the first modified map")
- assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str())
- assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str())
- assert.Equal(t, merged.Get("location").Str(), dObj.Get("location").Str())
+func keyToUpper(s string, v interface{}) (string, interface{}) {
+ return strings.ToUpper(s), v
}
diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go
index fdd6be9cf..692be8e2a 100644
--- a/vendor/github.com/stretchr/objx/security.go
+++ b/vendor/github.com/stretchr/objx/security.go
@@ -5,10 +5,8 @@ import (
"encoding/hex"
)
-// HashWithKey hashes the specified string using the security
-// key.
+// HashWithKey hashes the specified string using the security key
func HashWithKey(data, key string) string {
- hash := sha1.New()
- hash.Write([]byte(data + ":" + key))
- return hex.EncodeToString(hash.Sum(nil))
+ d := sha1.Sum([]byte(data + ":" + key))
+ return hex.EncodeToString(d[:])
}
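
Editor's note: the rewrite swaps the streaming hash.Hash for the one-shot sha1.Sum; both produce the same digest over the same "data:key" input, as the unchanged test vector below confirms:

    package main

    import (
        "crypto/sha1"
        "encoding/hex"
        "fmt"
    )

    func main() {
        data, key := "abc", "def"

        // Old form: streaming hash.Hash.
        h := sha1.New()
        h.Write([]byte(data + ":" + key))
        oldSum := hex.EncodeToString(h.Sum(nil))

        // New form: one-shot sha1.Sum over the same bytes.
        d := sha1.Sum([]byte(data + ":" + key))
        newSum := hex.EncodeToString(d[:])

        fmt.Println(oldSum == newSum) // true
        fmt.Println(newSum) // 0ce84d8d01f2c7b6e0882b784429c54d280ea2d9
    }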
diff --git a/vendor/github.com/stretchr/objx/security_test.go b/vendor/github.com/stretchr/objx/security_test.go
index 8f0898f62..8c623db91 100644
--- a/vendor/github.com/stretchr/objx/security_test.go
+++ b/vendor/github.com/stretchr/objx/security_test.go
@@ -1,12 +1,12 @@
-package objx
+package objx_test
import (
- "github.com/stretchr/testify/assert"
"testing"
+
+ "github.com/stretchr/objx"
+ "github.com/stretchr/testify/assert"
)
func TestHashWithKey(t *testing.T) {
-
- assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", HashWithKey("abc", "def"))
-
+ assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", objx.HashWithKey("abc", "def"))
}
diff --git a/vendor/github.com/stretchr/objx/simple_example_test.go b/vendor/github.com/stretchr/objx/simple_example_test.go
index 5408c7fd3..403753d65 100644
--- a/vendor/github.com/stretchr/objx/simple_example_test.go
+++ b/vendor/github.com/stretchr/objx/simple_example_test.go
@@ -1,21 +1,23 @@
-package objx
+package objx_test
import (
- "github.com/stretchr/testify/assert"
"testing"
+
+ "github.com/stretchr/objx"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestSimpleExample(t *testing.T) {
-
// build a map from a JSON object
- o := MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], "location":{"county":"hobbiton","city":"the shire"}}`)
+ o := objx.MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], "location":{"county":"hobbiton","city":"the shire"}}`)
// Map can be used as a straight map[string]interface{}
assert.Equal(t, o["name"], "Mat")
// Get an Value object
v := o.Get("name")
- assert.Equal(t, v, &Value{data: "Mat"})
+ require.NotNil(t, v)
// Test the contained value
assert.False(t, v.IsInt())
@@ -37,5 +39,4 @@ func TestSimpleExample(t *testing.T) {
// Get a value by using dot notation
assert.Equal(t, "hobbiton", o.Get("location.county").Str())
-
}
diff --git a/vendor/github.com/stretchr/objx/tests_test.go b/vendor/github.com/stretchr/objx/tests_test.go
index bcc1eb03d..94a8adaf6 100644
--- a/vendor/github.com/stretchr/objx/tests_test.go
+++ b/vendor/github.com/stretchr/objx/tests_test.go
@@ -1,13 +1,14 @@
-package objx
+package objx_test
import (
- "github.com/stretchr/testify/assert"
"testing"
+
+ "github.com/stretchr/objx"
+ "github.com/stretchr/testify/assert"
)
func TestHas(t *testing.T) {
-
- m := New(TestMap)
+ m := objx.Map(TestMap)
assert.True(t, m.Has("name"))
assert.True(t, m.Has("address.state"))
@@ -19,6 +20,6 @@ func TestHas(t *testing.T) {
assert.False(t, m.Has("numbers[5]"))
m = nil
- assert.False(t, m.Has("nothing"))
+ assert.False(t, m.Has("nothing"))
}
diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go
index f3ecb29b9..202a91f8c 100644
--- a/vendor/github.com/stretchr/objx/type_specific_codegen.go
+++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go
@@ -2,7 +2,6 @@ package objx
/*
Inter (interface{} and []interface{})
- --------------------------------------------------
*/
// Inter gets the value as a interface{}, returns the optionalDefault
@@ -60,44 +59,35 @@ func (v *Value) IsInterSlice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachInter(callback func(int, interface{}) bool) *Value {
-
for index, val := range v.MustInterSlice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereInter uses the specified decider function to select items
// from the []interface{}. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value {
-
var selected []interface{}
-
v.EachInter(func(index int, val interface{}) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupInter uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]interface{}.
func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value {
-
groups := make(map[string][]interface{})
-
v.EachInter(func(index int, val interface{}) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -106,47 +96,37 @@ func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceInter uses the specified function to replace each interface{}s
// by iterating each item. The data in the returned result will be a
// []interface{} containing the replaced items.
func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value {
-
arr := v.MustInterSlice()
replaced := make([]interface{}, len(arr))
-
v.EachInter(func(index int, val interface{}) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectInter uses the specified collector function to collect a value
// for each of the interface{}s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value {
-
arr := v.MustInterSlice()
collected := make([]interface{}, len(arr))
-
v.EachInter(func(index int, val interface{}) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
MSI (map[string]interface{} and []map[string]interface{})
- --------------------------------------------------
*/
// MSI gets the value as a map[string]interface{}, returns the optionalDefault
@@ -204,44 +184,35 @@ func (v *Value) IsMSISlice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value {
-
for index, val := range v.MustMSISlice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereMSI uses the specified decider function to select items
// from the []map[string]interface{}. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value {
-
var selected []map[string]interface{}
-
v.EachMSI(func(index int, val map[string]interface{}) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupMSI uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]map[string]interface{}.
func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value {
-
groups := make(map[string][]map[string]interface{})
-
v.EachMSI(func(index int, val map[string]interface{}) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -250,47 +221,37 @@ func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Valu
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceMSI uses the specified function to replace each map[string]interface{}s
// by iterating each item. The data in the returned result will be a
// []map[string]interface{} containing the replaced items.
func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value {
-
arr := v.MustMSISlice()
replaced := make([]map[string]interface{}, len(arr))
-
v.EachMSI(func(index int, val map[string]interface{}) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectMSI uses the specified collector function to collect a value
// for each of the map[string]interface{}s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value {
-
arr := v.MustMSISlice()
collected := make([]interface{}, len(arr))
-
v.EachMSI(func(index int, val map[string]interface{}) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
ObjxMap ((Map) and [](Map))
- --------------------------------------------------
*/
// ObjxMap gets the value as a (Map), returns the optionalDefault
@@ -348,44 +309,35 @@ func (v *Value) IsObjxMapSlice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value {
-
for index, val := range v.MustObjxMapSlice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereObjxMap uses the specified decider function to select items
// from the [](Map). The object contained in the result will contain
// only the selected items.
func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value {
-
var selected [](Map)
-
v.EachObjxMap(func(index int, val Map) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupObjxMap uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][](Map).
func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value {
-
groups := make(map[string][](Map))
-
v.EachObjxMap(func(index int, val Map) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -394,47 +346,37 @@ func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceObjxMap uses the specified function to replace each (Map)s
// by iterating each item. The data in the returned result will be a
// [](Map) containing the replaced items.
func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value {
-
arr := v.MustObjxMapSlice()
replaced := make([](Map), len(arr))
-
v.EachObjxMap(func(index int, val Map) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectObjxMap uses the specified collector function to collect a value
// for each of the (Map)s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value {
-
arr := v.MustObjxMapSlice()
collected := make([]interface{}, len(arr))
-
v.EachObjxMap(func(index int, val Map) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Bool (bool and []bool)
- --------------------------------------------------
*/
// Bool gets the value as a bool, returns the optionalDefault
@@ -492,44 +434,35 @@ func (v *Value) IsBoolSlice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachBool(callback func(int, bool) bool) *Value {
-
for index, val := range v.MustBoolSlice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereBool uses the specified decider function to select items
// from the []bool. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereBool(decider func(int, bool) bool) *Value {
-
var selected []bool
-
v.EachBool(func(index int, val bool) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupBool uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]bool.
func (v *Value) GroupBool(grouper func(int, bool) string) *Value {
-
groups := make(map[string][]bool)
-
v.EachBool(func(index int, val bool) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -538,47 +471,37 @@ func (v *Value) GroupBool(grouper func(int, bool) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceBool uses the specified function to replace each bools
// by iterating each item. The data in the returned result will be a
// []bool containing the replaced items.
func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value {
-
arr := v.MustBoolSlice()
replaced := make([]bool, len(arr))
-
v.EachBool(func(index int, val bool) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectBool uses the specified collector function to collect a value
// for each of the bools in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value {
-
arr := v.MustBoolSlice()
collected := make([]interface{}, len(arr))
-
v.EachBool(func(index int, val bool) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Str (string and []string)
- --------------------------------------------------
*/
// Str gets the value as a string, returns the optionalDefault
@@ -636,44 +559,35 @@ func (v *Value) IsStrSlice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachStr(callback func(int, string) bool) *Value {
-
for index, val := range v.MustStrSlice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereStr uses the specified decider function to select items
// from the []string. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereStr(decider func(int, string) bool) *Value {
-
var selected []string
-
v.EachStr(func(index int, val string) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupStr uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]string.
func (v *Value) GroupStr(grouper func(int, string) string) *Value {
-
groups := make(map[string][]string)
-
v.EachStr(func(index int, val string) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -682,47 +596,37 @@ func (v *Value) GroupStr(grouper func(int, string) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceStr uses the specified function to replace each strings
// by iterating each item. The data in the returned result will be a
// []string containing the replaced items.
func (v *Value) ReplaceStr(replacer func(int, string) string) *Value {
-
arr := v.MustStrSlice()
replaced := make([]string, len(arr))
-
v.EachStr(func(index int, val string) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectStr uses the specified collector function to collect a value
// for each of the strings in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectStr(collector func(int, string) interface{}) *Value {
-
arr := v.MustStrSlice()
collected := make([]interface{}, len(arr))
-
v.EachStr(func(index int, val string) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Int (int and []int)
- --------------------------------------------------
*/
// Int gets the value as a int, returns the optionalDefault
@@ -780,44 +684,35 @@ func (v *Value) IsIntSlice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachInt(callback func(int, int) bool) *Value {
-
for index, val := range v.MustIntSlice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereInt uses the specified decider function to select items
// from the []int. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereInt(decider func(int, int) bool) *Value {
-
var selected []int
-
v.EachInt(func(index int, val int) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupInt uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]int.
func (v *Value) GroupInt(grouper func(int, int) string) *Value {
-
groups := make(map[string][]int)
-
v.EachInt(func(index int, val int) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -826,47 +721,37 @@ func (v *Value) GroupInt(grouper func(int, int) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceInt uses the specified function to replace each ints
// by iterating each item. The data in the returned result will be a
// []int containing the replaced items.
func (v *Value) ReplaceInt(replacer func(int, int) int) *Value {
-
arr := v.MustIntSlice()
replaced := make([]int, len(arr))
-
v.EachInt(func(index int, val int) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectInt uses the specified collector function to collect a value
// for each of the ints in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectInt(collector func(int, int) interface{}) *Value {
-
arr := v.MustIntSlice()
collected := make([]interface{}, len(arr))
-
v.EachInt(func(index int, val int) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Int8 (int8 and []int8)
- --------------------------------------------------
*/
// Int8 gets the value as a int8, returns the optionalDefault
@@ -924,44 +809,35 @@ func (v *Value) IsInt8Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachInt8(callback func(int, int8) bool) *Value {
-
for index, val := range v.MustInt8Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereInt8 uses the specified decider function to select items
// from the []int8. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereInt8(decider func(int, int8) bool) *Value {
-
var selected []int8
-
v.EachInt8(func(index int, val int8) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupInt8 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]int8.
func (v *Value) GroupInt8(grouper func(int, int8) string) *Value {
-
groups := make(map[string][]int8)
-
v.EachInt8(func(index int, val int8) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -970,47 +846,37 @@ func (v *Value) GroupInt8(grouper func(int, int8) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceInt8 uses the specified function to replace each int8s
// by iterating each item. The data in the returned result will be a
// []int8 containing the replaced items.
func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value {
-
arr := v.MustInt8Slice()
replaced := make([]int8, len(arr))
-
v.EachInt8(func(index int, val int8) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectInt8 uses the specified collector function to collect a value
// for each of the int8s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value {
-
arr := v.MustInt8Slice()
collected := make([]interface{}, len(arr))
-
v.EachInt8(func(index int, val int8) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Int16 (int16 and []int16)
- --------------------------------------------------
*/
// Int16 gets the value as a int16, returns the optionalDefault
@@ -1068,44 +934,35 @@ func (v *Value) IsInt16Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachInt16(callback func(int, int16) bool) *Value {
-
for index, val := range v.MustInt16Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereInt16 uses the specified decider function to select items
// from the []int16. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereInt16(decider func(int, int16) bool) *Value {
-
var selected []int16
-
v.EachInt16(func(index int, val int16) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupInt16 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]int16.
func (v *Value) GroupInt16(grouper func(int, int16) string) *Value {
-
groups := make(map[string][]int16)
-
v.EachInt16(func(index int, val int16) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -1114,47 +971,37 @@ func (v *Value) GroupInt16(grouper func(int, int16) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceInt16 uses the specified function to replace each int16s
// by iterating each item. The data in the returned result will be a
// []int16 containing the replaced items.
func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value {
-
arr := v.MustInt16Slice()
replaced := make([]int16, len(arr))
-
v.EachInt16(func(index int, val int16) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectInt16 uses the specified collector function to collect a value
// for each of the int16s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value {
-
arr := v.MustInt16Slice()
collected := make([]interface{}, len(arr))
-
v.EachInt16(func(index int, val int16) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Int32 (int32 and []int32)
- --------------------------------------------------
*/
// Int32 gets the value as a int32, returns the optionalDefault
@@ -1212,44 +1059,35 @@ func (v *Value) IsInt32Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachInt32(callback func(int, int32) bool) *Value {
-
for index, val := range v.MustInt32Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereInt32 uses the specified decider function to select items
// from the []int32. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereInt32(decider func(int, int32) bool) *Value {
-
var selected []int32
-
v.EachInt32(func(index int, val int32) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupInt32 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]int32.
func (v *Value) GroupInt32(grouper func(int, int32) string) *Value {
-
groups := make(map[string][]int32)
-
v.EachInt32(func(index int, val int32) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -1258,47 +1096,37 @@ func (v *Value) GroupInt32(grouper func(int, int32) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceInt32 uses the specified function to replace each int32s
// by iterating each item. The data in the returned result will be a
// []int32 containing the replaced items.
func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value {
-
arr := v.MustInt32Slice()
replaced := make([]int32, len(arr))
-
v.EachInt32(func(index int, val int32) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectInt32 uses the specified collector function to collect a value
// for each of the int32s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value {
-
arr := v.MustInt32Slice()
collected := make([]interface{}, len(arr))
-
v.EachInt32(func(index int, val int32) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Int64 (int64 and []int64)
- --------------------------------------------------
*/
// Int64 gets the value as an int64, returns the optionalDefault
@@ -1356,44 +1184,35 @@ func (v *Value) IsInt64Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachInt64(callback func(int, int64) bool) *Value {
-
for index, val := range v.MustInt64Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereInt64 uses the specified decider function to select items
// from the []int64. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereInt64(decider func(int, int64) bool) *Value {
-
var selected []int64
-
v.EachInt64(func(index int, val int64) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupInt64 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]int64.
func (v *Value) GroupInt64(grouper func(int, int64) string) *Value {
-
groups := make(map[string][]int64)
-
v.EachInt64(func(index int, val int64) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -1402,47 +1221,37 @@ func (v *Value) GroupInt64(grouper func(int, int64) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceInt64 uses the specified function to replace each int64
// by iterating over each item. The data in the returned result will be a
// []int64 containing the replaced items.
func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value {
-
arr := v.MustInt64Slice()
replaced := make([]int64, len(arr))
-
v.EachInt64(func(index int, val int64) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectInt64 uses the specified collector function to collect a value
// for each of the int64s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value {
-
arr := v.MustInt64Slice()
collected := make([]interface{}, len(arr))
-
v.EachInt64(func(index int, val int64) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
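
Each* is the primitive the other helpers are built on: the callback's bool is the "carry on" flag, and returning false stops the walk early. A small sketch with hypothetical data (not part of this patch):

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        v := objx.New(map[string]interface{}{"ids": []int64{10, 20, 30, 40}}).Get("ids")

        // Visit items until the callback reports false; here that happens
        // once i == 1, so only the first two elements are seen.
        seen := 0
        v.EachInt64(func(i int, val int64) bool {
            seen++
            return i < 1
        })
        fmt.Println(seen) // 2
    }
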
/*
Uint (uint and []uint)
- --------------------------------------------------
*/
// Uint gets the value as a uint, returns the optionalDefault
@@ -1500,44 +1309,35 @@ func (v *Value) IsUintSlice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachUint(callback func(int, uint) bool) *Value {
-
for index, val := range v.MustUintSlice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereUint uses the specified decider function to select items
// from the []uint. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereUint(decider func(int, uint) bool) *Value {
-
var selected []uint
-
v.EachUint(func(index int, val uint) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupUint uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]uint.
func (v *Value) GroupUint(grouper func(int, uint) string) *Value {
-
groups := make(map[string][]uint)
-
v.EachUint(func(index int, val uint) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -1546,47 +1346,37 @@ func (v *Value) GroupUint(grouper func(int, uint) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceUint uses the specified function to replace each uint
// by iterating over each item. The data in the returned result will be a
// []uint containing the replaced items.
func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value {
-
arr := v.MustUintSlice()
replaced := make([]uint, len(arr))
-
v.EachUint(func(index int, val uint) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectUint uses the specified collector function to collect a value
// for each of the uints in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value {
-
arr := v.MustUintSlice()
collected := make([]interface{}, len(arr))
-
v.EachUint(func(index int, val uint) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Uint8 (uint8 and []uint8)
- --------------------------------------------------
*/
// Uint8 gets the value as a uint8, returns the optionalDefault
@@ -1644,44 +1434,35 @@ func (v *Value) IsUint8Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachUint8(callback func(int, uint8) bool) *Value {
-
for index, val := range v.MustUint8Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereUint8 uses the specified decider function to select items
// from the []uint8. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value {
-
var selected []uint8
-
v.EachUint8(func(index int, val uint8) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupUint8 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]uint8.
func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value {
-
groups := make(map[string][]uint8)
-
v.EachUint8(func(index int, val uint8) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -1690,47 +1471,37 @@ func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceUint8 uses the specified function to replace each uint8
// by iterating over each item. The data in the returned result will be a
// []uint8 containing the replaced items.
func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value {
-
arr := v.MustUint8Slice()
replaced := make([]uint8, len(arr))
-
v.EachUint8(func(index int, val uint8) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectUint8 uses the specified collector function to collect a value
// for each of the uint8s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value {
-
arr := v.MustUint8Slice()
collected := make([]interface{}, len(arr))
-
v.EachUint8(func(index int, val uint8) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
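
One practical note: in Go, []byte and []uint8 are the identical type, so the Uint8 helpers above also operate on raw byte payloads. A hypothetical sketch (not part of this patch):

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        // []byte is []uint8, so a byte payload satisfies IsUint8Slice.
        v := objx.New(map[string]interface{}{"raw": []byte("hi")}).Get("raw")
        fmt.Println(v.IsUint8Slice())   // true
        fmt.Println(v.MustUint8Slice()) // [104 105]
    }
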
/*
Uint16 (uint16 and []uint16)
- --------------------------------------------------
*/
// Uint16 gets the value as a uint16, returns the optionalDefault
@@ -1788,44 +1559,35 @@ func (v *Value) IsUint16Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachUint16(callback func(int, uint16) bool) *Value {
-
for index, val := range v.MustUint16Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereUint16 uses the specified decider function to select items
// from the []uint16. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value {
-
var selected []uint16
-
v.EachUint16(func(index int, val uint16) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupUint16 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]uint16.
func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value {
-
groups := make(map[string][]uint16)
-
v.EachUint16(func(index int, val uint16) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -1834,47 +1596,37 @@ func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceUint16 uses the specified function to replace each uint16
// by iterating over each item. The data in the returned result will be a
// []uint16 containing the replaced items.
func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value {
-
arr := v.MustUint16Slice()
replaced := make([]uint16, len(arr))
-
v.EachUint16(func(index int, val uint16) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectUint16 uses the specified collector function to collect a value
// for each of the uint16s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value {
-
arr := v.MustUint16Slice()
collected := make([]interface{}, len(arr))
-
v.EachUint16(func(index int, val uint16) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Uint32 (uint32 and []uint32)
- --------------------------------------------------
*/
// Uint32 gets the value as a uint32, returns the optionalDefault
@@ -1932,44 +1684,35 @@ func (v *Value) IsUint32Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachUint32(callback func(int, uint32) bool) *Value {
-
for index, val := range v.MustUint32Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereUint32 uses the specified decider function to select items
// from the []uint32. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value {
-
var selected []uint32
-
v.EachUint32(func(index int, val uint32) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupUint32 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]uint32.
func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value {
-
groups := make(map[string][]uint32)
-
v.EachUint32(func(index int, val uint32) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -1978,47 +1721,37 @@ func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceUint32 uses the specified function to replace each uint32
// by iterating over each item. The data in the returned result will be a
// []uint32 containing the replaced items.
func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value {
-
arr := v.MustUint32Slice()
replaced := make([]uint32, len(arr))
-
v.EachUint32(func(index int, val uint32) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectUint32 uses the specified collector function to collect a value
// for each of the uint32s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value {
-
arr := v.MustUint32Slice()
collected := make([]interface{}, len(arr))
-
v.EachUint32(func(index int, val uint32) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Uint64 (uint64 and []uint64)
- --------------------------------------------------
*/
// Uint64 gets the value as a uint64, returns the optionalDefault
@@ -2076,44 +1809,35 @@ func (v *Value) IsUint64Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachUint64(callback func(int, uint64) bool) *Value {
-
for index, val := range v.MustUint64Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereUint64 uses the specified decider function to select items
// from the []uint64. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value {
-
var selected []uint64
-
v.EachUint64(func(index int, val uint64) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupUint64 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]uint64.
func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value {
-
groups := make(map[string][]uint64)
-
v.EachUint64(func(index int, val uint64) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -2122,47 +1846,37 @@ func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceUint64 uses the specified function to replace each uint64
// by iterating over each item. The data in the returned result will be a
// []uint64 containing the replaced items.
func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value {
-
arr := v.MustUint64Slice()
replaced := make([]uint64, len(arr))
-
v.EachUint64(func(index int, val uint64) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectUint64 uses the specified collector function to collect a value
// for each of the uint64s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value {
-
arr := v.MustUint64Slice()
collected := make([]interface{}, len(arr))
-
v.EachUint64(func(index int, val uint64) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Uintptr (uintptr and []uintptr)
- --------------------------------------------------
*/
// Uintptr gets the value as a uintptr, returns the optionalDefault
@@ -2220,44 +1934,35 @@ func (v *Value) IsUintptrSlice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value {
-
for index, val := range v.MustUintptrSlice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereUintptr uses the specified decider function to select items
// from the []uintptr. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value {
-
var selected []uintptr
-
v.EachUintptr(func(index int, val uintptr) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupUintptr uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]uintptr.
func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value {
-
groups := make(map[string][]uintptr)
-
v.EachUintptr(func(index int, val uintptr) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -2266,47 +1971,37 @@ func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceUintptr uses the specified function to replace each uintptr
// by iterating over each item. The data in the returned result will be a
// []uintptr containing the replaced items.
func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value {
-
arr := v.MustUintptrSlice()
replaced := make([]uintptr, len(arr))
-
v.EachUintptr(func(index int, val uintptr) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectUintptr uses the specified collector function to collect a value
// for each of the uintptrs in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value {
-
arr := v.MustUintptrSlice()
collected := make([]interface{}, len(arr))
-
v.EachUintptr(func(index int, val uintptr) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Float32 (float32 and []float32)
- --------------------------------------------------
*/
// Float32 gets the value as a float32, returns the optionalDefault
@@ -2364,44 +2059,35 @@ func (v *Value) IsFloat32Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachFloat32(callback func(int, float32) bool) *Value {
-
for index, val := range v.MustFloat32Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereFloat32 uses the specified decider function to select items
// from the []float32. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value {
-
var selected []float32
-
v.EachFloat32(func(index int, val float32) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupFloat32 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]float32.
func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value {
-
groups := make(map[string][]float32)
-
v.EachFloat32(func(index int, val float32) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -2410,47 +2096,37 @@ func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceFloat32 uses the specified function to replace each float32
// by iterating over each item. The data in the returned result will be a
// []float32 containing the replaced items.
func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value {
-
arr := v.MustFloat32Slice()
replaced := make([]float32, len(arr))
-
v.EachFloat32(func(index int, val float32) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectFloat32 uses the specified collector function to collect a value
// for each of the float32s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value {
-
arr := v.MustFloat32Slice()
collected := make([]interface{}, len(arr))
-
v.EachFloat32(func(index int, val float32) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
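
Collect* is the map step: whatever the collector returns is gathered into a []interface{}. A hypothetical sketch (not part of this patch) that derives labels from float32 readings:

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        v := objx.New(map[string]interface{}{"temps": []float32{18.5, 22.0, 31.2}}).Get("temps")

        // CollectFloat32 returns a Value wrapping []interface{}, readable
        // here via MustInterSlice.
        labels := v.CollectFloat32(func(i int, t float32) interface{} {
            if t > 25 {
                return "hot"
            }
            return "mild"
        })
        fmt.Println(labels.MustInterSlice()) // [mild mild hot]
    }
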
/*
Float64 (float64 and []float64)
- --------------------------------------------------
*/
// Float64 gets the value as a float64, returns the optionalDefault
@@ -2508,44 +2184,35 @@ func (v *Value) IsFloat64Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachFloat64(callback func(int, float64) bool) *Value {
-
for index, val := range v.MustFloat64Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereFloat64 uses the specified decider function to select items
// from the []float64. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value {
-
var selected []float64
-
v.EachFloat64(func(index int, val float64) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupFloat64 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]float64.
func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value {
-
groups := make(map[string][]float64)
-
v.EachFloat64(func(index int, val float64) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -2554,47 +2221,37 @@ func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceFloat64 uses the specified function to replace each float64
// by iterating over each item. The data in the returned result will be a
// []float64 containing the replaced items.
func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value {
-
arr := v.MustFloat64Slice()
replaced := make([]float64, len(arr))
-
v.EachFloat64(func(index int, val float64) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectFloat64 uses the specified collector function to collect a value
// for each of the float64s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value {
-
arr := v.MustFloat64Slice()
collected := make([]interface{}, len(arr))
-
v.EachFloat64(func(index int, val float64) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Complex64 (complex64 and []complex64)
- --------------------------------------------------
*/
// Complex64 gets the value as a complex64, returns the optionalDefault
@@ -2652,44 +2309,35 @@ func (v *Value) IsComplex64Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value {
-
for index, val := range v.MustComplex64Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereComplex64 uses the specified decider function to select items
// from the []complex64. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value {
-
var selected []complex64
-
v.EachComplex64(func(index int, val complex64) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupComplex64 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]complex64.
func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value {
-
groups := make(map[string][]complex64)
-
v.EachComplex64(func(index int, val complex64) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -2698,47 +2346,37 @@ func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceComplex64 uses the specified function to replace each complex64
// by iterating over each item. The data in the returned result will be a
// []complex64 containing the replaced items.
func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value {
-
arr := v.MustComplex64Slice()
replaced := make([]complex64, len(arr))
-
v.EachComplex64(func(index int, val complex64) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectComplex64 uses the specified collector function to collect a value
// for each of the complex64s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value {
-
arr := v.MustComplex64Slice()
collected := make([]interface{}, len(arr))
-
v.EachComplex64(func(index int, val complex64) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
/*
Complex128 (complex128 and []complex128)
- --------------------------------------------------
*/
// Complex128 gets the value as a complex128, returns the optionalDefault
@@ -2796,44 +2434,35 @@ func (v *Value) IsComplex128Slice() bool {
//
// Panics if the object is the wrong type.
func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value {
-
for index, val := range v.MustComplex128Slice() {
carryon := callback(index, val)
- if carryon == false {
+ if !carryon {
break
}
}
-
return v
-
}
// WhereComplex128 uses the specified decider function to select items
// from the []complex128. The object contained in the result will contain
// only the selected items.
func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value {
-
var selected []complex128
-
v.EachComplex128(func(index int, val complex128) bool {
shouldSelect := decider(index, val)
- if shouldSelect == false {
+ if !shouldSelect {
selected = append(selected, val)
}
return true
})
-
return &Value{data: selected}
-
}
// GroupComplex128 uses the specified grouper function to group the items
// keyed by the return of the grouper. The object contained in the
// result will contain a map[string][]complex128.
func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value {
-
groups := make(map[string][]complex128)
-
v.EachComplex128(func(index int, val complex128) bool {
group := grouper(index, val)
if _, ok := groups[group]; !ok {
@@ -2842,40 +2471,31 @@ func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value {
groups[group] = append(groups[group], val)
return true
})
-
return &Value{data: groups}
-
}
// ReplaceComplex128 uses the specified function to replace each complex128
// by iterating over each item. The data in the returned result will be a
// []complex128 containing the replaced items.
func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value {
-
arr := v.MustComplex128Slice()
replaced := make([]complex128, len(arr))
-
v.EachComplex128(func(index int, val complex128) bool {
replaced[index] = replacer(index, val)
return true
})
-
return &Value{data: replaced}
-
}
// CollectComplex128 uses the specified collector function to collect a value
// for each of the complex128s in the slice. The data returned will be a
// []interface{}.
func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value {
-
arr := v.MustComplex128Slice()
collected := make([]interface{}, len(arr))
-
v.EachComplex128(func(index int, val complex128) bool {
collected[index] = collector(index, val)
return true
})
-
return &Value{data: collected}
}
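
In the hunks shown above, the cleanup is purely cosmetic: filler blank lines and `== false` comparisons go away while every exported signature stays the same, so vendored callers compile unchanged. A chained sketch exercising the surviving API on hypothetical data (not part of this patch):

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        v := objx.New(map[string]interface{}{"scores": []float64{1, 2, 3}}).Get("scores")

        // Replace then Collect: double each score, then stringify it.
        out := v.ReplaceFloat64(func(i int, s float64) float64 { return s * 2 }).
            CollectFloat64(func(i int, s float64) interface{} {
                return fmt.Sprintf("%.1f", s)
            })
        fmt.Println(out.MustInterSlice()) // [2.0 4.0 6.0]
    }
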
diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen_test.go b/vendor/github.com/stretchr/objx/type_specific_codegen_test.go
index f7a4fceea..c79c7e034 100644
--- a/vendor/github.com/stretchr/objx/type_specific_codegen_test.go
+++ b/vendor/github.com/stretchr/objx/type_specific_codegen_test.go
@@ -2,84 +2,71 @@ package objx
import (
"fmt"
- "github.com/stretchr/testify/assert"
"testing"
+
+ "github.com/stretchr/testify/assert"
)
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Inter (interface{} and []interface{})
+*/
func TestInter(t *testing.T) {
-
val := interface{}("something")
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Inter())
assert.Equal(t, val, New(m).Get("value").MustInter())
assert.Equal(t, interface{}(nil), New(m).Get("nothing").Inter())
assert.Equal(t, val, New(m).Get("nothing").Inter("something"))
-
assert.Panics(t, func() {
New(m).Get("age").MustInter()
})
-
}
func TestInterSlice(t *testing.T) {
-
val := interface{}("something")
+
m := map[string]interface{}{"value": []interface{}{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").InterSlice()[0])
assert.Equal(t, val, New(m).Get("value").MustInterSlice()[0])
assert.Equal(t, []interface{}(nil), New(m).Get("nothing").InterSlice())
assert.Equal(t, val, New(m).Get("nothing").InterSlice([]interface{}{interface{}("something")})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustInterSlice()
})
-
}
func TestIsInter(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: interface{}("something")}
+ v := &Value{data: interface{}("something")}
assert.True(t, v.IsInter())
- v = &Value{data: []interface{}{interface{}("something")}}
- assert.True(t, v.IsInterSlice())
+}
+func TestIsInterSlice(t *testing.T) {
+ v := &Value{data: []interface{}{interface{}("something")}}
+ assert.True(t, v.IsInterSlice())
}
func TestEachInter(t *testing.T) {
-
v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
count := 0
replacedVals := make([]interface{}, 0)
assert.Equal(t, v, v.EachInter(func(i int, val interface{}) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustInterSlice()[0])
assert.Equal(t, replacedVals[1], v.MustInterSlice()[1])
assert.Equal(t, replacedVals[2], v.MustInterSlice()[2])
-
}
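
The `return i != 2` rewrite in TestEachInter is behavior-preserving: the callback still reports "carry on" for i of 0 and 1 and aborts at i == 2, so exactly three items are visited. A standalone sketch of the equivalence (hypothetical loop, no objx needed):

    package main

    import "fmt"

    func main() {
        count := 0
        for i := 0; i < 5; i++ {
            count++
            // old form: if i == 2 { return false }; return true
            // new form: carry on exactly while i != 2
            if carryOn := i != 2; !carryOn {
                break
            }
        }
        fmt.Println(count) // 3
    }

The same simplification is applied to every Each* test below.
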
func TestWhereInter(t *testing.T) {
-
v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
selected := v.WhereInter(func(i int, val interface{}) bool {
@@ -87,11 +74,9 @@ func TestWhereInter(t *testing.T) {
}).MustInterSlice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupInter(t *testing.T) {
-
v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
grouped := v.GroupInter(func(i int, val interface{}) string {
@@ -101,11 +86,9 @@ func TestGroupInter(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceInter(t *testing.T) {
-
v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
rawArr := v.MustInterSlice()
@@ -126,11 +109,9 @@ func TestReplaceInter(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectInter(t *testing.T) {
-
v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
collected := v.CollectInter(func(index int, val interface{}) interface{} {
@@ -146,83 +127,68 @@ func TestCollectInter(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for MSI (map[string]interface{} and []map[string]interface{})
+*/
func TestMSI(t *testing.T) {
-
val := map[string]interface{}(map[string]interface{}{"name": "Tyler"})
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").MSI())
assert.Equal(t, val, New(m).Get("value").MustMSI())
assert.Equal(t, map[string]interface{}(nil), New(m).Get("nothing").MSI())
assert.Equal(t, val, New(m).Get("nothing").MSI(map[string]interface{}{"name": "Tyler"}))
-
assert.Panics(t, func() {
New(m).Get("age").MustMSI()
})
-
}
func TestMSISlice(t *testing.T) {
-
val := map[string]interface{}(map[string]interface{}{"name": "Tyler"})
+
m := map[string]interface{}{"value": []map[string]interface{}{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").MSISlice()[0])
assert.Equal(t, val, New(m).Get("value").MustMSISlice()[0])
assert.Equal(t, []map[string]interface{}(nil), New(m).Get("nothing").MSISlice())
assert.Equal(t, val, New(m).Get("nothing").MSISlice([]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustMSISlice()
})
-
}
func TestIsMSI(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: map[string]interface{}(map[string]interface{}{"name": "Tyler"})}
+ v := &Value{data: map[string]interface{}(map[string]interface{}{"name": "Tyler"})}
assert.True(t, v.IsMSI())
- v = &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
- assert.True(t, v.IsMSISlice())
+}
+func TestIsMSISlice(t *testing.T) {
+ v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
+ assert.True(t, v.IsMSISlice())
}
func TestEachMSI(t *testing.T) {
-
v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
count := 0
replacedVals := make([]map[string]interface{}, 0)
assert.Equal(t, v, v.EachMSI(func(i int, val map[string]interface{}) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustMSISlice()[0])
assert.Equal(t, replacedVals[1], v.MustMSISlice()[1])
assert.Equal(t, replacedVals[2], v.MustMSISlice()[2])
-
}
func TestWhereMSI(t *testing.T) {
-
v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
selected := v.WhereMSI(func(i int, val map[string]interface{}) bool {
@@ -230,11 +196,9 @@ func TestWhereMSI(t *testing.T) {
}).MustMSISlice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupMSI(t *testing.T) {
-
v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
grouped := v.GroupMSI(func(i int, val map[string]interface{}) string {
@@ -244,11 +208,9 @@ func TestGroupMSI(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceMSI(t *testing.T) {
-
v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
rawArr := v.MustMSISlice()
@@ -269,11 +231,9 @@ func TestReplaceMSI(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectMSI(t *testing.T) {
-
v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
collected := v.CollectMSI(func(index int, val map[string]interface{}) interface{} {
@@ -289,83 +249,68 @@ func TestCollectMSI(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for ObjxMap ((Map) and [](Map))
+*/
func TestObjxMap(t *testing.T) {
-
val := (Map)(New(1))
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").ObjxMap())
assert.Equal(t, val, New(m).Get("value").MustObjxMap())
assert.Equal(t, (Map)(New(nil)), New(m).Get("nothing").ObjxMap())
assert.Equal(t, val, New(m).Get("nothing").ObjxMap(New(1)))
-
assert.Panics(t, func() {
New(m).Get("age").MustObjxMap()
})
-
}
func TestObjxMapSlice(t *testing.T) {
-
val := (Map)(New(1))
+
m := map[string]interface{}{"value": [](Map){val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").ObjxMapSlice()[0])
assert.Equal(t, val, New(m).Get("value").MustObjxMapSlice()[0])
assert.Equal(t, [](Map)(nil), New(m).Get("nothing").ObjxMapSlice())
assert.Equal(t, val, New(m).Get("nothing").ObjxMapSlice([](Map){(Map)(New(1))})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustObjxMapSlice()
})
-
}
func TestIsObjxMap(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: (Map)(New(1))}
+ v := &Value{data: (Map)(New(1))}
assert.True(t, v.IsObjxMap())
- v = &Value{data: [](Map){(Map)(New(1))}}
- assert.True(t, v.IsObjxMapSlice())
+}
+func TestIsObjxMapSlice(t *testing.T) {
+ v := &Value{data: [](Map){(Map)(New(1))}}
+ assert.True(t, v.IsObjxMapSlice())
}
func TestEachObjxMap(t *testing.T) {
-
v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
count := 0
replacedVals := make([](Map), 0)
assert.Equal(t, v, v.EachObjxMap(func(i int, val Map) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustObjxMapSlice()[0])
assert.Equal(t, replacedVals[1], v.MustObjxMapSlice()[1])
assert.Equal(t, replacedVals[2], v.MustObjxMapSlice()[2])
-
}
func TestWhereObjxMap(t *testing.T) {
-
v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
selected := v.WhereObjxMap(func(i int, val Map) bool {
@@ -373,11 +318,9 @@ func TestWhereObjxMap(t *testing.T) {
}).MustObjxMapSlice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupObjxMap(t *testing.T) {
-
v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
grouped := v.GroupObjxMap(func(i int, val Map) string {
@@ -387,11 +330,9 @@ func TestGroupObjxMap(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceObjxMap(t *testing.T) {
-
v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
rawArr := v.MustObjxMapSlice()
@@ -412,11 +353,9 @@ func TestReplaceObjxMap(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectObjxMap(t *testing.T) {
-
v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
collected := v.CollectObjxMap(func(index int, val Map) interface{} {
@@ -432,83 +371,68 @@ func TestCollectObjxMap(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Bool (bool and []bool)
+*/
func TestBool(t *testing.T) {
-
val := bool(true)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Bool())
assert.Equal(t, val, New(m).Get("value").MustBool())
assert.Equal(t, bool(false), New(m).Get("nothing").Bool())
assert.Equal(t, val, New(m).Get("nothing").Bool(true))
-
assert.Panics(t, func() {
New(m).Get("age").MustBool()
})
-
}
func TestBoolSlice(t *testing.T) {
-
val := bool(true)
+
m := map[string]interface{}{"value": []bool{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").BoolSlice()[0])
assert.Equal(t, val, New(m).Get("value").MustBoolSlice()[0])
assert.Equal(t, []bool(nil), New(m).Get("nothing").BoolSlice())
assert.Equal(t, val, New(m).Get("nothing").BoolSlice([]bool{bool(true)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustBoolSlice()
})
-
}
func TestIsBool(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: bool(true)}
+ v := &Value{data: bool(true)}
assert.True(t, v.IsBool())
- v = &Value{data: []bool{bool(true)}}
- assert.True(t, v.IsBoolSlice())
+}
+func TestIsBoolSlice(t *testing.T) {
+ v := &Value{data: []bool{bool(true)}}
+ assert.True(t, v.IsBoolSlice())
}
func TestEachBool(t *testing.T) {
-
v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true)}}
count := 0
replacedVals := make([]bool, 0)
assert.Equal(t, v, v.EachBool(func(i int, val bool) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustBoolSlice()[0])
assert.Equal(t, replacedVals[1], v.MustBoolSlice()[1])
assert.Equal(t, replacedVals[2], v.MustBoolSlice()[2])
-
}
func TestWhereBool(t *testing.T) {
-
v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
selected := v.WhereBool(func(i int, val bool) bool {
@@ -516,11 +440,9 @@ func TestWhereBool(t *testing.T) {
}).MustBoolSlice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupBool(t *testing.T) {
-
v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
grouped := v.GroupBool(func(i int, val bool) string {
@@ -530,11 +452,9 @@ func TestGroupBool(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceBool(t *testing.T) {
-
v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
rawArr := v.MustBoolSlice()
@@ -555,11 +475,9 @@ func TestReplaceBool(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectBool(t *testing.T) {
-
v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
collected := v.CollectBool(func(index int, val bool) interface{} {
@@ -575,83 +493,68 @@ func TestCollectBool(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Str (string and []string)
+*/
func TestStr(t *testing.T) {
-
val := string("hello")
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Str())
assert.Equal(t, val, New(m).Get("value").MustStr())
assert.Equal(t, string(""), New(m).Get("nothing").Str())
assert.Equal(t, val, New(m).Get("nothing").Str("hello"))
-
assert.Panics(t, func() {
New(m).Get("age").MustStr()
})
-
}
func TestStrSlice(t *testing.T) {
-
val := string("hello")
+
m := map[string]interface{}{"value": []string{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").StrSlice()[0])
assert.Equal(t, val, New(m).Get("value").MustStrSlice()[0])
assert.Equal(t, []string(nil), New(m).Get("nothing").StrSlice())
assert.Equal(t, val, New(m).Get("nothing").StrSlice([]string{string("hello")})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustStrSlice()
})
-
}
func TestIsStr(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: string("hello")}
+ v := &Value{data: string("hello")}
assert.True(t, v.IsStr())
- v = &Value{data: []string{string("hello")}}
- assert.True(t, v.IsStrSlice())
+}
+func TestIsStrSlice(t *testing.T) {
+ v := &Value{data: []string{string("hello")}}
+ assert.True(t, v.IsStrSlice())
}
func TestEachStr(t *testing.T) {
-
v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
count := 0
replacedVals := make([]string, 0)
assert.Equal(t, v, v.EachStr(func(i int, val string) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustStrSlice()[0])
assert.Equal(t, replacedVals[1], v.MustStrSlice()[1])
assert.Equal(t, replacedVals[2], v.MustStrSlice()[2])
-
}
func TestWhereStr(t *testing.T) {
-
v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
selected := v.WhereStr(func(i int, val string) bool {
@@ -659,11 +562,9 @@ func TestWhereStr(t *testing.T) {
}).MustStrSlice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupStr(t *testing.T) {
-
v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
grouped := v.GroupStr(func(i int, val string) string {
@@ -673,11 +574,9 @@ func TestGroupStr(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceStr(t *testing.T) {
-
v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
rawArr := v.MustStrSlice()
@@ -698,11 +597,9 @@ func TestReplaceStr(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectStr(t *testing.T) {
-
v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
collected := v.CollectStr(func(index int, val string) interface{} {
@@ -718,83 +615,68 @@ func TestCollectStr(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Int (int and []int)
+*/
func TestInt(t *testing.T) {
-
val := int(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Int())
assert.Equal(t, val, New(m).Get("value").MustInt())
assert.Equal(t, int(0), New(m).Get("nothing").Int())
assert.Equal(t, val, New(m).Get("nothing").Int(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustInt()
})
-
}
func TestIntSlice(t *testing.T) {
-
val := int(1)
+
m := map[string]interface{}{"value": []int{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").IntSlice()[0])
assert.Equal(t, val, New(m).Get("value").MustIntSlice()[0])
assert.Equal(t, []int(nil), New(m).Get("nothing").IntSlice())
assert.Equal(t, val, New(m).Get("nothing").IntSlice([]int{int(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustIntSlice()
})
-
}
func TestIsInt(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: int(1)}
+ v := &Value{data: int(1)}
assert.True(t, v.IsInt())
- v = &Value{data: []int{int(1)}}
- assert.True(t, v.IsIntSlice())
+}
+func TestIsIntSlice(t *testing.T) {
+ v := &Value{data: []int{int(1)}}
+ assert.True(t, v.IsIntSlice())
}
func TestEachInt(t *testing.T) {
-
v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1)}}
count := 0
replacedVals := make([]int, 0)
assert.Equal(t, v, v.EachInt(func(i int, val int) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustIntSlice()[0])
assert.Equal(t, replacedVals[1], v.MustIntSlice()[1])
assert.Equal(t, replacedVals[2], v.MustIntSlice()[2])
-
}
func TestWhereInt(t *testing.T) {
-
v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
selected := v.WhereInt(func(i int, val int) bool {
@@ -802,11 +684,9 @@ func TestWhereInt(t *testing.T) {
}).MustIntSlice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupInt(t *testing.T) {
-
v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
grouped := v.GroupInt(func(i int, val int) string {
@@ -816,11 +696,9 @@ func TestGroupInt(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceInt(t *testing.T) {
-
v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
rawArr := v.MustIntSlice()
@@ -841,11 +719,9 @@ func TestReplaceInt(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectInt(t *testing.T) {
-
v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
collected := v.CollectInt(func(index int, val int) interface{} {
@@ -861,83 +737,68 @@ func TestCollectInt(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Int8 (int8 and []int8)
+*/
func TestInt8(t *testing.T) {
-
val := int8(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Int8())
assert.Equal(t, val, New(m).Get("value").MustInt8())
assert.Equal(t, int8(0), New(m).Get("nothing").Int8())
assert.Equal(t, val, New(m).Get("nothing").Int8(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustInt8()
})
-
}
func TestInt8Slice(t *testing.T) {
-
val := int8(1)
+
m := map[string]interface{}{"value": []int8{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Int8Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustInt8Slice()[0])
assert.Equal(t, []int8(nil), New(m).Get("nothing").Int8Slice())
assert.Equal(t, val, New(m).Get("nothing").Int8Slice([]int8{int8(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustInt8Slice()
})
-
}
func TestIsInt8(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: int8(1)}
+ v := &Value{data: int8(1)}
assert.True(t, v.IsInt8())
- v = &Value{data: []int8{int8(1)}}
- assert.True(t, v.IsInt8Slice())
+}
+func TestIsInt8Slice(t *testing.T) {
+ v := &Value{data: []int8{int8(1)}}
+ assert.True(t, v.IsInt8Slice())
}
func TestEachInt8(t *testing.T) {
-
v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1)}}
count := 0
replacedVals := make([]int8, 0)
assert.Equal(t, v, v.EachInt8(func(i int, val int8) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustInt8Slice()[0])
assert.Equal(t, replacedVals[1], v.MustInt8Slice()[1])
assert.Equal(t, replacedVals[2], v.MustInt8Slice()[2])
-
}
func TestWhereInt8(t *testing.T) {
-
v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
selected := v.WhereInt8(func(i int, val int8) bool {
@@ -945,11 +806,9 @@ func TestWhereInt8(t *testing.T) {
}).MustInt8Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupInt8(t *testing.T) {
-
v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
grouped := v.GroupInt8(func(i int, val int8) string {
@@ -959,11 +818,9 @@ func TestGroupInt8(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceInt8(t *testing.T) {
-
v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
rawArr := v.MustInt8Slice()
@@ -984,11 +841,9 @@ func TestReplaceInt8(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectInt8(t *testing.T) {
-
v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
collected := v.CollectInt8(func(index int, val int8) interface{} {
@@ -1004,83 +859,68 @@ func TestCollectInt8(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Int16 (int16 and []int16)
+*/
func TestInt16(t *testing.T) {
-
val := int16(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Int16())
assert.Equal(t, val, New(m).Get("value").MustInt16())
assert.Equal(t, int16(0), New(m).Get("nothing").Int16())
assert.Equal(t, val, New(m).Get("nothing").Int16(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustInt16()
})
-
}
func TestInt16Slice(t *testing.T) {
-
val := int16(1)
+
m := map[string]interface{}{"value": []int16{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Int16Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustInt16Slice()[0])
assert.Equal(t, []int16(nil), New(m).Get("nothing").Int16Slice())
assert.Equal(t, val, New(m).Get("nothing").Int16Slice([]int16{int16(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustInt16Slice()
})
-
}
func TestIsInt16(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: int16(1)}
+ v := &Value{data: int16(1)}
assert.True(t, v.IsInt16())
- v = &Value{data: []int16{int16(1)}}
- assert.True(t, v.IsInt16Slice())
+}
+func TestIsInt16Slice(t *testing.T) {
+ v := &Value{data: []int16{int16(1)}}
+ assert.True(t, v.IsInt16Slice())
}
func TestEachInt16(t *testing.T) {
-
v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1)}}
count := 0
replacedVals := make([]int16, 0)
assert.Equal(t, v, v.EachInt16(func(i int, val int16) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustInt16Slice()[0])
assert.Equal(t, replacedVals[1], v.MustInt16Slice()[1])
assert.Equal(t, replacedVals[2], v.MustInt16Slice()[2])
-
}
func TestWhereInt16(t *testing.T) {
-
v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
selected := v.WhereInt16(func(i int, val int16) bool {
@@ -1088,11 +928,9 @@ func TestWhereInt16(t *testing.T) {
}).MustInt16Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupInt16(t *testing.T) {
-
v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
grouped := v.GroupInt16(func(i int, val int16) string {
@@ -1102,11 +940,9 @@ func TestGroupInt16(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceInt16(t *testing.T) {
-
v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
rawArr := v.MustInt16Slice()
@@ -1127,11 +963,9 @@ func TestReplaceInt16(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectInt16(t *testing.T) {
-
v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
collected := v.CollectInt16(func(index int, val int16) interface{} {
@@ -1147,83 +981,68 @@ func TestCollectInt16(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Int32 (int32 and []int32)
+*/
func TestInt32(t *testing.T) {
-
val := int32(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Int32())
assert.Equal(t, val, New(m).Get("value").MustInt32())
assert.Equal(t, int32(0), New(m).Get("nothing").Int32())
assert.Equal(t, val, New(m).Get("nothing").Int32(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustInt32()
})
-
}
func TestInt32Slice(t *testing.T) {
-
val := int32(1)
+
m := map[string]interface{}{"value": []int32{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Int32Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustInt32Slice()[0])
assert.Equal(t, []int32(nil), New(m).Get("nothing").Int32Slice())
assert.Equal(t, val, New(m).Get("nothing").Int32Slice([]int32{int32(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustInt32Slice()
})
-
}
func TestIsInt32(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: int32(1)}
+ v := &Value{data: int32(1)}
assert.True(t, v.IsInt32())
- v = &Value{data: []int32{int32(1)}}
- assert.True(t, v.IsInt32Slice())
+}
+func TestIsInt32Slice(t *testing.T) {
+ v := &Value{data: []int32{int32(1)}}
+ assert.True(t, v.IsInt32Slice())
}
func TestEachInt32(t *testing.T) {
-
v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1)}}
count := 0
replacedVals := make([]int32, 0)
assert.Equal(t, v, v.EachInt32(func(i int, val int32) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustInt32Slice()[0])
assert.Equal(t, replacedVals[1], v.MustInt32Slice()[1])
assert.Equal(t, replacedVals[2], v.MustInt32Slice()[2])
-
}
func TestWhereInt32(t *testing.T) {
-
v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
selected := v.WhereInt32(func(i int, val int32) bool {
@@ -1231,11 +1050,9 @@ func TestWhereInt32(t *testing.T) {
}).MustInt32Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupInt32(t *testing.T) {
-
v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
grouped := v.GroupInt32(func(i int, val int32) string {
@@ -1245,11 +1062,9 @@ func TestGroupInt32(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceInt32(t *testing.T) {
-
v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
rawArr := v.MustInt32Slice()
@@ -1270,11 +1085,9 @@ func TestReplaceInt32(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectInt32(t *testing.T) {
-
v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
collected := v.CollectInt32(func(index int, val int32) interface{} {
@@ -1290,83 +1103,68 @@ func TestCollectInt32(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Int64 (int64 and []int64)
+*/
func TestInt64(t *testing.T) {
-
val := int64(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Int64())
assert.Equal(t, val, New(m).Get("value").MustInt64())
assert.Equal(t, int64(0), New(m).Get("nothing").Int64())
assert.Equal(t, val, New(m).Get("nothing").Int64(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustInt64()
})
-
}
func TestInt64Slice(t *testing.T) {
-
val := int64(1)
+
m := map[string]interface{}{"value": []int64{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Int64Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustInt64Slice()[0])
assert.Equal(t, []int64(nil), New(m).Get("nothing").Int64Slice())
assert.Equal(t, val, New(m).Get("nothing").Int64Slice([]int64{int64(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustInt64Slice()
})
-
}
func TestIsInt64(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: int64(1)}
+ v := &Value{data: int64(1)}
assert.True(t, v.IsInt64())
- v = &Value{data: []int64{int64(1)}}
- assert.True(t, v.IsInt64Slice())
+}
+func TestIsInt64Slice(t *testing.T) {
+ v := &Value{data: []int64{int64(1)}}
+ assert.True(t, v.IsInt64Slice())
}
func TestEachInt64(t *testing.T) {
-
v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1)}}
count := 0
replacedVals := make([]int64, 0)
assert.Equal(t, v, v.EachInt64(func(i int, val int64) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustInt64Slice()[0])
assert.Equal(t, replacedVals[1], v.MustInt64Slice()[1])
assert.Equal(t, replacedVals[2], v.MustInt64Slice()[2])
-
}
func TestWhereInt64(t *testing.T) {
-
v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
selected := v.WhereInt64(func(i int, val int64) bool {
@@ -1374,11 +1172,9 @@ func TestWhereInt64(t *testing.T) {
}).MustInt64Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupInt64(t *testing.T) {
-
v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
grouped := v.GroupInt64(func(i int, val int64) string {
@@ -1388,11 +1184,9 @@ func TestGroupInt64(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceInt64(t *testing.T) {
-
v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
rawArr := v.MustInt64Slice()
@@ -1413,11 +1207,9 @@ func TestReplaceInt64(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectInt64(t *testing.T) {
-
v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
collected := v.CollectInt64(func(index int, val int64) interface{} {
@@ -1433,83 +1225,68 @@ func TestCollectInt64(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Uint (uint and []uint)
+*/
func TestUint(t *testing.T) {
-
val := uint(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Uint())
assert.Equal(t, val, New(m).Get("value").MustUint())
assert.Equal(t, uint(0), New(m).Get("nothing").Uint())
assert.Equal(t, val, New(m).Get("nothing").Uint(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustUint()
})
-
}
func TestUintSlice(t *testing.T) {
-
val := uint(1)
+
m := map[string]interface{}{"value": []uint{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").UintSlice()[0])
assert.Equal(t, val, New(m).Get("value").MustUintSlice()[0])
assert.Equal(t, []uint(nil), New(m).Get("nothing").UintSlice())
assert.Equal(t, val, New(m).Get("nothing").UintSlice([]uint{uint(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustUintSlice()
})
-
}
func TestIsUint(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: uint(1)}
+ v := &Value{data: uint(1)}
assert.True(t, v.IsUint())
- v = &Value{data: []uint{uint(1)}}
- assert.True(t, v.IsUintSlice())
+}
+func TestIsUintSlice(t *testing.T) {
+ v := &Value{data: []uint{uint(1)}}
+ assert.True(t, v.IsUintSlice())
}
func TestEachUint(t *testing.T) {
-
v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1)}}
count := 0
replacedVals := make([]uint, 0)
assert.Equal(t, v, v.EachUint(func(i int, val uint) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustUintSlice()[0])
assert.Equal(t, replacedVals[1], v.MustUintSlice()[1])
assert.Equal(t, replacedVals[2], v.MustUintSlice()[2])
-
}
func TestWhereUint(t *testing.T) {
-
v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
selected := v.WhereUint(func(i int, val uint) bool {
@@ -1517,11 +1294,9 @@ func TestWhereUint(t *testing.T) {
}).MustUintSlice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupUint(t *testing.T) {
-
v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
grouped := v.GroupUint(func(i int, val uint) string {
@@ -1531,11 +1306,9 @@ func TestGroupUint(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceUint(t *testing.T) {
-
v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
rawArr := v.MustUintSlice()
@@ -1556,11 +1329,9 @@ func TestReplaceUint(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectUint(t *testing.T) {
-
v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
collected := v.CollectUint(func(index int, val uint) interface{} {
@@ -1576,83 +1347,68 @@ func TestCollectUint(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Uint8 (uint8 and []uint8)
+*/
func TestUint8(t *testing.T) {
-
val := uint8(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Uint8())
assert.Equal(t, val, New(m).Get("value").MustUint8())
assert.Equal(t, uint8(0), New(m).Get("nothing").Uint8())
assert.Equal(t, val, New(m).Get("nothing").Uint8(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustUint8()
})
-
}
func TestUint8Slice(t *testing.T) {
-
val := uint8(1)
+
m := map[string]interface{}{"value": []uint8{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Uint8Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustUint8Slice()[0])
assert.Equal(t, []uint8(nil), New(m).Get("nothing").Uint8Slice())
assert.Equal(t, val, New(m).Get("nothing").Uint8Slice([]uint8{uint8(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustUint8Slice()
})
-
}
func TestIsUint8(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: uint8(1)}
+ v := &Value{data: uint8(1)}
assert.True(t, v.IsUint8())
- v = &Value{data: []uint8{uint8(1)}}
- assert.True(t, v.IsUint8Slice())
+}
+func TestIsUint8Slice(t *testing.T) {
+ v := &Value{data: []uint8{uint8(1)}}
+ assert.True(t, v.IsUint8Slice())
}
func TestEachUint8(t *testing.T) {
-
v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
count := 0
replacedVals := make([]uint8, 0)
assert.Equal(t, v, v.EachUint8(func(i int, val uint8) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustUint8Slice()[0])
assert.Equal(t, replacedVals[1], v.MustUint8Slice()[1])
assert.Equal(t, replacedVals[2], v.MustUint8Slice()[2])
-
}
func TestWhereUint8(t *testing.T) {
-
v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
selected := v.WhereUint8(func(i int, val uint8) bool {
@@ -1660,11 +1416,9 @@ func TestWhereUint8(t *testing.T) {
}).MustUint8Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupUint8(t *testing.T) {
-
v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
grouped := v.GroupUint8(func(i int, val uint8) string {
@@ -1674,11 +1428,9 @@ func TestGroupUint8(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceUint8(t *testing.T) {
-
v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
rawArr := v.MustUint8Slice()
@@ -1699,11 +1451,9 @@ func TestReplaceUint8(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectUint8(t *testing.T) {
-
v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
collected := v.CollectUint8(func(index int, val uint8) interface{} {
@@ -1719,83 +1469,68 @@ func TestCollectUint8(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Uint16 (uint16 and []uint16)
+*/
func TestUint16(t *testing.T) {
-
val := uint16(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Uint16())
assert.Equal(t, val, New(m).Get("value").MustUint16())
assert.Equal(t, uint16(0), New(m).Get("nothing").Uint16())
assert.Equal(t, val, New(m).Get("nothing").Uint16(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustUint16()
})
-
}
func TestUint16Slice(t *testing.T) {
-
val := uint16(1)
+
m := map[string]interface{}{"value": []uint16{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Uint16Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustUint16Slice()[0])
assert.Equal(t, []uint16(nil), New(m).Get("nothing").Uint16Slice())
assert.Equal(t, val, New(m).Get("nothing").Uint16Slice([]uint16{uint16(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustUint16Slice()
})
-
}
func TestIsUint16(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: uint16(1)}
+ v := &Value{data: uint16(1)}
assert.True(t, v.IsUint16())
- v = &Value{data: []uint16{uint16(1)}}
- assert.True(t, v.IsUint16Slice())
+}
+func TestIsUint16Slice(t *testing.T) {
+ v := &Value{data: []uint16{uint16(1)}}
+ assert.True(t, v.IsUint16Slice())
}
func TestEachUint16(t *testing.T) {
-
v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
count := 0
replacedVals := make([]uint16, 0)
assert.Equal(t, v, v.EachUint16(func(i int, val uint16) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustUint16Slice()[0])
assert.Equal(t, replacedVals[1], v.MustUint16Slice()[1])
assert.Equal(t, replacedVals[2], v.MustUint16Slice()[2])
-
}
func TestWhereUint16(t *testing.T) {
-
v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
selected := v.WhereUint16(func(i int, val uint16) bool {
@@ -1803,11 +1538,9 @@ func TestWhereUint16(t *testing.T) {
}).MustUint16Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupUint16(t *testing.T) {
-
v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
grouped := v.GroupUint16(func(i int, val uint16) string {
@@ -1817,11 +1550,9 @@ func TestGroupUint16(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceUint16(t *testing.T) {
-
v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
rawArr := v.MustUint16Slice()
@@ -1842,11 +1573,9 @@ func TestReplaceUint16(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectUint16(t *testing.T) {
-
v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
collected := v.CollectUint16(func(index int, val uint16) interface{} {
@@ -1862,83 +1591,68 @@ func TestCollectUint16(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Uint32 (uint32 and []uint32)
+*/
func TestUint32(t *testing.T) {
-
val := uint32(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Uint32())
assert.Equal(t, val, New(m).Get("value").MustUint32())
assert.Equal(t, uint32(0), New(m).Get("nothing").Uint32())
assert.Equal(t, val, New(m).Get("nothing").Uint32(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustUint32()
})
-
}
func TestUint32Slice(t *testing.T) {
-
val := uint32(1)
+
m := map[string]interface{}{"value": []uint32{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Uint32Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustUint32Slice()[0])
assert.Equal(t, []uint32(nil), New(m).Get("nothing").Uint32Slice())
assert.Equal(t, val, New(m).Get("nothing").Uint32Slice([]uint32{uint32(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustUint32Slice()
})
-
}
func TestIsUint32(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: uint32(1)}
+ v := &Value{data: uint32(1)}
assert.True(t, v.IsUint32())
- v = &Value{data: []uint32{uint32(1)}}
- assert.True(t, v.IsUint32Slice())
+}
+func TestIsUint32Slice(t *testing.T) {
+ v := &Value{data: []uint32{uint32(1)}}
+ assert.True(t, v.IsUint32Slice())
}
func TestEachUint32(t *testing.T) {
-
v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
count := 0
replacedVals := make([]uint32, 0)
assert.Equal(t, v, v.EachUint32(func(i int, val uint32) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustUint32Slice()[0])
assert.Equal(t, replacedVals[1], v.MustUint32Slice()[1])
assert.Equal(t, replacedVals[2], v.MustUint32Slice()[2])
-
}
func TestWhereUint32(t *testing.T) {
-
v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
selected := v.WhereUint32(func(i int, val uint32) bool {
@@ -1946,11 +1660,9 @@ func TestWhereUint32(t *testing.T) {
}).MustUint32Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupUint32(t *testing.T) {
-
v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
grouped := v.GroupUint32(func(i int, val uint32) string {
@@ -1960,11 +1672,9 @@ func TestGroupUint32(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceUint32(t *testing.T) {
-
v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
rawArr := v.MustUint32Slice()
@@ -1985,11 +1695,9 @@ func TestReplaceUint32(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectUint32(t *testing.T) {
-
v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
collected := v.CollectUint32(func(index int, val uint32) interface{} {
@@ -2005,83 +1713,68 @@ func TestCollectUint32(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Uint64 (uint64 and []uint64)
+*/
func TestUint64(t *testing.T) {
-
val := uint64(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Uint64())
assert.Equal(t, val, New(m).Get("value").MustUint64())
assert.Equal(t, uint64(0), New(m).Get("nothing").Uint64())
assert.Equal(t, val, New(m).Get("nothing").Uint64(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustUint64()
})
-
}
func TestUint64Slice(t *testing.T) {
-
val := uint64(1)
+
m := map[string]interface{}{"value": []uint64{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Uint64Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustUint64Slice()[0])
assert.Equal(t, []uint64(nil), New(m).Get("nothing").Uint64Slice())
assert.Equal(t, val, New(m).Get("nothing").Uint64Slice([]uint64{uint64(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustUint64Slice()
})
-
}
func TestIsUint64(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: uint64(1)}
+ v := &Value{data: uint64(1)}
assert.True(t, v.IsUint64())
- v = &Value{data: []uint64{uint64(1)}}
- assert.True(t, v.IsUint64Slice())
+}
+func TestIsUint64Slice(t *testing.T) {
+ v := &Value{data: []uint64{uint64(1)}}
+ assert.True(t, v.IsUint64Slice())
}
func TestEachUint64(t *testing.T) {
-
v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
count := 0
replacedVals := make([]uint64, 0)
assert.Equal(t, v, v.EachUint64(func(i int, val uint64) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustUint64Slice()[0])
assert.Equal(t, replacedVals[1], v.MustUint64Slice()[1])
assert.Equal(t, replacedVals[2], v.MustUint64Slice()[2])
-
}
func TestWhereUint64(t *testing.T) {
-
v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
selected := v.WhereUint64(func(i int, val uint64) bool {
@@ -2089,11 +1782,9 @@ func TestWhereUint64(t *testing.T) {
}).MustUint64Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupUint64(t *testing.T) {
-
v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
grouped := v.GroupUint64(func(i int, val uint64) string {
@@ -2103,11 +1794,9 @@ func TestGroupUint64(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceUint64(t *testing.T) {
-
v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
rawArr := v.MustUint64Slice()
@@ -2128,11 +1817,9 @@ func TestReplaceUint64(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectUint64(t *testing.T) {
-
v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
collected := v.CollectUint64(func(index int, val uint64) interface{} {
@@ -2148,83 +1835,68 @@ func TestCollectUint64(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Uintptr (uintptr and []uintptr)
+*/
func TestUintptr(t *testing.T) {
-
val := uintptr(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Uintptr())
assert.Equal(t, val, New(m).Get("value").MustUintptr())
assert.Equal(t, uintptr(0), New(m).Get("nothing").Uintptr())
assert.Equal(t, val, New(m).Get("nothing").Uintptr(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustUintptr()
})
-
}
func TestUintptrSlice(t *testing.T) {
-
val := uintptr(1)
+
m := map[string]interface{}{"value": []uintptr{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").UintptrSlice()[0])
assert.Equal(t, val, New(m).Get("value").MustUintptrSlice()[0])
assert.Equal(t, []uintptr(nil), New(m).Get("nothing").UintptrSlice())
assert.Equal(t, val, New(m).Get("nothing").UintptrSlice([]uintptr{uintptr(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustUintptrSlice()
})
-
}
func TestIsUintptr(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: uintptr(1)}
+ v := &Value{data: uintptr(1)}
assert.True(t, v.IsUintptr())
- v = &Value{data: []uintptr{uintptr(1)}}
- assert.True(t, v.IsUintptrSlice())
+}
+func TestIsUintptrSlice(t *testing.T) {
+ v := &Value{data: []uintptr{uintptr(1)}}
+ assert.True(t, v.IsUintptrSlice())
}
func TestEachUintptr(t *testing.T) {
-
v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
count := 0
replacedVals := make([]uintptr, 0)
assert.Equal(t, v, v.EachUintptr(func(i int, val uintptr) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustUintptrSlice()[0])
assert.Equal(t, replacedVals[1], v.MustUintptrSlice()[1])
assert.Equal(t, replacedVals[2], v.MustUintptrSlice()[2])
-
}
func TestWhereUintptr(t *testing.T) {
-
v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
selected := v.WhereUintptr(func(i int, val uintptr) bool {
@@ -2232,11 +1904,9 @@ func TestWhereUintptr(t *testing.T) {
}).MustUintptrSlice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupUintptr(t *testing.T) {
-
v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
grouped := v.GroupUintptr(func(i int, val uintptr) string {
@@ -2246,11 +1916,9 @@ func TestGroupUintptr(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceUintptr(t *testing.T) {
-
v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
rawArr := v.MustUintptrSlice()
@@ -2271,11 +1939,9 @@ func TestReplaceUintptr(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectUintptr(t *testing.T) {
-
v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
collected := v.CollectUintptr(func(index int, val uintptr) interface{} {
@@ -2291,83 +1957,68 @@ func TestCollectUintptr(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Float32 (float32 and []float32)
+*/
func TestFloat32(t *testing.T) {
-
val := float32(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Float32())
assert.Equal(t, val, New(m).Get("value").MustFloat32())
assert.Equal(t, float32(0), New(m).Get("nothing").Float32())
assert.Equal(t, val, New(m).Get("nothing").Float32(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustFloat32()
})
-
}
func TestFloat32Slice(t *testing.T) {
-
val := float32(1)
+
m := map[string]interface{}{"value": []float32{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Float32Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustFloat32Slice()[0])
assert.Equal(t, []float32(nil), New(m).Get("nothing").Float32Slice())
assert.Equal(t, val, New(m).Get("nothing").Float32Slice([]float32{float32(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustFloat32Slice()
})
-
}
func TestIsFloat32(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: float32(1)}
+ v := &Value{data: float32(1)}
assert.True(t, v.IsFloat32())
- v = &Value{data: []float32{float32(1)}}
- assert.True(t, v.IsFloat32Slice())
+}
+func TestIsFloat32Slice(t *testing.T) {
+ v := &Value{data: []float32{float32(1)}}
+ assert.True(t, v.IsFloat32Slice())
}
func TestEachFloat32(t *testing.T) {
-
v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1)}}
count := 0
replacedVals := make([]float32, 0)
assert.Equal(t, v, v.EachFloat32(func(i int, val float32) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustFloat32Slice()[0])
assert.Equal(t, replacedVals[1], v.MustFloat32Slice()[1])
assert.Equal(t, replacedVals[2], v.MustFloat32Slice()[2])
-
}
func TestWhereFloat32(t *testing.T) {
-
v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
selected := v.WhereFloat32(func(i int, val float32) bool {
@@ -2375,11 +2026,9 @@ func TestWhereFloat32(t *testing.T) {
}).MustFloat32Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupFloat32(t *testing.T) {
-
v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
grouped := v.GroupFloat32(func(i int, val float32) string {
@@ -2389,11 +2038,9 @@ func TestGroupFloat32(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceFloat32(t *testing.T) {
-
v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
rawArr := v.MustFloat32Slice()
@@ -2414,11 +2061,9 @@ func TestReplaceFloat32(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectFloat32(t *testing.T) {
-
v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
collected := v.CollectFloat32(func(index int, val float32) interface{} {
@@ -2434,83 +2079,68 @@ func TestCollectFloat32(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Float64 (float64 and []float64)
+*/
func TestFloat64(t *testing.T) {
-
val := float64(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Float64())
assert.Equal(t, val, New(m).Get("value").MustFloat64())
assert.Equal(t, float64(0), New(m).Get("nothing").Float64())
assert.Equal(t, val, New(m).Get("nothing").Float64(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustFloat64()
})
-
}
func TestFloat64Slice(t *testing.T) {
-
val := float64(1)
+
m := map[string]interface{}{"value": []float64{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Float64Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustFloat64Slice()[0])
assert.Equal(t, []float64(nil), New(m).Get("nothing").Float64Slice())
assert.Equal(t, val, New(m).Get("nothing").Float64Slice([]float64{float64(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustFloat64Slice()
})
-
}
func TestIsFloat64(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: float64(1)}
+ v := &Value{data: float64(1)}
assert.True(t, v.IsFloat64())
- v = &Value{data: []float64{float64(1)}}
- assert.True(t, v.IsFloat64Slice())
+}
+func TestIsFloat64Slice(t *testing.T) {
+ v := &Value{data: []float64{float64(1)}}
+ assert.True(t, v.IsFloat64Slice())
}
func TestEachFloat64(t *testing.T) {
-
v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1)}}
count := 0
replacedVals := make([]float64, 0)
assert.Equal(t, v, v.EachFloat64(func(i int, val float64) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustFloat64Slice()[0])
assert.Equal(t, replacedVals[1], v.MustFloat64Slice()[1])
assert.Equal(t, replacedVals[2], v.MustFloat64Slice()[2])
-
}
func TestWhereFloat64(t *testing.T) {
-
v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
selected := v.WhereFloat64(func(i int, val float64) bool {
@@ -2518,11 +2148,9 @@ func TestWhereFloat64(t *testing.T) {
}).MustFloat64Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupFloat64(t *testing.T) {
-
v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
grouped := v.GroupFloat64(func(i int, val float64) string {
@@ -2532,11 +2160,9 @@ func TestGroupFloat64(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceFloat64(t *testing.T) {
-
v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
rawArr := v.MustFloat64Slice()
@@ -2557,11 +2183,9 @@ func TestReplaceFloat64(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectFloat64(t *testing.T) {
-
v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
collected := v.CollectFloat64(func(index int, val float64) interface{} {
@@ -2577,83 +2201,68 @@ func TestCollectFloat64(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Complex64 (complex64 and []complex64)
+*/
func TestComplex64(t *testing.T) {
-
val := complex64(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Complex64())
assert.Equal(t, val, New(m).Get("value").MustComplex64())
assert.Equal(t, complex64(0), New(m).Get("nothing").Complex64())
assert.Equal(t, val, New(m).Get("nothing").Complex64(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustComplex64()
})
-
}
func TestComplex64Slice(t *testing.T) {
-
val := complex64(1)
+
m := map[string]interface{}{"value": []complex64{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Complex64Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustComplex64Slice()[0])
assert.Equal(t, []complex64(nil), New(m).Get("nothing").Complex64Slice())
assert.Equal(t, val, New(m).Get("nothing").Complex64Slice([]complex64{complex64(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustComplex64Slice()
})
-
}
func TestIsComplex64(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: complex64(1)}
+ v := &Value{data: complex64(1)}
assert.True(t, v.IsComplex64())
- v = &Value{data: []complex64{complex64(1)}}
- assert.True(t, v.IsComplex64Slice())
+}
+func TestIsComplex64Slice(t *testing.T) {
+ v := &Value{data: []complex64{complex64(1)}}
+ assert.True(t, v.IsComplex64Slice())
}
func TestEachComplex64(t *testing.T) {
-
v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
count := 0
replacedVals := make([]complex64, 0)
assert.Equal(t, v, v.EachComplex64(func(i int, val complex64) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustComplex64Slice()[0])
assert.Equal(t, replacedVals[1], v.MustComplex64Slice()[1])
assert.Equal(t, replacedVals[2], v.MustComplex64Slice()[2])
-
}
func TestWhereComplex64(t *testing.T) {
-
v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
selected := v.WhereComplex64(func(i int, val complex64) bool {
@@ -2661,11 +2270,9 @@ func TestWhereComplex64(t *testing.T) {
}).MustComplex64Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupComplex64(t *testing.T) {
-
v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
grouped := v.GroupComplex64(func(i int, val complex64) string {
@@ -2675,11 +2282,9 @@ func TestGroupComplex64(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceComplex64(t *testing.T) {
-
v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
rawArr := v.MustComplex64Slice()
@@ -2700,11 +2305,9 @@ func TestReplaceComplex64(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectComplex64(t *testing.T) {
-
v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
collected := v.CollectComplex64(func(index int, val complex64) interface{} {
@@ -2720,83 +2323,68 @@ func TestCollectComplex64(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
-// ************************************************************
-// TESTS
-// ************************************************************
+/*
+ Tests for Complex128 (complex128 and []complex128)
+*/
func TestComplex128(t *testing.T) {
-
val := complex128(1)
+
m := map[string]interface{}{"value": val, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Complex128())
assert.Equal(t, val, New(m).Get("value").MustComplex128())
assert.Equal(t, complex128(0), New(m).Get("nothing").Complex128())
assert.Equal(t, val, New(m).Get("nothing").Complex128(1))
-
assert.Panics(t, func() {
New(m).Get("age").MustComplex128()
})
-
}
func TestComplex128Slice(t *testing.T) {
-
val := complex128(1)
+
m := map[string]interface{}{"value": []complex128{val}, "nothing": nil}
assert.Equal(t, val, New(m).Get("value").Complex128Slice()[0])
assert.Equal(t, val, New(m).Get("value").MustComplex128Slice()[0])
assert.Equal(t, []complex128(nil), New(m).Get("nothing").Complex128Slice())
assert.Equal(t, val, New(m).Get("nothing").Complex128Slice([]complex128{complex128(1)})[0])
-
assert.Panics(t, func() {
New(m).Get("nothing").MustComplex128Slice()
})
-
}
func TestIsComplex128(t *testing.T) {
-
- var v *Value
-
- v = &Value{data: complex128(1)}
+ v := &Value{data: complex128(1)}
assert.True(t, v.IsComplex128())
- v = &Value{data: []complex128{complex128(1)}}
- assert.True(t, v.IsComplex128Slice())
+}
+func TestIsComplex128Slice(t *testing.T) {
+ v := &Value{data: []complex128{complex128(1)}}
+ assert.True(t, v.IsComplex128Slice())
}
func TestEachComplex128(t *testing.T) {
-
v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
count := 0
replacedVals := make([]complex128, 0)
assert.Equal(t, v, v.EachComplex128(func(i int, val complex128) bool {
-
count++
replacedVals = append(replacedVals, val)
// abort early
- if i == 2 {
- return false
- }
-
- return true
-
+ return i != 2
}))
assert.Equal(t, count, 3)
assert.Equal(t, replacedVals[0], v.MustComplex128Slice()[0])
assert.Equal(t, replacedVals[1], v.MustComplex128Slice()[1])
assert.Equal(t, replacedVals[2], v.MustComplex128Slice()[2])
-
}
func TestWhereComplex128(t *testing.T) {
-
v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
selected := v.WhereComplex128(func(i int, val complex128) bool {
@@ -2804,11 +2392,9 @@ func TestWhereComplex128(t *testing.T) {
}).MustComplex128Slice()
assert.Equal(t, 3, len(selected))
-
}
func TestGroupComplex128(t *testing.T) {
-
v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
grouped := v.GroupComplex128(func(i int, val complex128) string {
@@ -2818,11 +2404,9 @@ func TestGroupComplex128(t *testing.T) {
assert.Equal(t, 2, len(grouped))
assert.Equal(t, 3, len(grouped["true"]))
assert.Equal(t, 3, len(grouped["false"]))
-
}
func TestReplaceComplex128(t *testing.T) {
-
v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
rawArr := v.MustComplex128Slice()
@@ -2843,11 +2427,9 @@ func TestReplaceComplex128(t *testing.T) {
assert.Equal(t, replacedArr[4], rawArr[5])
assert.Equal(t, replacedArr[5], rawArr[0])
}
-
}
func TestCollectComplex128(t *testing.T) {
-
v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
collected := v.CollectComplex128(func(index int, val complex128) interface{} {
@@ -2863,5 +2445,4 @@ func TestCollectComplex128(t *testing.T) {
assert.Equal(t, collectedArr[4], 4)
assert.Equal(t, collectedArr[5], 5)
}
-
}
diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go
index 7aaef06b1..e4b4a1433 100644
--- a/vendor/github.com/stretchr/objx/value.go
+++ b/vendor/github.com/stretchr/objx/value.go
@@ -1,5 +1,10 @@
package objx
+import (
+ "fmt"
+ "strconv"
+)
+
// Value provides methods for extracting interface{} data in various
// types.
type Value struct {
@@ -11,3 +16,38 @@ type Value struct {
func (v *Value) Data() interface{} {
return v.data
}
+
+// String always returns the value as a string
+func (v *Value) String() string {
+ switch {
+ case v.IsStr():
+ return v.Str()
+ case v.IsBool():
+ return strconv.FormatBool(v.Bool())
+ case v.IsFloat32():
+ return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32)
+ case v.IsFloat64():
+ return strconv.FormatFloat(v.Float64(), 'f', -1, 64)
+ case v.IsInt():
+ return strconv.FormatInt(int64(v.Int()), 10)
+ case v.IsInt8():
+ return strconv.FormatInt(int64(v.Int8()), 10)
+ case v.IsInt16():
+ return strconv.FormatInt(int64(v.Int16()), 10)
+ case v.IsInt32():
+ return strconv.FormatInt(int64(v.Int32()), 10)
+ case v.IsInt64():
+ return strconv.FormatInt(v.Int64(), 10)
+ case v.IsUint():
+ return strconv.FormatUint(uint64(v.Uint()), 10)
+ case v.IsUint8():
+ return strconv.FormatUint(uint64(v.Uint8()), 10)
+ case v.IsUint16():
+ return strconv.FormatUint(uint64(v.Uint16()), 10)
+ case v.IsUint32():
+ return strconv.FormatUint(uint64(v.Uint32()), 10)
+ case v.IsUint64():
+ return strconv.FormatUint(v.Uint64(), 10)
+ }
+ return fmt.Sprintf("%#v", v.Data())
+}
diff --git a/vendor/github.com/stretchr/objx/value_test.go b/vendor/github.com/stretchr/objx/value_test.go
index 0bc65d92c..1b1e3091f 100644
--- a/vendor/github.com/stretchr/objx/value_test.go
+++ b/vendor/github.com/stretchr/objx/value_test.go
@@ -1 +1,74 @@
-package objx
+package objx_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/objx"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStringTypeString(t *testing.T) {
+ m := objx.Map{
+ "string": "foo",
+ }
+
+ assert.Equal(t, "foo", m.Get("string").String())
+}
+
+func TestStringTypeBool(t *testing.T) {
+ m := objx.Map{
+ "bool": true,
+ }
+
+ assert.Equal(t, "true", m.Get("bool").String())
+}
+
+func TestStringTypeInt(t *testing.T) {
+ m := objx.Map{
+ "int": int(1),
+ "int8": int8(8),
+ "int16": int16(16),
+ "int32": int32(32),
+ "int64": int64(64),
+ }
+
+ assert.Equal(t, "1", m.Get("int").String())
+ assert.Equal(t, "8", m.Get("int8").String())
+ assert.Equal(t, "16", m.Get("int16").String())
+ assert.Equal(t, "32", m.Get("int32").String())
+ assert.Equal(t, "64", m.Get("int64").String())
+}
+
+func TestStringTypeUint(t *testing.T) {
+ m := objx.Map{
+ "uint": uint(1),
+ "uint8": uint8(8),
+ "uint16": uint16(16),
+ "uint32": uint32(32),
+ "uint64": uint64(64),
+ }
+
+ assert.Equal(t, "1", m.Get("uint").String())
+ assert.Equal(t, "8", m.Get("uint8").String())
+ assert.Equal(t, "16", m.Get("uint16").String())
+ assert.Equal(t, "32", m.Get("uint32").String())
+ assert.Equal(t, "64", m.Get("uint64").String())
+}
+
+func TestStringTypeFloat(t *testing.T) {
+ m := objx.Map{
+ "float32": float32(32.32),
+ "float64": float64(64.64),
+ }
+
+ assert.Equal(t, "32.32", m.Get("float32").String())
+ assert.Equal(t, "64.64", m.Get("float64").String())
+}
+
+func TestStringTypeOther(t *testing.T) {
+ m := objx.Map{
+ "other": []string{"foo", "bar"},
+ }
+
+ assert.Equal(t, "[]string{\"foo\", \"bar\"}", m.Get("other").String())
+}
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 000000000..c83641619
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 000000000..8a4a6589a
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, is not compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+ // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+ // internal reflect.Value fields. These values are valid before golang
+ // commit ecccf07e7f9d which changed the format. The are also valid
+ // after commit 82f48826c6c7 which changed the format again to mirror
+ // the original format. Code in the init function updates these offsets
+ // as necessary.
+ offsetPtr = uintptr(ptrSize)
+ offsetScalar = uintptr(0)
+ offsetFlag = uintptr(ptrSize * 2)
+
+ // flagKindWidth and flagKindShift indicate various bits that the
+ // reflect package uses internally to track kind information.
+ //
+ // flagRO indicates whether or not the value field of a reflect.Value is
+ // read-only.
+ //
+ // flagIndir indicates whether the value field of a reflect.Value is
+ // the actual data or a pointer to the data.
+ //
+ // These values are valid before golang commit 90a7c3c86944 which
+ // changed their positions. Code in the init function updates these
+ // flags as necessary.
+ flagKindWidth = uintptr(5)
+ flagKindShift = uintptr(flagKindWidth - 1)
+ flagRO = uintptr(1 << 0)
+ flagIndir = uintptr(1 << 1)
+)
+
+func init() {
+ // Older versions of reflect.Value stored small integers directly in the
+ // ptr field (which is named val in the older versions). Versions
+ // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+ // scalar for this purpose which unfortunately came before the flag
+ // field, so the offset of the flag field is different for those
+ // versions.
+ //
+ // This code constructs a new reflect.Value from a known small integer
+ // and checks if the size of the reflect.Value struct indicates it has
+ // the scalar field. When it does, the offsets are updated accordingly.
+ vv := reflect.ValueOf(0xf00)
+ if unsafe.Sizeof(vv) == (ptrSize * 4) {
+ offsetScalar = ptrSize * 2
+ offsetFlag = ptrSize * 3
+ }
+
+ // Commit 90a7c3c86944 changed the flag positions such that the low
+ // order bits are the kind. This code extracts the kind from the flags
+ // field and ensures it's the correct type. When it's not, the flag
+ // order has been changed to the newer format, so the flags are updated
+ // accordingly.
+ upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+ upfv := *(*uintptr)(upf)
+ flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+ if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+ flagKindShift = 0
+ flagRO = 1 << 5
+ flagIndir = 1 << 6
+
+ // Commit adf9b30e5594 modified the flags to separate the
+ // flagRO flag into two bits which specifies whether or not the
+ // field is embedded. This causes flagIndir to move over a bit
+ // and means that flagRO is the combination of either of the
+ // original flagRO bit and the new bit.
+ //
+ // This code detects the change by extracting what used to be
+ // the indirect bit to ensure it's set. When it's not, the flag
+ // order has been changed to the newer format, so the flags are
+ // updated accordingly.
+ if upfv&flagIndir == 0 {
+ flagRO = 3 << 5
+ flagIndir = 1 << 7
+ }
+ }
+}
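A minimal standalone sketch of the size-probing idea used in init above — assuming only the standard reflect and unsafe packages, not spew's internals:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	// Probe the struct layout by size, as init above does: divide the
	// observed size of reflect.Value by the pointer size to learn how
	// many pointer-sized fields this toolchain's reflect.Value carries.
	ptrSize := unsafe.Sizeof((*byte)(nil))
	v := reflect.ValueOf(0xf00)
	fmt.Println(unsafe.Sizeof(v) / ptrSize) // 3 without the scalar field, 4 with it
}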
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+ indirects := 1
+ vt := v.Type()
+ upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+ rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+ if rvf&flagIndir != 0 {
+ vt = reflect.PtrTo(v.Type())
+ indirects++
+ } else if offsetScalar != 0 {
+ // The value is in the scalar field when it's not one of the
+ // reference types.
+ switch vt.Kind() {
+ case reflect.Uintptr:
+ case reflect.Chan:
+ case reflect.Func:
+ case reflect.Map:
+ case reflect.Ptr:
+ case reflect.UnsafePointer:
+ default:
+ upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+ offsetScalar)
+ }
+ }
+
+ pv := reflect.NewAt(vt, upv)
+ rv = pv
+ for i := 0; i < indirects; i++ {
+ rv = rv.Elem()
+ }
+ return rv
+}
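A minimal sketch of the restriction this bypass exists for, using only the standard reflect API (the struct and field names are illustrative, not from this package): without unsafe, reflect refuses to export unexported fields.

package main

import (
	"fmt"
	"reflect"
)

type wrapped struct {
	secret string // unexported: reflect will not hand it back as an interface{}
}

func main() {
	f := reflect.ValueOf(wrapped{secret: "hidden"}).Field(0)
	fmt.Println(f.CanInterface()) // false — visibility rules apply
	// f.Interface() would panic here; unsafeReflectValue digs out the
	// raw pointer so spew can still pretty-print such fields.
}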
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 000000000..1fe3cf3d5
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, is compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 000000000..7c519ff47
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
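A hedged micro-sketch of why these conversions are hoisted into package-level vars: converting a string literal to []byte at each Write call can allocate, since the slice escapes through the io.Writer interface, whereas the precomputed slices are built once.

package main

import (
	"bytes"
	"fmt"
	"io"
)

var openParen = []byte("(") // converted once, reused for every write

func writeMany(w io.Writer, n int) {
	for i := 0; i < n; i++ {
		w.Write(openParen)   // no per-call conversion
		w.Write([]byte(")")) // converts on each call and may allocate
	}
}

func main() {
	var buf bytes.Buffer
	writeMany(&buf, 3)
	fmt.Println(buf.String()) // ()()()
}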
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+ // mutate the value; however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
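+
+// As an illustrative sketch (not part of the upstream comments): for a
+// value whose type implements fmt.Stringer, handleMethods writes the
+// String() result and returns true, while with cs.ContinueOnMethod set it
+// writes "(result) " and returns false so the caller keeps dumping the
+// underlying data.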
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
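+
+// For example (illustrative): printComplex(w, complex(1, -2), 64) writes
+// "(1-2i)" to w, and printComplex(w, complex(3, 4), 32) writes "(3+4i)".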
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 hex digits + 2 bytes for the '0x' prefix.
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
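+
+// For example (illustrative): printHexPtr(w, 0xcafe) writes "0xcafe" to w,
+// while printHexPtr(w, 0) writes "<nil>".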
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+ strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
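+
+// Illustrative usage, mirroring how the dump and format code paths invoke
+// it for map keys:
+//
+// keys := v.MapKeys()
+// if cs.SortKeys {
+// sortValues(keys, cs)
+// }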
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 000000000..2e3d22f31
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set the Indent field to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance used by all top-level functions sets this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value; however,
+ // in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies that map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
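+
+// For example (illustrative), to make every top-level function indent with
+// tabs and emit deterministic map ordering:
+//
+// spew.Config.Indent = "\t"
+// spew.Config.SortKeys = true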
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
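+
+// As an illustrative sketch, a locally scoped ConfigState leaves the global
+// spew.Config untouched:
+//
+// cs := spew.ConfigState{Indent: "\t", MaxDepth: 2}
+// str := cs.Sdump(myVar)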
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+// SpewKeys: false
+// DisablePointerAddresses: false
+// DisableCapacities: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
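+
+// Illustrative usage (assumed): start from the defaults and override a
+// single option before dumping:
+//
+// cfg := spew.NewDefaultConfig()
+// cfg.DisablePointerAddresses = true
+// cfg.Dump(myVar)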
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 000000000..aacaac6f1
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc. with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config variable.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows multiple configurations
+to be used concurrently. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies that map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported, with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
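+
+For example, a deterministic configuration suited to diffing test output
+might look like this (illustrative):
+
+ spew.Config.SortKeys = true
+ spew.Config.DisablePointerAddresses = true
+ spew.Config.DisableCapacities = true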
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed like the output of the
+hexdump -C command, as shown below.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 000000000..df1d582a7
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from the map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function; however, circular data
+// structures are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled.
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different from empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were no other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
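+
+// Illustrative usage (assumed): capture a dump as a string for a log line
+// or a test failure message:
+//
+// t.Logf("unexpected state: %s", spew.Sdump(got))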
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 000000000..c49875bac
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by the fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be passed directly as an
+// argument to standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from the map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function;
+// however, circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different from empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were no other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
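+
+// Illustrative usage (assumed): wrap a value so the standard fmt package
+// applies spew formatting for the %v verb family:
+//
+// fmt.Printf("value: %+v\n", spew.NewFormatter(myVar))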
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 000000000..32c0e3388
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
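+
+// A short usage sketch (illustrative only; the cfg type and values are
+// hypothetical): every wrapper above runs its arguments through
+// convertArgs so package fmt formats them with spew's Formatter.
+//
+//	type cfg struct {
+//		Name    string
+//		Retries int
+//	}
+//	spew.Printf("cfg: %+v\n", cfg{"db", 3})
+//	s := spew.Sprintf("cfg: %#v", cfg{"db", 3})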
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/stretchr/objx/vendor/github.com/pmezard/go-difflib/LICENSE
new file mode 100644
index 000000000..c67dad612
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/pmezard/go-difflib/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+ The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/stretchr/objx/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
new file mode 100644
index 000000000..003e99fad
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -0,0 +1,772 @@
+// Package difflib is a partial port of the Python difflib module.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// The following class and functions have been ported:
+//
+// - SequenceMatcher
+//
+// - unified_diff
+//
+// - context_diff
+//
+// Getting unified diffs was the main goal of the port. Keep in mind this code
+// is mostly suitable for presenting text differences in a human-friendly way;
+// there is no guarantee that generated diffs are consumable by patch(1).
+package difflib
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func calculateRatio(matches, length int) float64 {
+ if length > 0 {
+ return 2.0 * float64(matches) / float64(length)
+ }
+ return 1.0
+}
+
+type Match struct {
+ A int
+ B int
+ Size int
+}
+
+type OpCode struct {
+ Tag byte
+ I1 int
+ I2 int
+ J1 int
+ J2 int
+}
+
+// SequenceMatcher compares sequences of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+ a []string
+ b []string
+ b2j map[string][]int
+ IsJunk func(string) bool
+ autoJunk bool
+ bJunk map[string]struct{}
+ matchingBlocks []Match
+ fullBCount map[string]int
+ bPopular map[string]struct{}
+ opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+ m := SequenceMatcher{autoJunk: true}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+ isJunk func(string) bool) *SequenceMatcher {
+
+ m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+ m.SetSeqs(a, b)
+ return &m
+}
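+
+// A minimal construction sketch (illustrative only): compare two line
+// slices and read back a similarity ratio.
+//
+//	a := SplitLines("one\ntwo\nthree\n")
+//	b := SplitLines("one\n2\nthree\n")
+//	m := NewMatcher(a, b)
+//	r := m.Ratio() // 0.75 here: 3 matching lines out of 4+4 total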
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+ m.SetSeq1(a)
+ m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+ if &a == &m.a {
+ return
+ }
+ m.a = a
+ m.matchingBlocks = nil
+ m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+ if &b == &m.b {
+ return
+ }
+ m.b = b
+ m.matchingBlocks = nil
+ m.opCodes = nil
+ m.fullBCount = nil
+ m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+ // Populate line -> index mapping
+ b2j := map[string][]int{}
+ for i, s := range m.b {
+ indices := b2j[s]
+ indices = append(indices, i)
+ b2j[s] = indices
+ }
+
+ // Purge junk elements
+ m.bJunk = map[string]struct{}{}
+ if m.IsJunk != nil {
+ junk := m.bJunk
+ for s := range b2j {
+ if m.IsJunk(s) {
+ junk[s] = struct{}{}
+ }
+ }
+ for s := range junk {
+ delete(b2j, s)
+ }
+ }
+
+ // Purge remaining popular elements
+ popular := map[string]struct{}{}
+ n := len(m.b)
+ if m.autoJunk && n >= 200 {
+ ntest := n/100 + 1
+ for s, indices := range b2j {
+ if len(indices) > ntest {
+ popular[s] = struct{}{}
+ }
+ }
+ for s := range popular {
+ delete(b2j, s)
+ }
+ }
+ m.bPopular = popular
+ m.b2j = b2j
+}
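+
+// Worked example of the autoJunk heuristic above (illustrative only): with
+// n = 300 lines in b, ntest = 300/100 + 1 = 4, so any line occurring more
+// than 4 times is treated as popular, recorded in bPopular, and dropped
+// from b2j, which keeps findLongestMatch's inner loop short.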
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+ _, ok := m.bJunk[s]
+ return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block. Then that block is extended as
+// far as possible by matching (only) junk elements on both sides. So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+ // CAUTION: stripping common prefix or suffix would be incorrect.
+ // E.g.,
+ // ab
+ // acab
+ // Longest matching block is "ab", but if common prefix is
+ // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ // strip, so ends up claiming that ab is changed to acab by
+ // inserting "ca" in the middle. That's minimal but unintuitive:
+ // "it's obvious" that someone inserted "ac" at the front.
+ // Windiff ends up at the same place as diff, but by pairing up
+ // the unique 'b's and then matching the first two 'a's.
+ besti, bestj, bestsize := alo, blo, 0
+
+ // find longest junk-free match
+ // during an iteration of the loop, j2len[j] = length of longest
+ // junk-free match ending with a[i-1] and b[j]
+ j2len := map[int]int{}
+ for i := alo; i != ahi; i++ {
+ // look at all instances of a[i] in b; note that because
+ // b2j has no junk keys, the loop is skipped if a[i] is junk
+ newj2len := map[int]int{}
+ for _, j := range m.b2j[m.a[i]] {
+ // a[i] matches b[j]
+ if j < blo {
+ continue
+ }
+ if j >= bhi {
+ break
+ }
+ k := j2len[j-1] + 1
+ newj2len[j] = k
+ if k > bestsize {
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ }
+ }
+ j2len = newj2len
+ }
+
+ // Extend the best by non-junk elements on each end. In particular,
+ // "popular" non-junk elements aren't in b2j, which greatly speeds
+ // the inner loop above, but also means "the best" match so far
+ // doesn't contain any junk *or* popular non-junk elements.
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ !m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ // Now that we have a wholly interesting match (albeit possibly
+ // empty!), we may as well suck up the matching junk on each
+ // side of it too. Can't think of a good reason not to, and it
+ // saves post-processing the (possibly considerable) expense of
+ // figuring out what to do with it. In the case of an empty
+ // interesting match, this is clearly the right thing to do,
+ // because no other kind of match is possible in the regions.
+ for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize += 1
+ }
+
+ return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+ if m.matchingBlocks != nil {
+ return m.matchingBlocks
+ }
+
+ var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+ matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+ match := m.findLongestMatch(alo, ahi, blo, bhi)
+ i, j, k := match.A, match.B, match.Size
+ if match.Size > 0 {
+ if alo < i && blo < j {
+ matched = matchBlocks(alo, i, blo, j, matched)
+ }
+ matched = append(matched, match)
+ if i+k < ahi && j+k < bhi {
+ matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+ }
+ }
+ return matched
+ }
+ matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+ // It's possible that we have adjacent equal blocks in the
+ // matching_blocks list now.
+ nonAdjacent := []Match{}
+ i1, j1, k1 := 0, 0, 0
+ for _, b := range matched {
+ // Is this block adjacent to i1, j1, k1?
+ i2, j2, k2 := b.A, b.B, b.Size
+ if i1+k1 == i2 && j1+k1 == j2 {
+ // Yes, so collapse them -- this just increases the length of
+ // the first block by the length of the second, and the first
+ // block so lengthened remains the block to compare against.
+ k1 += k2
+ } else {
+ // Not adjacent. Remember the first block (k1==0 means it's
+ // the dummy we started with), and make the second block the
+ // new block to compare against.
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+ i1, j1, k1 = i2, j2, k2
+ }
+ }
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+
+ nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+ m.matchingBlocks = nonAdjacent
+ return m.matchingBlocks
+}
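+
+// A small worked sketch (illustrative only):
+//
+//	m := NewMatcher([]string{"a", "b", "x"}, []string{"a", "b", "y"})
+//	blocks := m.GetMatchingBlocks()
+//	// blocks: {A:0 B:0 Size:2} for the matching "a", "b" prefix,
+//	// then the {A:3 B:3 Size:0} sentinel.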
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal): a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+ if m.opCodes != nil {
+ return m.opCodes
+ }
+ i, j := 0, 0
+ matching := m.GetMatchingBlocks()
+ opCodes := make([]OpCode, 0, len(matching))
+ for _, m := range matching {
+ // invariant: we've pumped out correct diffs to change
+ // a[:i] into b[:j], and the next matching block is
+ // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ // out a diff to change a[i:ai] into b[j:bj], pump out
+ // the matching block, and move (i,j) beyond the match
+ ai, bj, size := m.A, m.B, m.Size
+ tag := byte(0)
+ if i < ai && j < bj {
+ tag = 'r'
+ } else if i < ai {
+ tag = 'd'
+ } else if j < bj {
+ tag = 'i'
+ }
+ if tag > 0 {
+ opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+ }
+ i, j = ai+size, bj+size
+ // the list of matching blocks is terminated by a
+ // sentinel with size 0
+ if size > 0 {
+ opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+ }
+ }
+ m.opCodes = opCodes
+ return m.opCodes
+}
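+
+// A small worked sketch (illustrative only):
+//
+//	m := NewMatcher([]string{"one", "two", "three"}, []string{"one", "2", "three"})
+//	ops := m.GetOpCodes()
+//	// ops: 'e' {0,1,0,1}, 'r' {1,2,1,2}, 'e' {2,3,2,3}, meaning keep
+//	// "one", replace "two" with "2", keep "three".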
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+ if n < 0 {
+ n = 3
+ }
+ codes := m.GetOpCodes()
+ if len(codes) == 0 {
+ codes = []OpCode{{'e', 0, 1, 0, 1}}
+ }
+ // Fixup leading and trailing groups if they show no changes.
+ if codes[0].Tag == 'e' {
+ c := codes[0]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+ }
+ if codes[len(codes)-1].Tag == 'e' {
+ c := codes[len(codes)-1]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+ }
+ nn := n + n
+ groups := [][]OpCode{}
+ group := []OpCode{}
+ for _, c := range codes {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ // End the current group and start a new one whenever
+ // there is a large range with no changes.
+ if c.Tag == 'e' && i2-i1 > nn {
+ group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+ j1, min(j2, j1+n)})
+ groups = append(groups, group)
+ group = []OpCode{}
+ i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ }
+ group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+ }
+ if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ groups = append(groups, group)
+ }
+ return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+ for _, m := range m.GetMatchingBlocks() {
+ matches += m.Size
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+ // viewing a and b as multisets, set matches to the cardinality
+ // of their intersection; this counts the number of matches
+ // without regard to order, so is clearly an upper bound
+ if m.fullBCount == nil {
+ m.fullBCount = map[string]int{}
+ for _, s := range m.b {
+ m.fullBCount[s] = m.fullBCount[s] + 1
+ }
+ }
+
+ // avail[x] is the number of times x appears in 'b' less the
+ // number of times we've seen it in 'a' so far ... kinda
+ avail := map[string]int{}
+ matches := 0
+ for _, s := range m.a {
+ n, ok := avail[s]
+ if !ok {
+ n = m.fullBCount[s]
+ }
+ avail[s] = n - 1
+ if n > 0 {
+ matches += 1
+ }
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+ la, lb := len(m.a), len(m.b)
+ return calculateRatio(min(la, lb), la+lb)
+}
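+
+// The three ratios form a cheap-to-expensive chain, so callers can filter
+// candidates before paying for Ratio. A sketch (illustrative only; the
+// cutoff value is hypothetical):
+//
+//	m := NewMatcher(a, b)
+//	cutoff := 0.6
+//	if m.RealQuickRatio() >= cutoff &&
+//		m.QuickRatio() >= cutoff &&
+//		m.Ratio() >= cutoff {
+//		// a and b are similar enough
+//	}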
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ return fmt.Sprintf("%d,%d", beginning, length)
+}
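+
+// Worked examples (illustrative only): formatRangeUnified(0, 3) yields
+// "1,3" (three lines starting at line 1) and formatRangeUnified(2, 3)
+// yields "3" (the single line 3), the forms that appear in @@ hunk headers.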
+
+// Unified diff parameters
+type UnifiedDiff struct {
+ A []string // First sequence lines
+ FromFile string // First file name
+ FromDate string // First file time
+ B []string // Second sequence lines
+ ToFile string // Second file name
+ ToDate string // Second file time
+ Eol string // Headers end of line, defaults to LF
+ Context int // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context;
+// a negative value is treated as three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline. This is helpful so that inputs
+// created with SplitLines result in diffs that are suitable for
+// concatenation, since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set diff.Eol to ""
+// so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times. Any or all of these may be specified using the FromFile, ToFile,
+// FromDate, and ToDate fields. The modification times are normally
+// expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ wf := func(format string, args ...interface{}) error {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ return err
+ }
+ ws := func(s string) error {
+ _, err := buf.WriteString(s)
+ return err
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ first, last := g[0], g[len(g)-1]
+ range1 := formatRangeUnified(first.I1, last.I2)
+ range2 := formatRangeUnified(first.J1, last.J2)
+ if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+ return err
+ }
+ for _, c := range g {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ if c.Tag == 'e' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws(" " + line); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws("-" + line); err != nil {
+ return err
+ }
+ }
+ }
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, line := range diff.B[j1:j2] {
+ if err := ws("+" + line); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+ return w.String(), err
+}
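+
+// End-to-end usage sketch (illustrative only; the file names are
+// hypothetical): SplitLines keeps the trailing newlines the writer above
+// expects.
+//
+//	diff := UnifiedDiff{
+//		A:        SplitLines("one\ntwo\n"),
+//		B:        SplitLines("one\n2\n"),
+//		FromFile: "a.txt",
+//		ToFile:   "b.txt",
+//		Context:  3,
+//	}
+//	text, err := GetUnifiedDiffString(diff)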
+
+// Convert range to the "ed" format.
+func formatRangeContext(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ if length <= 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+type ContextDiff UnifiedDiff
+
+// Compare two sequences of lines; generate the delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context;
+// a negative value is treated as three.
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// argument to "" so that the output will be uniformly newline free.
+//
+// The context diff format normally has a header for filenames and
+// modification times. Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
+// The modification times are normally expressed in the ISO 8601 format.
+// If not specified, the strings default to blanks.
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ var diffErr error
+ wf := func(format string, args ...interface{}) {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+ ws := func(s string) {
+ _, err := buf.WriteString(s)
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ prefix := map[byte]string{
+ 'i': "+ ",
+ 'd': "- ",
+ 'r': "! ",
+ 'e': " ",
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+ }
+ }
+
+ first, last := g[0], g[len(g)-1]
+ ws("***************" + diff.Eol)
+
+ range1 := formatRangeContext(first.I1, last.I2)
+ wf("*** %s ****%s", range1, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, cc := range g {
+ if cc.Tag == 'i' {
+ continue
+ }
+ for _, line := range diff.A[cc.I1:cc.I2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+
+ range2 := formatRangeContext(first.J1, last.J2)
+ wf("--- %s ----%s", range2, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, cc := range g {
+ if cc.Tag == 'd' {
+ continue
+ }
+ for _, line := range diff.B[cc.J1:cc.J2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+ }
+ return diffErr
+}
+
+// Like WriteContextDiff but returns the diff as a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteContextDiff(w, diff)
+ return w.String(), err
+}
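+
+// Usage mirrors the unified variant (sketch, illustrative only): build the
+// same parameter struct and convert it to a ContextDiff.
+//
+//	ud := UnifiedDiff{A: a, B: b, FromFile: "a.txt", ToFile: "b.txt", Context: 3}
+//	text, err := GetContextDiffString(ContextDiff(ud))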
+
+// Split a string on "\n", keeping the "\n" at the end of each element.
+// The final element always ends with "\n", even if the input did not.
+// The output can be used as input for UnifiedDiff and ContextDiff
+// structures.
+func SplitLines(s string) []string {
+ lines := strings.SplitAfter(s, "\n")
+ lines[len(lines)-1] += "\n"
+ return lines
+}
diff --git a/vendor/github.com/stretchr/testify/LICENCE.txt b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/LICENSE
index 473b670a7..473b670a7 100644
--- a/vendor/github.com/stretchr/testify/LICENCE.txt
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/LICENSE
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertion_format.go
new file mode 100644
index 000000000..3e172f2ce
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -0,0 +1,405 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
+ return Condition(t, comp, append([]interface{}{msg}, args...)...)
+}
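+
+// Every ...f variant in this file forwards the same way: the format string
+// is prepended to args so the underlying assertion can rebuild the message.
+// A hypothetical call site (sketch):
+//
+//	assert.Conditionf(t, func() bool { return queue.Len() > 0 },
+//		"queue should be non-empty, got %d items", queue.Len())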
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ return DirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
+// Emptyf asserts that the specified object is empty, i.e. nil, "", false,
+// 0, or a slice or a channel with len == 0.
+//
+// assert.Emptyf(t, obj, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return Empty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
+ return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Errorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ return Error(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Failf reports a failure through the provided TestingT.
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// FailNowf fails the test immediately.
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// assert.Falsef(t, myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ return False(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ return FileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// assert.InDeltaf(t, math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon.
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Lenf asserts that the specified object has the specified length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
+ return Len(t, object, length, append([]interface{}{msg}, args...)...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// assert.Nilf(t, err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return Nil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ return NoError(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty, i.e. not nil,
+// "", false, 0, or a slice or a channel with len == 0.
+//
+// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// assert.NotNilf(t, err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return NotNil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return NotPanics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT contain
+// all elements given in the specified subset(array, slice...).
+//
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// NotZerof asserts that i is not the zero value for its type and returns
+// whether the assertion was successful.
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ return NotZero(t, i, append([]interface{}{msg}, args...)...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return Panics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// assert.Truef(t, myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ return True(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// Zerof asserts that i is the zero value for its type and returns
+// whether the assertion was successful.
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ return Zero(t, i, append([]interface{}{msg}, args...)...)
+}
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertion_forward.go
new file mode 100644
index 000000000..7c4f497bb
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -0,0 +1,798 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
+ return Condition(a.t, comp, msgAndArgs...)
+}
+
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
+ return Conditionf(a.t, comp, msg, args...)
+}
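+
+// The methods in this file mirror the package-level assertions with the
+// TestingT captured once at construction. A hypothetical call site (sketch):
+//
+//	a := assert.New(t)
+//	a.Condition(func() bool { return ready }, "service never became ready")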
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Contains("Hello World", "World")
+// a.Contains(["Hello", "World"], "World")
+// a.Contains({"Hello": "World"}, "Hello")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+ return Contains(a.t, s, contains, msgAndArgs...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Containsf("Hello World", "World", "error message %s", "formatted")
+// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return Containsf(a.t, s, contains, msg, args...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
+ return DirExists(a.t, path, msgAndArgs...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
+ return DirExistsf(a.t, path, msg, args...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+ return ElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ return ElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
+// Empty asserts that the specified object is empty, i.e. nil, "", false,
+// 0, or a slice or a channel with len == 0.
+//
+// a.Empty(obj)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
+ return Empty(a.t, object, msgAndArgs...)
+}
+
+// Emptyf asserts that the specified object is empty, i.e. nil, "", false,
+// 0, or a slice or a channel with len == 0.
+//
+// a.Emptyf(obj, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
+ return Emptyf(a.t, object, msg, args...)
+}
+
+// Equal asserts that two objects are equal.
+//
+// a.Equal(123, 123)
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ return Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualError(err, expectedErrorString)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
+ return EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
+ return EqualErrorf(a.t, theError, errString, msg, args...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValues(uint32(123), int32(123))
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ return EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Equalf(a.t, expected, actual, msg, args...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Error(err) {
+// assert.Equal(t, expectedError, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
+ return Error(a.t, err, msgAndArgs...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Errorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
+ return Errorf(a.t, err, msg, args...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// a.Exactly(int32(123), int64(123))
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Exactlyf(a.t, expected, actual, msg, args...)
+}
+
+// Fail reports a failure through the wrapped TestingT.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+ return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNow fails the test immediately.
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
+ return FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNowf fails the test immediately.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
+ return FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure through the wrapped TestingT.
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
+ return Failf(a.t, failureMessage, msg, args...)
+}
+
+// False asserts that the specified value is false.
+//
+// a.False(myBool)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+ return False(a.t, value, msgAndArgs...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// a.Falsef(myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
+ return Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
+ return FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
+ return FileExistsf(a.t, path, msg, args...)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// a.Implements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ return Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// a.InDelta(math.Pi, (22 / 7.0), 0.01)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return IsTypef(a.t, expectedType, object, msg, args...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+ return JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ return JSONEqf(a.t, expected, actual, msg, args...)
+}
+
+// Len asserts that the specified object has a specific length.
+// Len also fails if the object has a type that the builtin len() does not accept.
+//
+// a.Len(mySlice, 3)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
+ return Len(a.t, object, length, msgAndArgs...)
+}
+
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that the builtin len() does not accept.
+//
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
+ return Lenf(a.t, object, length, msg, args...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+// a.Nil(err)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
+ return Nil(a.t, object, msgAndArgs...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// a.Nilf(err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
+ return Nilf(a.t, object, msg, args...)
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoError(err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
+ return NoError(a.t, err, msgAndArgs...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoErrorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
+ return NoErrorf(a.t, err, msg, args...)
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContains("Hello World", "Earth")
+// a.NotContains(["Hello", "World"], "Earth")
+// a.NotContains({"Hello": "World"}, "Earth")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+ return NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return NotContainsf(a.t, s, contains, msg, args...)
+}
+
+// NotEmpty asserts that the specified object is NOT empty, i.e. not nil, "", false, 0, or
+// a slice or channel with len == 0.
+//
+// if a.NotEmpty(obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
+ return NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty, i.e. not nil, "", false, 0, or
+// a slice or channel with len == 0.
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
+ return NotEmptyf(a.t, object, msg, args...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// a.NotEqual(obj1, obj2)
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return NotEqualf(a.t, expected, actual, msg, args...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// a.NotNil(err)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+ return NotNil(a.t, object, msgAndArgs...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
+ return NotNilf(a.t, object, msg, args...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanics(func(){ RemainCalm() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ return NotPanics(a.t, f, msgAndArgs...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ return NotPanicsf(a.t, f, msg, args...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+// a.NotRegexp("^start", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ return NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ return NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return NotSubsetf(a.t, list, subset, msg, args...)
+}
+
+// NotZero asserts that i is not the zero value for its type and returns whether the assertion was successful.
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
+ return NotZero(a.t, i, msgAndArgs...)
+}
+
+// NotZerof asserts that i is not the zero value for its type and returns whether the assertion was successful.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
+ return NotZerof(a.t, i, msg, args...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panics(func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ return Panics(a.t, f, msgAndArgs...)
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ return PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ return Panicsf(a.t, f, msg, args...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// a.Regexp(regexp.MustCompile("start"), "it's starting")
+// a.Regexp("start...$", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ return Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ return Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return Subsetf(a.t, list, subset, msg, args...)
+}
+
+// True asserts that the specified value is true.
+//
+// a.True(myBool)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
+ return True(a.t, value, msgAndArgs...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// a.Truef(myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
+ return Truef(a.t, value, msg, args...)
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+ return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ return WithinDurationf(a.t, expected, actual, delta, msg, args...)
+}
+
+// Zero asserts that i is the zero value for its type and returns whether the assertion was successful.
+func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
+ return Zero(a.t, i, msgAndArgs...)
+}
+
+// Zerof asserts that i is the zero value for its type and returns whether the assertion was successful.
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
+ return Zerof(a.t, i, msg, args...)
+}
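+
+// A minimal usage sketch for the forwarding wrapper above (illustrative only;
+// it assumes this package's New constructor, which wraps a TestingT in an
+// *Assertions so t does not have to be passed to every call):
+//
+//	func TestSomething(t *testing.T) {
+//		a := assert.New(t)
+//		a.Equal(123, 123) // forwards to Equal(a.t, 123, 123)
+//		a.NoError(nil)    // forwards to NoError(a.t, nil)
+//	}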
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertions.go
new file mode 100644
index 000000000..9d387bc77
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -0,0 +1,1312 @@
+package assert
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/pmezard/go-difflib/difflib"
+)
+
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+ Errorf(format string, args ...interface{})
+}
+
+// Comparison is a custom function that returns true on success and false on failure.
+type Comparison func() (success bool)
+
+/*
+ Helper functions
+*/
+
+// ObjectsAreEqual determines if two objects are considered equal.
+//
+// This function does no assertion of any kind.
+func ObjectsAreEqual(expected, actual interface{}) bool {
+
+ if expected == nil || actual == nil {
+ return expected == actual
+ }
+ if exp, ok := expected.([]byte); ok {
+ act, ok := actual.([]byte)
+ if !ok {
+ return false
+ } else if exp == nil || act == nil {
+ return exp == nil && act == nil
+ }
+ return bytes.Equal(exp, act)
+ }
+ return reflect.DeepEqual(expected, actual)
+
+}
+
+// ObjectsAreEqualValues gets whether two objects are equal, or if their
+// values are equal.
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
+ if ObjectsAreEqual(expected, actual) {
+ return true
+ }
+
+ actualType := reflect.TypeOf(actual)
+ if actualType == nil {
+ return false
+ }
+ expectedValue := reflect.ValueOf(expected)
+ if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
+ // Attempt comparison after type conversion
+ return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+ }
+
+ return false
+}
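+
+// A hedged illustration of the conversion step above (values chosen for this
+// sketch):
+//
+//	ObjectsAreEqual(uint32(123), int32(123))       // false: types differ
+//	ObjectsAreEqualValues(uint32(123), int32(123)) // true: uint32 converts to int32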
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns an array of strings containing the file and line number
+// of each stack frame leading from the current test to the assert call that
+// failed.
+func CallerInfo() []string {
+
+ pc := uintptr(0)
+ file := ""
+ line := 0
+ ok := false
+ name := ""
+
+ callers := []string{}
+ for i := 0; ; i++ {
+ pc, file, line, ok = runtime.Caller(i)
+ if !ok {
+ // The breaks below failed to terminate the loop, and we ran off the
+ // end of the call stack.
+ break
+ }
+
+ // This is a huge edge case, but it will panic if this is the case, see #180
+ if file == "<autogenerated>" {
+ break
+ }
+
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ break
+ }
+ name = f.Name()
+
+ // testing.tRunner is the standard library function that calls
+ // tests. Subtests are called directly by tRunner, without going through
+ // the Test/Benchmark/Example function that contains the t.Run calls, so
+ // with subtests we should break when we hit tRunner, without adding it
+ // to the list of callers.
+ if name == "testing.tRunner" {
+ break
+ }
+
+ parts := strings.Split(file, "/")
+ file = parts[len(parts)-1]
+ if len(parts) > 1 {
+ dir := parts[len(parts)-2]
+ if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ }
+ }
+
+ // Drop the package
+ segments := strings.Split(name, ".")
+ name = segments[len(segments)-1]
+ if isTest(name, "Test") ||
+ isTest(name, "Benchmark") ||
+ isTest(name, "Example") {
+ break
+ }
+ }
+
+ return callers
+}
+
+// Stolen from the `go test` tool.
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+func isTest(name, prefix string) bool {
+ if !strings.HasPrefix(name, prefix) {
+ return false
+ }
+ if len(name) == len(prefix) { // "Test" is ok
+ return true
+ }
+ rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
+ return !unicode.IsLower(rune)
+}
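+
+// Illustrative cases for the prefix rule above (hypothetical names):
+//
+//	isTest("TestFoo", "Test") // true: 'F' after the prefix is not lower-case
+//	isTest("Test", "Test")    // true: the bare prefix is accepted
+//	isTest("Testify", "Test") // false: 'i' after the prefix is lower-case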
+
+// getWhitespaceString returns a string that is long enough to overwrite the default
+// output from the go testing framework.
+func getWhitespaceString() string {
+
+ _, file, line, ok := runtime.Caller(1)
+ if !ok {
+ return ""
+ }
+ parts := strings.Split(file, "/")
+ file = parts[len(parts)-1]
+
+ return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line)))
+
+}
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+	if len(msgAndArgs) == 0 {
+		return ""
+	}
+	if len(msgAndArgs) == 1 {
+		// Guard the type assertion so a lone non-string message does not panic.
+		if msg, ok := msgAndArgs[0].(string); ok {
+			return msg
+		}
+		return fmt.Sprintf("%+v", msgAndArgs[0])
+	}
+	return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+}
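+
+// The three cases above, sketched with example arguments:
+//
+//	messageFromMsgAndArgs()                // ""
+//	messageFromMsgAndArgs("plain message") // "plain message"
+//	messageFromMsgAndArgs("id=%d", 7)      // "id=7" via fmt.Sprintf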
+
+// Aligns the provided message so that all lines after the first line start at the same location as the first line.
+// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
+// basis on which the alignment occurs).
+func indentMessageLines(message string, longestLabelLen int) string {
+ outBuf := new(bytes.Buffer)
+
+ for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+ // no need to align first line because it starts at the correct location (after the label)
+ if i != 0 {
+ // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
+ outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
+ }
+ outBuf.WriteString(scanner.Text())
+ }
+
+ return outBuf.String()
+}
+
+type failNower interface {
+ FailNow()
+}
+
+// FailNow fails the test immediately. It calls FailNow on the underlying
+// TestingT when available and panics otherwise.
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ Fail(t, failureMessage, msgAndArgs...)
+
+ // We cannot extend TestingT with FailNow() and
+ // maintain backwards compatibility, so we fallback
+ // to panicking when FailNow is not available in
+ // TestingT.
+ // See issue #263
+
+ if t, ok := t.(failNower); ok {
+ t.FailNow()
+ } else {
+ panic("test failed and t is missing `FailNow()`")
+ }
+ return false
+}
+
+// Fail reports a failure through the provided TestingT's Errorf.
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ content := []labeledContent{
+ {"Error Trace", strings.Join(CallerInfo(), "\n\r\t\t\t")},
+ {"Error", failureMessage},
+ }
+
+ // Add test name if the Go version supports it
+ if n, ok := t.(interface {
+ Name() string
+ }); ok {
+ content = append(content, labeledContent{"Test", n.Name()})
+ }
+
+ message := messageFromMsgAndArgs(msgAndArgs...)
+ if len(message) > 0 {
+ content = append(content, labeledContent{"Messages", message})
+ }
+
+ t.Errorf("%s", "\r"+getWhitespaceString()+labeledOutput(content...))
+
+ return false
+}
+
+type labeledContent struct {
+ label string
+ content string
+}
+
+// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
+//
+// \r\t{{label}}:{{align_spaces}}\t{{content}}\n
+//
+// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
+// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
+// alignment is achieved, "\t{{content}}\n" is added for the output.
+//
+// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line.
+func labeledOutput(content ...labeledContent) string {
+ longestLabel := 0
+ for _, v := range content {
+ if len(v.label) > longestLabel {
+ longestLabel = len(v.label)
+ }
+ }
+ var output string
+ for _, v := range content {
+ output += "\r\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
+ }
+ return output
+}
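+
+// A hedged example of the alignment above: with labels "Error Trace" (11
+// characters, the longest) and "Error" (5), the shorter label is padded with
+// six spaces so both contents start in the same column:
+//
+//	"\r\tError Trace:\tfoo_test.go:12\n" +
+//	"\r\tError:      \tShould be true\n"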
+
+// Implements asserts that an object implements the specified interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+ if object == nil {
+ return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...)
+ }
+ if !reflect.TypeOf(object).Implements(interfaceType) {
+ return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
+ }
+
+ return true
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+
+ if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+ return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Equal asserts that two objects are equal.
+//
+// assert.Equal(t, 123, 123)
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
+
+ if !ObjectsAreEqual(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// formatUnequalValues takes two values of arbitrary types and returns string
+// representations appropriate to be presented to the user.
+//
+// If the values are not of like type, the returned strings will be prefixed
+// with the type name, and the value will be enclosed in parenthesis similar
+// to a type conversion in the Go grammar.
+func formatUnequalValues(expected, actual interface{}) (e string, a string) {
+ if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+ return fmt.Sprintf("%T(%#v)", expected, expected),
+ fmt.Sprintf("%T(%#v)", actual, actual)
+ }
+
+ return fmt.Sprintf("%#v", expected),
+ fmt.Sprintf("%#v", actual)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValues(t, uint32(123), int32(123))
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+ if !ObjectsAreEqualValues(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(t, int32(123), int64(123))
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+ aType := reflect.TypeOf(expected)
+ bType := reflect.TypeOf(actual)
+
+ if aType != bType {
+ return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...)
+ }
+
+ return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(t, err)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if !isNil(object) {
+ return true
+ }
+ return Fail(t, "Expected value not to be nil.", msgAndArgs...)
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+ if object == nil {
+ return true
+ }
+
+ value := reflect.ValueOf(object)
+ kind := value.Kind()
+ if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
+ return true
+ }
+
+ return false
+}
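+
+// Note the reflection step above also reports typed nils, which a plain
+// interface comparison would miss (illustrative values):
+//
+//	var p *int            // nil pointer
+//	isNil(p)              // true: caught by the reflect.Value check
+//	interface{}(p) == nil // false: the interface still holds a *int type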
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(t, err)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if isNil(object) {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
+func isEmpty(object interface{}) bool {
+
+ // get nil case out of the way
+ if object == nil {
+ return true
+ }
+
+ objValue := reflect.ValueOf(object)
+
+ switch objValue.Kind() {
+ // collection types are empty when they have no element
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ return objValue.Len() == 0
+ // pointers are empty if nil or if the value they point to is empty
+ case reflect.Ptr:
+ if objValue.IsNil() {
+ return true
+ }
+ deref := objValue.Elem().Interface()
+ return isEmpty(deref)
+ // for all other types, compare against the zero value
+ default:
+ zero := reflect.Zero(objValue.Type())
+ return reflect.DeepEqual(object, zero.Interface())
+ }
+}
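+
+// Representative results of the switch above (illustrative values only):
+//
+//	isEmpty(nil)         // true: the nil case
+//	isEmpty("")          // true: zero value of string
+//	isEmpty([]int{})     // true: collection with no elements
+//	isEmpty([]int{0})    // false: one element, even if it is the zero value
+//	isEmpty((*int)(nil)) // true: nil pointer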
+
+// Empty asserts that the specified object is empty, i.e. nil, "", false, 0, or
+// a slice or channel with len == 0.
+//
+// assert.Empty(t, obj)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+ pass := isEmpty(object)
+ if !pass {
+ Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty, i.e. not nil, "", false, 0, or
+// a slice or channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+ pass := !isEmpty(object)
+ if !pass {
+ Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// getLen tries to get the length of an object.
+// It returns (false, 0) if that is impossible.
+func getLen(x interface{}) (ok bool, length int) {
+ v := reflect.ValueOf(x)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+ return true, v.Len()
+}
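+
+// The deferred recover above converts reflect's panic on unsupported kinds
+// into a (false, 0) result (illustrative values):
+//
+//	getLen([]int{1, 2, 3}) // (true, 3)
+//	getLen("abc")          // (true, 3)
+//	getLen(42)             // (false, 0): reflect.Value.Len panics for int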
+
+// Len asserts that the specified object has a specific length.
+// Len also fails if the object has a type that the builtin len() does not accept.
+//
+// assert.Len(t, mySlice, 3)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+ ok, l := getLen(object)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+ }
+
+ if l != length {
+ return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+ }
+ return true
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+ if value != true {
+ return Fail(t, "Should be true", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+ if value != false {
+ return Fail(t, "Should be false", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2)
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
+
+ if ObjectsAreEqual(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// includeElement loops over the list to check whether it includes the element.
+// return (false, false) if impossible.
+// return (true, false) if element was not found.
+// return (true, true) if element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+ listValue := reflect.ValueOf(list)
+ elementValue := reflect.ValueOf(element)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ found = false
+ }
+ }()
+
+ if reflect.TypeOf(list).Kind() == reflect.String {
+ return true, strings.Contains(listValue.String(), elementValue.String())
+ }
+
+ if reflect.TypeOf(list).Kind() == reflect.Map {
+ mapKeys := listValue.MapKeys()
+ for i := 0; i < len(mapKeys); i++ {
+ if ObjectsAreEqual(mapKeys[i].Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+ }
+
+ for i := 0; i < listValue.Len(); i++ {
+ if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+
+}
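+
+// Illustrative results for the three branches above, with values chosen for
+// this sketch:
+//
+//	includeElement("Hello World", "World")      // (true, true): substring match
+//	includeElement(map[string]int{"a": 1}, "a") // (true, true): key lookup
+//	includeElement([]int{1, 2}, 3)              // (true, false): not present
+//	includeElement(42, 1)                       // (false, false): not iterable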
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+
+ ok, found := includeElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+
+ ok, found := includeElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if found {
+ return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if subset == nil {
+ return true // we consider nil to be equal to the nil set
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+ }
+ }
+
+ return true
+}
+
+// NotSubset asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if subset == nil {
+ return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return true
+ }
+ }
+
+ return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+//
+// Returns whether the assertion was successful (true) or not (false).
+func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if isEmpty(listA) && isEmpty(listB) {
+ return true
+ }
+
+ aKind := reflect.TypeOf(listA).Kind()
+ bKind := reflect.TypeOf(listB).Kind()
+
+ if aKind != reflect.Array && aKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
+ }
+
+ if bKind != reflect.Array && bKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
+ }
+
+ aValue := reflect.ValueOf(listA)
+ bValue := reflect.ValueOf(listB)
+
+ aLen := aValue.Len()
+ bLen := bValue.Len()
+
+ if aLen != bLen {
+ return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...)
+ }
+
+ // Mark indexes in bValue that we already used
+ visited := make([]bool, bLen)
+ for i := 0; i < aLen; i++ {
+ element := aValue.Index(i).Interface()
+ found := false
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ if ObjectsAreEqual(bValue.Index(j).Interface(), element) {
+ visited[j] = true
+ found = true
+ break
+ }
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...)
+ }
+ }
+
+ return true
+}
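+
+// The visited slice above gives multiset semantics: duplicate counts must
+// match on both sides (illustrative values):
+//
+//	ElementsMatch(t, []int{1, 3, 2, 3}, []int{1, 3, 3, 2}) // true: same elements, same counts
+//	ElementsMatch(t, []int{1, 1, 2}, []int{1, 2, 2})       // false: 1 occurs twice only on the left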
+
+// Condition uses a Comparison to assert a complex condition.
+func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
+ result := comp()
+ if !result {
+ Fail(t, "Condition failed!", msgAndArgs...)
+ }
+ return result
+}
+
+// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
+// methods, and represents a simple func that takes no arguments, and returns nothing.
+type PanicTestFunc func()
+
+// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
+func didPanic(f PanicTestFunc) (bool, interface{}) {
+
+ didPanic := false
+ var message interface{}
+ func() {
+
+ defer func() {
+ if message = recover(); message != nil {
+ didPanic = true
+ }
+ }()
+
+ // call the target function
+ f()
+
+ }()
+
+ return didPanic, message
+
+}
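+
+// Both outcomes, sketched with throwaway funcs; note that panic(nil) is
+// indistinguishable from no panic here, because recover() returns nil:
+//
+//	didPanic(func() { panic("boom") }) // (true, "boom")
+//	didPanic(func() {})                // (false, nil)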
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panics(t, func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+
+ if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+ }
+
+ return true
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+
+ funcDidPanic, panicValue := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+ }
+ if panicValue != expected {
+ return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%v\n\r\tPanic value:\t%v", f, expected, panicValue), msgAndArgs...)
+ }
+
+ return true
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanics(t, func(){ RemainCalm() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+
+ if funcDidPanic, panicValue := didPanic(f); funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+ }
+
+ return true
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+
+ dt := expected.Sub(actual)
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
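+
+// The bound above is symmetric, so argument order does not matter
+// (illustrative values; t0 is a hypothetical time.Time):
+//
+//	WithinDuration(t, t0, t0.Add(5*time.Second), 10*time.Second)  // passes: |dt| = 5s
+//	WithinDuration(t, t0, t0.Add(15*time.Second), 10*time.Second) // fails:  |dt| = 15s > 10s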
+
+func toFloat(x interface{}) (float64, bool) {
+ var xf float64
+ xok := true
+
+ switch xn := x.(type) {
+ case uint8:
+ xf = float64(xn)
+ case uint16:
+ xf = float64(xn)
+ case uint32:
+ xf = float64(xn)
+ case uint64:
+ xf = float64(xn)
+ case int:
+ xf = float64(xn)
+ case int8:
+ xf = float64(xn)
+ case int16:
+ xf = float64(xn)
+ case int32:
+ xf = float64(xn)
+ case int64:
+ xf = float64(xn)
+ case float32:
+ xf = float64(xn)
+ case float64:
+ xf = float64(xn)
+ case time.Duration:
+ xf = float64(xn)
+ default:
+ xok = false
+ }
+
+ return xf, xok
+}
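+
+// Illustrative conversions for the switch above (values chosen for this sketch):
+//
+//	toFloat(int8(3))         // (3, true)
+//	toFloat(2 * time.Second) // (2e9, true): a Duration counts nanoseconds
+//	toFloat("3")             // (0, false): strings are not numeric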
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+
+ af, aok := toFloat(expected)
+ bf, bok := toFloat(actual)
+
+ if !aok || !bok {
+ return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+ }
+
+ if math.IsNaN(af) {
+ return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
+ }
+
+ if math.IsNaN(bf) {
+ return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...)
+ }
+
+ dt := af - bf
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Slice ||
+ reflect.TypeOf(expected).Kind() != reflect.Slice {
+ return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ }
+
+ actualSlice := reflect.ValueOf(actual)
+ expectedSlice := reflect.ValueOf(expected)
+
+ if actualSlice.Len() != expectedSlice.Len() {
+ return Fail(t, "Parameters must have the same length", msgAndArgs...)
+ }
+
+ for i := 0; i < actualSlice.Len(); i++ {
+ result := InDelta(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), delta, msgAndArgs...)
+ if !result {
+ return result
+ }
+ }
+
+ return true
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Map ||
+ reflect.TypeOf(expected).Kind() != reflect.Map {
+ return Fail(t, "Arguments must be maps", msgAndArgs...)
+ }
+
+ expectedMap := reflect.ValueOf(expected)
+ actualMap := reflect.ValueOf(actual)
+
+ if expectedMap.Len() != actualMap.Len() {
+ return Fail(t, "Arguments must have the same numbe of keys", msgAndArgs...)
+ }
+
+ for _, k := range expectedMap.MapKeys() {
+ ev := expectedMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
+
+ if !ev.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...)
+ }
+
+ if !av.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...)
+ }
+
+ if !InDelta(
+ t,
+ ev.Interface(),
+ av.Interface(),
+ delta,
+ msgAndArgs...,
+ ) {
+ return false
+ }
+ }
+
+ return true
+}
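+
+// A usage sketch for InDeltaMapValues; the map literals are illustrative:
+//
+// want := map[string]float64{"pi": 3.1416, "e": 2.7183}
+// got := map[string]float64{"pi": 3.1415, "e": 2.7182}
+// assert.InDeltaMapValues(t, want, got, 0.001)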
+
+func calcRelativeError(expected, actual interface{}) (float64, error) {
+ af, aok := toFloat(expected)
+ if !aok {
+ return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+ }
+ if af == 0 {
+ return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
+ }
+ bf, bok := toFloat(actual)
+ if !bok {
+ return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
+ }
+
+ return math.Abs(af-bf) / math.Abs(af), nil
+}
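+
+// For example, with expected == 100.0 and actual == 101.0 the relative error
+// is |100.0-101.0| / |100.0| = 0.01, so InEpsilon below passes with an
+// epsilon of 0.02 and fails with an epsilon of 0.005.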
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ actualEpsilon, err := calcRelativeError(expected, actual)
+ if err != nil {
+ return Fail(t, err.Error(), msgAndArgs...)
+ }
+ if actualEpsilon > epsilon {
+ return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
+ }
+
+ return true
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Slice ||
+ reflect.TypeOf(expected).Kind() != reflect.Slice {
+ return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ }
+
+ actualSlice := reflect.ValueOf(actual)
+ expectedSlice := reflect.ValueOf(expected)
+
+ if actualSlice.Len() != expectedSlice.Len() {
+ return Fail(t, "Parameters must have the same length", msgAndArgs...)
+ }
+
+ for i := 0; i < actualSlice.Len(); i++ {
+ result := InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, msgAndArgs...)
+ if !result {
+ return result
+ }
+ }
+
+ return true
+}
+
+/*
+ Errors
+*/
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoError(t, err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
+ if err != nil {
+ return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(t, err) {
+// assert.Equal(t, expectedError, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+
+ if err == nil {
+ return Fail(t, "An error is expected but got nil.", msgAndArgs...)
+ }
+
+ return true
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualError(t, err, expectedErrorString)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
+ if !Error(t, theError, msgAndArgs...) {
+ return false
+ }
+ expected := errString
+ actual := theError.Error()
+ // don't need to use deep equals here, we know they are both strings
+ if expected != actual {
+ return Fail(t, fmt.Sprintf("Error message not equal:\n"+
+ "expected: %q\n"+
+ "actual : %q", expected, actual), msgAndArgs...)
+ }
+ return true
+}
+
+// matchRegexp returns true if a specified regexp matches a string.
+func matchRegexp(rx interface{}, str interface{}) bool {
+
+ var r *regexp.Regexp
+ if rr, ok := rx.(*regexp.Regexp); ok {
+ r = rr
+ } else {
+ r = regexp.MustCompile(fmt.Sprint(rx))
+ }
+
+ return r.FindStringIndex(fmt.Sprint(str)) != nil
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+
+ match := matchRegexp(rx, str)
+
+ if !match {
+ Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
+ }
+
+ return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ match := matchRegexp(rx, str)
+
+ if match {
+ Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
+ }
+
+ return !match
+}
+
+// Zero asserts that i is the zero value for its type and returns the truth.
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+ if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+ return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
+ }
+ return true
+}
+
+// NotZero asserts that i is not the zero value for its type and returns the truth.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+ if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+ return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
+ }
+ return true
+}
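+
+// A usage sketch: the zero value is type-specific, e.g.
+//
+// assert.Zero(t, 0) // passes: 0 is the zero value of int
+// assert.Zero(t, "") // passes: "" is the zero value of string
+// assert.NotZero(t, time.Now()) // passes for any non-zero time.Time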
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...)
+ }
+ return true
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if !info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...)
+ }
+ return true
+}
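+
+// A usage sketch for the two filesystem assertions; the paths are
+// illustrative:
+//
+// assert.FileExists(t, "testdata/config.json")
+// assert.DirExists(t, "testdata")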
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+ var expectedJSONAsInterface, actualJSONAsInterface interface{}
+
+ if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+ }
+
+ if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
+ }
+
+ return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
+}
+
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
+ t := reflect.TypeOf(v)
+ k := t.Kind()
+
+ if k == reflect.Ptr {
+ t = t.Elem()
+ k = t.Kind()
+ }
+ return t, k
+}
+
+// diff returns a diff of both values as long as both are of the same type and
+// are a struct, map, slice or array. Otherwise it returns an empty string.
+func diff(expected interface{}, actual interface{}) string {
+ if expected == nil || actual == nil {
+ return ""
+ }
+
+ et, ek := typeAndKind(expected)
+ at, _ := typeAndKind(actual)
+
+ if et != at {
+ return ""
+ }
+
+ if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array {
+ return ""
+ }
+
+ e := spewConfig.Sdump(expected)
+ a := spewConfig.Sdump(actual)
+
+ diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
+ A: difflib.SplitLines(e),
+ B: difflib.SplitLines(a),
+ FromFile: "Expected",
+ FromDate: "",
+ ToFile: "Actual",
+ ToDate: "",
+ Context: 1,
+ })
+
+ return "\n\nDiff:\n" + diff
+}
+
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+ if isFunction(expected) || isFunction(actual) {
+ return errors.New("cannot take func type as argument")
+ }
+ return nil
+}
+
+func isFunction(arg interface{}) bool {
+ if arg == nil {
+ return false
+ }
+ return reflect.TypeOf(arg).Kind() == reflect.Func
+}
+
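+// spewConfig keeps diff output stable and comparable: disabling pointer
+// addresses and capacities avoids spurious differences between otherwise
+// equal values, and sorted map keys make the Expected and Actual dumps line
+// up in the unified diff.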
+var spewConfig = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+}
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/doc.go
new file mode 100644
index 000000000..c9dccc4d6
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/doc.go
@@ -0,0 +1,45 @@
+// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
+//
+// Example Usage
+//
+// The following is a complete example using assert in a standard test function:
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(t, a, b, "The two words should be the same.")
+//
+// }
+//
+// If you assert many times, use the format below:
+//
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+// assert := assert.New(t)
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(a, b, "The two words should be the same.")
+// }
+//
+// Assertions
+//
+// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the
+// testing framework. This allows the assertion funcs to write the failings and other details to
+// the correct place.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+package assert
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/errors.go
new file mode 100644
index 000000000..ac9dc9d1d
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/errors.go
@@ -0,0 +1,10 @@
+package assert
+
+import (
+ "errors"
+)
+
+// AnError is an error instance useful for testing. If the code does not care
+// about error specifics and only needs an error to return, this error should
+// be used to make the test code more readable.
+var AnError = errors.New("assert.AnError general error for testing")
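+
+// A usage sketch: return AnError from a stub when the concrete error value
+// does not matter (failingReader is a hypothetical type):
+//
+// func (failingReader) Read(p []byte) (int, error) {
+// return 0, assert.AnError
+// }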
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/forward_assertions.go
new file mode 100644
index 000000000..9ad56851d
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -0,0 +1,16 @@
+package assert
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+ t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+ return &Assertions{
+ t: t,
+ }
+}
+
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/http_assertions.go
new file mode 100644
index 000000000..3101e78dd
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -0,0 +1,127 @@
+package assert
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+)
+
+// httpCode is a helper that returns the HTTP status code of the response. It
+// returns -1 and an error if building a new request fails.
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+ if err != nil {
+ return -1, err
+ }
+ handler(w, req)
+ return w.Code, nil
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ return false
+ }
+
+ isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
+ if !isSuccessCode {
+ Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isSuccessCode
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ return false
+ }
+
+ isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+ if !isRedirectCode {
+ Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isRedirectCode
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ return false
+ }
+
+ isErrorCode := code >= http.StatusBadRequest
+ if !isErrorCode {
+ Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isErrorCode
+}
+
+// HTTPBody is a helper that returns the HTTP body of the response. It returns
+// an empty string if building a new request fails.
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+ if err != nil {
+ return ""
+ }
+ handler(w, req)
+ return w.Body.String()
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if !contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return contains
+}
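+
+// A usage sketch tying the HTTP helpers together; the greet handler is
+// illustrative:
+//
+// greet := func(w http.ResponseWriter, r *http.Request) {
+// fmt.Fprintf(w, "hello, %s", r.URL.Query().Get("name"))
+// }
+// assert.HTTPSuccess(t, greet, "GET", "/greet", url.Values{"name": []string{"Go"}})
+// assert.HTTPBodyContains(t, greet, "GET", "/greet", url.Values{"name": []string{"Go"}}, "hello, Go")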
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return !contains
+}
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/doc.go
new file mode 100644
index 000000000..169de3922
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/doc.go
@@ -0,0 +1,28 @@
+// Package require implements the same assertions as the `assert` package but
+// stops test execution when a test fails.
+//
+// Example Usage
+//
+// The following is a complete example using require in a standard test function:
+// import (
+// "testing"
+// "github.com/stretchr/testify/require"
+// )
+//
+// func TestSomething(t *testing.T) {
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// require.Equal(t, a, b, "The two words should be the same.")
+//
+// }
+//
+// Assertions
+//
+// The `require` package has the same global functions as the `assert` package,
+// but instead of returning a boolean result they call `t.FailNow()`.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+package require
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/forward_requirements.go
new file mode 100644
index 000000000..ac71d4058
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/forward_requirements.go
@@ -0,0 +1,16 @@
+package require
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+ t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+ return &Assertions{
+ t: t,
+ }
+}
+
+//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/require.go
new file mode 100644
index 000000000..a21d02f81
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/require.go
@@ -0,0 +1,979 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package require
+
+import (
+ assert "github.com/stretchr/testify/assert"
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Condition uses a Comparison to assert a complex condition.
+func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
+ if !assert.Condition(t, comp, msgAndArgs...) {
+ t.FailNow()
+ }
+}
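+
+// A usage sketch: unlike the assert variants, these wrappers abort the test
+// on failure, so later statements can rely on the checked condition (the
+// path is illustrative):
+//
+// f, err := os.Open("config.json")
+// require.NoError(t, err) // stops the test here if err != nil
+// defer f.Close() // safe: err is known to be nil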
+
+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interface{}) {
+ if !assert.Conditionf(t, comp, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
+ if !assert.Contains(t, s, contains, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) {
+ if !assert.Containsf(t, s, contains, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) {
+ if !assert.DirExists(t, path, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExistsf(t TestingT, path string, msg string, args ...interface{}) {
+ if !assert.DirExistsf(t, path, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+//
+// Returns whether the assertion was successful (true) or not (false).
+func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
+ if !assert.ElementsMatch(t, listA, listB, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) {
+ if !assert.ElementsMatchf(t, listA, listB, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Empty asserts that the specified object is empty, i.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// assert.Empty(t, obj)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.Empty(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Emptyf asserts that the specified object is empty, i.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// assert.Emptyf(t, obj, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if !assert.Emptyf(t, object, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Equal asserts that two objects are equal.
+//
+// assert.Equal(t, 123, 123)
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if !assert.Equal(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualError(t, err, expectedErrorString)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
+ if !assert.EqualError(t, theError, errString, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) {
+ if !assert.EqualErrorf(t, theError, errString, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValues(t, uint32(123), int32(123))
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if !assert.EqualValues(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if !assert.EqualValuesf(t, expected, actual, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Equalf asserts that two objects are equal.
+//
+// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if !assert.Equalf(t, expected, actual, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(t, err) {
+// assert.Equal(t, expectedError, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Error(t TestingT, err error, msgAndArgs ...interface{}) {
+ if !assert.Error(t, err, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Errorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Errorf(t TestingT, err error, msg string, args ...interface{}) {
+ if !assert.Errorf(t, err, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(t, int32(123), int64(123))
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if !assert.Exactly(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if !assert.Exactlyf(t, expected, actual, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Fail reports a failure and stops test execution.
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
+ if !assert.Fail(t, failureMessage, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// FailNow fails the test immediately.
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
+ if !assert.FailNow(t, failureMessage, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// FailNowf fails the test immediately.
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) {
+ if !assert.FailNowf(t, failureMessage, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Failf reports a failure and stops test execution.
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) {
+ if !assert.Failf(t, failureMessage, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func False(t TestingT, value bool, msgAndArgs ...interface{}) {
+ if !assert.False(t, value, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Falsef asserts that the specified value is false.
+//
+// assert.Falsef(t, myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) {
+ if !assert.Falsef(t, value, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) {
+ if !assert.FileExists(t, path, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExistsf(t TestingT, path string, msg string, args ...interface{}) {
+ if !assert.FileExistsf(t, path, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+ if !assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+ if !assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+ if !assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+ if !assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ if !assert.HTTPError(t, handler, method, url, values, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ if !assert.HTTPErrorf(t, handler, method, url, values, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ if !assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ if !assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ if !assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ if !assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.Implements(t, interfaceObject, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
+ if !assert.Implementsf(t, interfaceObject, object, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ if !assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ if !assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ if !assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// assert.InDeltaf(t, math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ if !assert.InDeltaf(t, expected, actual, delta, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
+ if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
+ if !assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+ if !assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+ if !assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.IsType(t, expectedType, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) {
+ if !assert.IsTypef(t, expectedType, object, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
+ if !assert.JSONEq(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
+ if !assert.JSONEqf(t, expected, actual, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Len asserts that the specified object has a specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// assert.Len(t, mySlice, 3)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) {
+ if !assert.Len(t, object, length, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) {
+ if !assert.Lenf(t, object, length, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(t, err)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.Nil(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// assert.Nilf(t, err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if !assert.Nilf(t, object, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoError(t, err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
+ if !assert.NoError(t, err, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) {
+ if !assert.NoErrorf(t, err, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
+ if !assert.NotContains(t, s, contains, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) {
+ if !assert.NotContainsf(t, s, contains, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// NotEmpty asserts that the specified object is NOT empty, i.e. not nil, "",
+// false, 0, or a slice or a channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.NotEmpty(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotEmptyf asserts that the specified object is NOT empty, i.e. not nil, "",
+// false, 0, or a slice or a channel with len == 0.
+//
+// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if !assert.NotEmptyf(t, object, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2)
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if !assert.NotEqual(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if !assert.NotEqualf(t, expected, actual, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(t, err)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.NotNil(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// assert.NotNilf(t, err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if !assert.NotNilf(t, object, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanics(t, func(){ RemainCalm() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ if !assert.NotPanics(t, f, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ if !assert.NotPanicsf(t, f, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+ if !assert.NotRegexp(t, rx, str, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
+ if !assert.NotRegexpf(t, rx, str, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// NotSubset asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
+ if !assert.NotSubset(t, list, subset, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
+ if !assert.NotSubsetf(t, list, subset, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
+ if !assert.NotZero(t, i, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) {
+ if !assert.NotZerof(t, i, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panics(t, func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ if !assert.Panics(t, f, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ if !assert.PanicsWithValue(t, expected, f, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ if !assert.PanicsWithValuef(t, expected, f, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ if !assert.Panicsf(t, f, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+ if !assert.Regexp(t, rx, str, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
+ if !assert.Regexpf(t, rx, str, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
+ if !assert.Subset(t, list, subset, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
+ if !assert.Subsetf(t, list, subset, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func True(t TestingT, value bool, msgAndArgs ...interface{}) {
+ if !assert.True(t, value, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Truef asserts that the specified value is true.
+//
+// assert.Truef(t, myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Truef(t TestingT, value bool, msg string, args ...interface{}) {
+ if !assert.Truef(t, value, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
+ if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) {
+ if !assert.WithinDurationf(t, expected, actual, delta, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Zero asserts that i is the zero value for its type.
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
+ if !assert.Zero(t, i, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Zerof asserts that i is the zero value for its type.
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) {
+ if !assert.Zerof(t, i, msg, args...) {
+ t.FailNow()
+ }
+}
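
For contrast with the assert package these functions wrap: assert.* reports the failure and returns a bool so the test can continue, while the require wrappers above discard that bool and call t.FailNow(). A short sketch under that assumption:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"
    )

    func TestAssertVsRequire(t *testing.T) {
        // assert records a failure and returns false, so the test keeps running.
        if ok := assert.True(t, 1+1 == 2); ok {
            t.Log("assertion passed, continuing")
        }

        // require performs the same check but aborts the test on failure,
        // so later statements are only reached when the check passes.
        require.True(t, 1+1 == 2)
    }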
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/require_forward.go
new file mode 100644
index 000000000..769408503
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -0,0 +1,799 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package require
+
+import (
+ assert "github.com/stretchr/testify/assert"
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
+ Condition(a.t, comp, msgAndArgs...)
+}
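
The methods in this forwarded file bind a TestingT once via require.New, so repeated checks do not need t as the first argument. A sketch using the Condition forwarder (the comparison itself is illustrative):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"
    )

    func TestForwarded(t *testing.T) {
        r := require.New(t) // bind t once

        // assert.Comparison is just func() bool.
        isPositive := assert.Comparison(func() bool { return 42 > 0 })
        r.Condition(isPositive, "42 should be positive")
    }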
+
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...interface{}) {
+ Conditionf(a.t, comp, msg, args...)
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Contains("Hello World", "World")
+// a.Contains(["Hello", "World"], "World")
+// a.Contains({"Hello": "World"}, "Hello")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
+ Contains(a.t, s, contains, msgAndArgs...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Containsf("Hello World", "World", "error message %s", "formatted")
+// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) {
+ Containsf(a.t, s, contains, msg, args...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) {
+ DirExists(a.t, path, msgAndArgs...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) {
+ DirExistsf(a.t, path, msg, args...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
+ ElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) {
+ ElementsMatchf(a.t, listA, listB, msg, args...)
+}
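
ElementsMatch is a multiset comparison: order is ignored, but the number of occurrences of each element must match. For example:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestElementsMatch(t *testing.T) {
        // Same elements, same multiplicities, different order: passes.
        require.ElementsMatch(t, []int{1, 3, 2, 3}, []int{1, 3, 3, 2})

        // []int{1, 3, 2} would fail against []int{1, 3, 3, 2}:
        // the duplicate 3 appears a different number of times.
    }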
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// a.Empty(obj)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
+ Empty(a.t, object, msgAndArgs...)
+}
+
+// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// a.Emptyf(obj, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) {
+ Emptyf(a.t, object, msg, args...)
+}
+
+// Equal asserts that two objects are equal.
+//
+// a.Equal(123, 123)
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualError(err, expectedErrorString)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
+ EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) {
+ EqualErrorf(a.t, theError, errString, msg, args...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValues(uint32(123), int32(123))
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ Equalf(a.t, expected, actual, msg, args...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Error(err) {
+// assert.Equal(t, expectedError, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
+ Error(a.t, err, msgAndArgs...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Errorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) {
+ Errorf(a.t, err, msg, args...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// a.Exactly(int32(123), int64(123))
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ Exactlyf(a.t, expected, actual, msg, args...)
+}
+
+// Fail reports a failure.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) {
+ Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNow fails the test.
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) {
+ FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNowf fails the test.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) {
+ FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure.
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) {
+ Failf(a.t, failureMessage, msg, args...)
+}
+
+// False asserts that the specified value is false.
+//
+// a.False(myBool)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) {
+ False(a.t, value, msgAndArgs...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// a.Falsef(myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) {
+ Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) {
+ FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) {
+ FileExistsf(a.t, path, msg, args...)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+ HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+ HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+ HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+ HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
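
The HTTP helpers build a request from the method, URL, and values, invoke the handler in-process, and inspect the recorded response; no server is started. A sketch with a hypothetical handler:

    package example

    import (
        "fmt"
        "net/http"
        "testing"

        "github.com/stretchr/testify/require"
    )

    // healthHandler is an illustrative handler, not part of this patch.
    func healthHandler(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
        fmt.Fprint(w, "ok")
    }

    func TestHealthEndpoint(t *testing.T) {
        // Asserts a 2xx status from the handler invoked in-process.
        require.HTTPSuccess(t, healthHandler, "GET", "/health", nil)

        // Asserts on the recorded response body of the same handler.
        require.HTTPBodyContains(t, healthHandler, "GET", "/health", nil, "ok")
    }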
+
+// Implements asserts that an object implements the specified interface.
+//
+// a.Implements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
+ Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
+ Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// a.InDelta(math.Pi, (22 / 7.0), 0.01)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
+ InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
+ InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+ InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+ InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
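
InDelta bounds the absolute error (|expected - actual| <= delta), while InEpsilon bounds the relative error (|expected - actual| / |expected| < epsilon). Worked out for the doc example: |pi - 22/7| is about 0.00126, which is within an absolute delta of 0.01 and a relative epsilon of 0.001 (relative error is about 0.0004):

    package example

    import (
        "math"
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestFloatComparisons(t *testing.T) {
        // Absolute error: |pi - 22/7| ~= 0.00126 <= 0.01.
        require.InDelta(t, math.Pi, 22.0/7.0, 0.01)

        // Relative error: 0.00126 / pi ~= 0.0004 < 0.001.
        require.InEpsilon(t, math.Pi, 22.0/7.0, 0.001)
    }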
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
+ IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) {
+ IsTypef(a.t, expectedType, object, msg, args...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) {
+ JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) {
+ JSONEqf(a.t, expected, actual, msg, args...)
+}
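
JSONEq decodes both strings and compares the resulting values, so key order and whitespace are irrelevant:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestJSONEq(t *testing.T) {
        expected := `{"hello": "world", "foo": "bar"}`
        actual := `{"foo":"bar","hello":"world"}` // different order and spacing

        // Passes: both documents decode to the same value.
        require.JSONEq(t, expected, actual)
    }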
+
+// Len asserts that the specified object has a specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// a.Len(mySlice, 3)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) {
+ Len(a.t, object, length, msgAndArgs...)
+}
+
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) {
+ Lenf(a.t, object, length, msg, args...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+// a.Nil(err)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) {
+ Nil(a.t, object, msgAndArgs...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// a.Nilf(err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) {
+ Nilf(a.t, object, msg, args...)
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoError(err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) {
+ NoError(a.t, err, msgAndArgs...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoErrorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) {
+ NoErrorf(a.t, err, msg, args...)
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContains("Hello World", "Earth")
+// a.NotContains(["Hello", "World"], "Earth")
+// a.NotContains({"Hello": "World"}, "Earth")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
+ NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) {
+ NotContainsf(a.t, s, contains, msg, args...)
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if a.NotEmpty(obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
+ NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) {
+ NotEmptyf(a.t, object, msg, args...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// a.NotEqual(obj1, obj2)
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ NotEqualf(a.t, expected, actual, msg, args...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// a.NotNil(err)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) {
+ NotNil(a.t, object, msgAndArgs...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) {
+ NotNilf(a.t, object, msg, args...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanics(func(){ RemainCalm() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ NotPanics(a.t, f, msgAndArgs...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...interface{}) {
+ NotPanicsf(a.t, f, msg, args...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+// a.NotRegexp("^start", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+ NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
+ NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
+ NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
+ NotSubsetf(a.t, list, subset, msg, args...)
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) {
+ NotZero(a.t, i, msgAndArgs...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) {
+ NotZerof(a.t, i, msg, args...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panics(func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ Panics(a.t, f, msgAndArgs...)
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interface{}) {
+ Panicsf(a.t, f, msg, args...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// a.Regexp(regexp.MustCompile("start"), "it's starting")
+// a.Regexp("start...$", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+ Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
+ Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
+ Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
+ Subsetf(a.t, list, subset, msg, args...)
+}
+
+// True asserts that the specified value is true.
+//
+// a.True(myBool)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) {
+ True(a.t, value, msgAndArgs...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// a.Truef(myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) {
+ Truef(a.t, value, msg, args...)
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
+ WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) {
+ WithinDurationf(a.t, expected, actual, delta, msg, args...)
+}
+
+// Zero asserts that i is the zero value for its type.
+func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
+ Zero(a.t, i, msgAndArgs...)
+}
+
+// Zerof asserts that i is the zero value for its type.
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) {
+ Zerof(a.t, i, msg, args...)
+}
diff --git a/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/requirements.go
new file mode 100644
index 000000000..e404f016d
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/vendor/github.com/stretchr/testify/require/requirements.go
@@ -0,0 +1,9 @@
+package require
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+ Errorf(format string, args ...interface{})
+ FailNow()
+}
+
+//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs
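
TestingT is deliberately minimal (just Errorf and FailNow), so any type satisfying it can drive these assertions, not only *testing.T. A hypothetical recorder, useful for example when exercising assertion behavior in tests of test helpers (the type and field names here are illustrative):

    package example

    import "fmt"

    // recordingT is a hypothetical TestingT implementation that records
    // failures instead of aborting the test goroutine.
    type recordingT struct {
        failures []string
        stopped  bool
    }

    func (r *recordingT) Errorf(format string, args ...interface{}) {
        r.failures = append(r.failures, fmt.Sprintf(format, args...))
    }

    func (r *recordingT) FailNow() {
        // A real *testing.T stops the goroutine here; this sketch only flags it,
        // so callers must check stopped themselves.
        r.stopped = true
    }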
diff --git a/vendor/github.com/stretchr/testify/.travis.gofmt.sh b/vendor/github.com/stretchr/testify/.travis.gofmt.sh
new file mode 100755
index 000000000..bfffdca8b
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/.travis.gofmt.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+if [ -n "$(gofmt -l .)" ]; then
+ echo "Go code is not formatted:"
+ gofmt -d .
+ exit 1
+fi
diff --git a/vendor/github.com/stretchr/testify/.travis.gogenerate.sh b/vendor/github.com/stretchr/testify/.travis.gogenerate.sh
new file mode 100755
index 000000000..161b449cd
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/.travis.gogenerate.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+if [[ "$TRAVIS_GO_VERSION" =~ ^1\.[45](\..*)?$ ]]; then
+ exit 0
+fi
+
+go get github.com/ernesto-jimenez/gogen/imports
+go generate ./...
+if [ -n "$(git diff)" ]; then
+ echo "Go generate had not been run"
+ git diff
+ exit 1
+fi
diff --git a/vendor/github.com/stretchr/testify/.travis.govet.sh b/vendor/github.com/stretchr/testify/.travis.govet.sh
new file mode 100755
index 000000000..f8fbba7a1
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/.travis.govet.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+DIRS=". assert require mock _codegen"
+set -e
+for subdir in $DIRS; do
+ pushd $subdir
+ go vet
+ popd
+done
diff --git a/vendor/github.com/stretchr/testify/.travis.yml b/vendor/github.com/stretchr/testify/.travis.yml
index ffb9e0ddb..b33dc9f1d 100644
--- a/vendor/github.com/stretchr/testify/.travis.yml
+++ b/vendor/github.com/stretchr/testify/.travis.yml
@@ -3,14 +3,13 @@ language: go
sudo: false
go:
- - 1.1
- - 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- 1.7
+ - 1.8
+ - 1.9
- tip
script:
- - go test -v ./...
+ - ./.travis.gogenerate.sh
+ - ./.travis.gofmt.sh
+ - ./.travis.govet.sh
+ - go test -v -race ./...
diff --git a/vendor/github.com/stretchr/testify/Godeps/Godeps.json b/vendor/github.com/stretchr/testify/Godeps/Godeps.json
deleted file mode 100644
index df032ac31..000000000
--- a/vendor/github.com/stretchr/testify/Godeps/Godeps.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "ImportPath": "github.com/stretchr/testify",
- "GoVersion": "go1.5",
- "GodepVersion": "v74",
- "Packages": [
- "./..."
- ],
- "Deps": [
- {
- "ImportPath": "github.com/davecgh/go-spew/spew",
- "Comment": "v1.0.0-3-g6d21280",
- "Rev": "6d212800a42e8ab5c146b8ace3490ee17e5225f9"
- },
- {
- "ImportPath": "github.com/pmezard/go-difflib/difflib",
- "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d"
- },
- {
- "ImportPath": "github.com/stretchr/objx",
- "Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
- }
- ]
-}
diff --git a/vendor/github.com/stretchr/testify/Godeps/Readme b/vendor/github.com/stretchr/testify/Godeps/Readme
deleted file mode 100644
index 4cdaa53d5..000000000
--- a/vendor/github.com/stretchr/testify/Godeps/Readme
+++ /dev/null
@@ -1,5 +0,0 @@
-This directory tree is generated automatically by godep.
-
-Please do not edit.
-
-See https://github.com/tools/godep for more information.
diff --git a/vendor/github.com/stretchr/testify/Gopkg.lock b/vendor/github.com/stretchr/testify/Gopkg.lock
new file mode 100644
index 000000000..f52deee57
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/Gopkg.lock
@@ -0,0 +1,25 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/davecgh/go-spew"
+ packages = ["spew"]
+ revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "github.com/pmezard/go-difflib"
+ packages = ["difflib"]
+ revision = "d8ed2627bdf02c080bf22230dbb337003b7aba2d"
+
+[[projects]]
+ name = "github.com/stretchr/objx"
+ packages = ["."]
+ revision = "cbeaeb16a013161a98496fad62933b1d21786672"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "6bd8fb1f11a0d3df245fc01bd8853f6dac40b83457e780f7978ca30244647c7b"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/stretchr/testify/Gopkg.toml b/vendor/github.com/stretchr/testify/Gopkg.toml
new file mode 100644
index 000000000..dac862384
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/Gopkg.toml
@@ -0,0 +1,26 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+
+[[constraint]]
+ name = "github.com/davecgh/go-spew"
+ version = ">=1.0.0, <=3.0.0-g6d21280"
diff --git a/vendor/github.com/stretchr/testify/_codegen/main.go b/vendor/github.com/stretchr/testify/_codegen/main.go
index 328009f84..2e5e8124f 100644
--- a/vendor/github.com/stretchr/testify/_codegen/main.go
+++ b/vendor/github.com/stretchr/testify/_codegen/main.go
@@ -1,5 +1,5 @@
// This program reads all assertion functions from the assert package and
-// automatically generates the corersponding requires and forwarded assertions
+// automatically generates the corresponding requires and forwarded assertions
package main
@@ -10,6 +10,7 @@ import (
"go/ast"
"go/build"
"go/doc"
+ "go/format"
"go/importer"
"go/parser"
"go/token"
@@ -19,6 +20,7 @@ import (
"log"
"os"
"path"
+ "regexp"
"strings"
"text/template"
@@ -27,6 +29,7 @@ import (
var (
pkg = flag.String("assert-path", "github.com/stretchr/testify/assert", "Path to the assert package")
+ includeF = flag.Bool("include-format-funcs", false, "include format functions such as Errorf and Equalf")
outputPkg = flag.String("output-package", "", "package for the resulting code")
tmplFile = flag.String("template", "", "What file to load the function template from")
out = flag.String("out", "", "What file to write the source code to")
@@ -77,13 +80,18 @@ func generateCode(importer imports.Importer, funcs []testFunc) error {
}
}
+ code, err := format.Source(buff.Bytes())
+ if err != nil {
+ return err
+ }
+
// Write file
output, err := outputFile()
if err != nil {
return err
}
defer output.Close()
- _, err = io.Copy(output, buff)
+ _, err = io.Copy(output, bytes.NewReader(code))
return err
}
@@ -133,7 +141,7 @@ func analyzeCode(scope *types.Scope, docs *doc.Package) (imports.Importer, []tes
if !ok {
continue
}
- // Check function signatuer has at least two arguments
+ // Check function signature has at least two arguments
sig := fn.Type().(*types.Signature)
if sig.Params().Len() < 2 {
continue
@@ -151,13 +159,18 @@ func analyzeCode(scope *types.Scope, docs *doc.Package) (imports.Importer, []tes
continue
}
+ // Skip functions ending with f
+ if strings.HasSuffix(fdocs.Name, "f") && !*includeF {
+ continue
+ }
+
funcs = append(funcs, testFunc{*outputPkg, fdocs, fn})
importer.AddImportsFrom(sig.Params())
}
return importer, funcs, nil
}
-// parsePackageSource returns the types scope and the package documentation from the pa
+// parsePackageSource returns the types scope and the package documentation from the package
func parsePackageSource(pkg string) (*types.Scope, *doc.Package, error) {
pd, err := build.Import(pkg, ".", 0)
if err != nil {
@@ -258,10 +271,26 @@ func (f *testFunc) ForwardedParams() string {
return p
}
+func (f *testFunc) ParamsFormat() string {
+ return strings.Replace(f.Params(), "msgAndArgs", "msg string, args", 1)
+}
+
+func (f *testFunc) ForwardedParamsFormat() string {
+ return strings.Replace(f.ForwardedParams(), "msgAndArgs", "append([]interface{}{msg}, args...)", 1)
+}
+
func (f *testFunc) Comment() string {
return "// " + strings.Replace(strings.TrimSpace(f.DocInfo.Doc), "\n", "\n// ", -1)
}
+func (f *testFunc) CommentFormat() string {
+ search := fmt.Sprintf("%s", f.DocInfo.Name)
+ replace := fmt.Sprintf("%sf", f.DocInfo.Name)
+ comment := strings.Replace(f.Comment(), search, replace, -1)
+ exp := regexp.MustCompile(replace + `\(((\(\)|[^)])+)\)`)
+ return exp.ReplaceAllString(comment, replace+`($1, "error message %s", "formatted")`)
+}
+
func (f *testFunc) CommentWithoutT(receiver string) string {
search := fmt.Sprintf("assert.%s(t, ", f.DocInfo.Name)
replace := fmt.Sprintf("%s.%s(", receiver, f.DocInfo.Name)
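
The new CommentFormat above derives each f-variant's doc comment by renaming the function and splicing the format arguments into the example call with a regular expression. Because the capture group stops at the first ')' that is not a literal "()", examples containing nested calls can end up with the arguments spliced into the inner call. A standalone sketch of that behavior (self-contained, mirroring the regex from the hunk above):

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    func main() {
        name := "EqualValues"
        comment := `assert.EqualValues(t, uint32(123), int32(123))`

        // Mirror the CommentFormat logic: rename to the f-variant, then
        // splice the format arguments into the example call.
        replaced := strings.Replace(comment, name, name+"f", -1)
        exp := regexp.MustCompile(name + `f\(((\(\)|[^)])+)\)`)
        out := exp.ReplaceAllString(replaced, name+`f($1, "error message %s", "formatted")`)

        // The capture stops at the first ')' outside a literal "()", so the
        // args land inside the nested uint32 call:
        //   assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123))
        fmt.Println(out)
    }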
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
new file mode 100644
index 000000000..3e172f2ce
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -0,0 +1,405 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
+ return Condition(t, comp, append([]interface{}{msg}, args...)...)
+}
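
Each generated f-variant prepends its format string to the variadic args and forwards the result as a single msgAndArgs slice; append([]interface{}{msg}, args...) is the idiom that builds it. A small sketch of the same packing (packArgs is an illustrative name, not a testify API):

    package main

    import "fmt"

    // packArgs mirrors the forwarding idiom used above: a format string plus
    // its variadic args become the single msgAndArgs slice that the non-f
    // variants expect.
    func packArgs(msg string, args ...interface{}) []interface{} {
        return append([]interface{}{msg}, args...)
    }

    func main() {
        msgAndArgs := packArgs("error message %s", "formatted")
        // testify ultimately renders this as Sprintf(msgAndArgs[0], msgAndArgs[1:]...).
        fmt.Println(fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...))
    }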
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ return DirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
+// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Emptyf(t, obj, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return Empty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
+ return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Errorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ return Error(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Failf reports a failure.
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// FailNowf fails the test.
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// assert.Falsef(t, myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ return False(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ return FileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// assert.InDeltaf(t, math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
+ return Len(t, object, length, append([]interface{}{msg}, args...)...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// assert.Nilf(t, err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return Nil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ return NoError(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// assert.NotNilf(t, err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return NotNil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return NotPanics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// NotZerof asserts that i is not the zero value for its type and returns the truth.
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ return NotZero(t, i, append([]interface{}{msg}, args...)...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return Panics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// assert.Truef(t, myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ return True(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// Zerof asserts that i is the zero value for its type and returns the truth.
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ return Zero(t, i, append([]interface{}{msg}, args...)...)
+}
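+
+// A sketch of the forwarding pattern every *f wrapper above relies on:
+// prepending msg to args rebuilds the variadic msgAndArgs slice that the
+// base assertion expects, so Zerof(t, v, "got %v", x) behaves exactly like
+// Zero(t, v, "got %v", x). (forwardf is a hypothetical helper, shown only
+// to isolate the append call.)
+//
+//   func forwardf(msg string, args ...interface{}) []interface{} {
+//       return append([]interface{}{msg}, args...)
+//   }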
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
new file mode 100644
index 000000000..c5cc66f43
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
@@ -0,0 +1,4 @@
+{{.CommentFormat}}
+func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
+ return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
+}
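+
+// Expansion sketch: for a generator entry whose DocInfo.Name is "True", the
+// template above renders the Truef wrapper exactly as it appears in
+// assertion_format.go (the parameter lists are inferred from that generated
+// output, not taken from the _codegen tool itself):
+//
+//   func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
+//       return True(t, value, append([]interface{}{msg}, args...)...)
+//   }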
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index e6a796046..7c4f497bb 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -1,387 +1,798 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
-*/
+ */
package assert
import (
-
http "net/http"
url "net/url"
time "time"
)
-
// Condition uses a Comparison to assert a complex condition.
func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
return Condition(a.t, comp, msgAndArgs...)
}
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
+ return Conditionf(a.t, comp, msg, args...)
+}
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
-//
-// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
-// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
-// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
-//
+//
+// a.Contains("Hello World", "World")
+// a.Contains(["Hello", "World"], "World")
+// a.Contains({"Hello": "World"}, "Hello")
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
return Contains(a.t, s, contains, msgAndArgs...)
}
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Containsf("Hello World", "World", "error message %s", "formatted")
+// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return Containsf(a.t, s, contains, msg, args...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
+ return DirExists(a.t, path, msgAndArgs...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
+ return DirExistsf(a.t, path, msg, args...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+ return ElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ return ElementsMatchf(a.t, listA, listB, msg, args...)
+}
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0, or
// a slice or a channel with len == 0.
-//
+//
// a.Empty(obj)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
return Empty(a.t, object, msgAndArgs...)
}
+// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// a.Emptyf(obj, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
+ return Emptyf(a.t, object, msg, args...)
+}
// Equal asserts that two objects are equal.
-//
-// a.Equal(123, 123, "123 and 123 should be equal")
-//
+//
+// a.Equal(123, 123)
+//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Equal(a.t, expected, actual, msgAndArgs...)
}
-
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
-//
+//
// actualObj, err := SomeFunction()
-// if assert.Error(t, err, "An error was expected") {
-// assert.Equal(t, err, expectedError)
-// }
-//
+// a.EqualError(err, expectedErrorString)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
return EqualError(a.t, theError, errString, msgAndArgs...)
}
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
+ return EqualErrorf(a.t, theError, errString, msg, args...)
+}
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
-//
-// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
-//
+//
+// a.EqualValues(uint32(123), int32(123))
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Equalf(a.t, expected, actual, msg, args...)
+}
// Error asserts that a function returned an error (i.e. not `nil`).
-//
+//
// actualObj, err := SomeFunction()
-// if a.Error(err, "An error was expected") {
-// assert.Equal(t, err, expectedError)
+// if a.Error(err) {
+// assert.Equal(t, expectedError, err)
// }
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
return Error(a.t, err, msgAndArgs...)
}
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Errorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedError, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
+ return Errorf(a.t, err, msg, args...)
+}
-// Exactly asserts that two objects are equal is value and type.
-//
-// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
-//
+// Exactly asserts that two objects are equal in value and type.
+//
+// a.Exactly(int32(123), int64(123))
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Exactly(a.t, expected, actual, msgAndArgs...)
}
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Exactlyf(a.t, expected, actual, msg, args...)
+}
// Fail reports a failure through the TestingT interface.
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
return Fail(a.t, failureMessage, msgAndArgs...)
}
-
// FailNow fails the test immediately.
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
return FailNow(a.t, failureMessage, msgAndArgs...)
}
+// FailNowf fails the test immediately.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
+ return FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure through the TestingT interface.
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
+ return Failf(a.t, failureMessage, msg, args...)
+}
// False asserts that the specified value is false.
-//
-// a.False(myBool, "myBool should be false")
-//
+//
+// a.False(myBool)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
return False(a.t, value, msgAndArgs...)
}
+// Falsef asserts that the specified value is false.
+//
+// a.Falsef(myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
+ return Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
+ return FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
+ return FileExistsf(a.t, path, msg, args...)
+}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
-//
+//
// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
- return HTTPBodyContains(a.t, handler, method, url, values, str)
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
}
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
-//
+//
// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
- return HTTPBodyNotContains(a.t, handler, method, url, values, str)
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
}
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
// HTTPError asserts that a specified handler returns an error status code.
-//
+//
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool {
- return HTTPError(a.t, handler, method, url, values)
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
}
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
-//
+//
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool {
- return HTTPRedirect(a.t, handler, method, url, values)
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
}
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
// HTTPSuccess asserts that a specified handler returns a success status code.
-//
+//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool {
- return HTTPSuccess(a.t, handler, method, url, values)
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
}
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
// Implements asserts that an object implements the specified interface.
-//
-// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
+//
+// a.Implements((*MyInterface)(nil), new(MyObject))
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return Implements(a.t, interfaceObject, object, msgAndArgs...)
}
+// Implementsf asserts that an object implements the specified interface.
+//
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return Implementsf(a.t, interfaceObject, object, msg, args...)
+}
// InDelta asserts that the two numerals are within delta of each other.
-//
+//
// a.InDelta(math.Pi, (22 / 7.0), 0.01)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDelta(a.t, expected, actual, delta, msgAndArgs...)
}
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
// InDeltaSlice is the same as InDelta, except it compares two slices.
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
}
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
// InEpsilon asserts that expected and actual have a relative error less than epsilon
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
}
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
-// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
-func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
- return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
}
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return IsType(a.t, expectedType, object, msgAndArgs...)
}
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return IsTypef(a.t, expectedType, object, msg, args...)
+}
// JSONEq asserts that two JSON strings are equivalent.
-//
+//
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
return JSONEq(a.t, expected, actual, msgAndArgs...)
}
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ return JSONEqf(a.t, expected, actual, msg, args...)
+}
// Len asserts that the specified object has a specific length.
// Len also fails if the object has a type that len() does not accept.
-//
-// a.Len(mySlice, 3, "The size of slice is not 3")
-//
+//
+// a.Len(mySlice, 3)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
return Len(a.t, object, length, msgAndArgs...)
}
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
+ return Lenf(a.t, object, length, msg, args...)
+}
// Nil asserts that the specified object is nil.
-//
-// a.Nil(err, "err should be nothing")
-//
+//
+// a.Nil(err)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
return Nil(a.t, object, msgAndArgs...)
}
+// Nilf asserts that the specified object is nil.
+//
+// a.Nilf(err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
+ return Nilf(a.t, object, msg, args...)
+}
// NoError asserts that a function returned no error (i.e. `nil`).
-//
+//
// actualObj, err := SomeFunction()
// if a.NoError(err) {
-// assert.Equal(t, actualObj, expectedObj)
+// assert.Equal(t, expectedObj, actualObj)
// }
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
return NoError(a.t, err, msgAndArgs...)
}
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoErrorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
+ return NoErrorf(a.t, err, msg, args...)
+}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
-//
-// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
-// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
-// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
-//
+//
+// a.NotContains("Hello World", "Earth")
+// a.NotContains(["Hello", "World"], "Earth")
+// a.NotContains({"Hello": "World"}, "Earth")
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
return NotContains(a.t, s, contains, msgAndArgs...)
}
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return NotContainsf(a.t, s, contains, msg, args...)
+}
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0, or
// a slice or a channel with len == 0.
-//
+//
// if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1])
// }
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
return NotEmpty(a.t, object, msgAndArgs...)
}
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
+ return NotEmptyf(a.t, object, msg, args...)
+}
// NotEqual asserts that the specified values are NOT equal.
-//
-// a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
-//
+//
+// a.NotEqual(obj1, obj2)
+//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return NotEqual(a.t, expected, actual, msgAndArgs...)
}
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return NotEqualf(a.t, expected, actual, msg, args...)
+}
// NotNil asserts that the specified object is not nil.
-//
-// a.NotNil(err, "err should be something")
-//
+//
+// a.NotNil(err)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
return NotNil(a.t, object, msgAndArgs...)
}
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
+ return NotNilf(a.t, object, msg, args...)
+}
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// a.NotPanics(func(){
-// RemainCalm()
-// }, "Calling RemainCalm() should NOT panic")
-//
+//
+// a.NotPanics(func(){ RemainCalm() })
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return NotPanics(a.t, f, msgAndArgs...)
}
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ return NotPanicsf(a.t, f, msg, args...)
+}
// NotRegexp asserts that a specified regexp does not match a string.
-//
+//
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
// a.NotRegexp("^start", "it's not starting")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
return NotRegexp(a.t, rx, str, msgAndArgs...)
}
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ return NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return NotSubsetf(a.t, list, subset, msg, args...)
+}
// NotZero asserts that i is not the zero value for its type and returns the truth.
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
return NotZero(a.t, i, msgAndArgs...)
}
+// NotZerof asserts that i is not the zero value for its type and returns the truth.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
+ return NotZerof(a.t, i, msg, args...)
+}
// Panics asserts that the code inside the specified PanicTestFunc panics.
-//
-// a.Panics(func(){
-// GoCrazy()
-// }, "Calling GoCrazy() should panic")
-//
+//
+// a.Panics(func(){ GoCrazy() })
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return Panics(a.t, f, msgAndArgs...)
}
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ return PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ return Panicsf(a.t, f, msg, args...)
+}
// Regexp asserts that a specified regexp matches a string.
-//
+//
// a.Regexp(regexp.MustCompile("start"), "it's starting")
// a.Regexp("start...$", "it's not starting")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
return Regexp(a.t, rx, str, msgAndArgs...)
}
+// Regexpf asserts that a specified regexp matches a string.
+//
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ return Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return Subsetf(a.t, list, subset, msg, args...)
+}
// True asserts that the specified value is true.
-//
-// a.True(myBool, "myBool should be true")
-//
+//
+// a.True(myBool)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
return True(a.t, value, msgAndArgs...)
}
+// Truef asserts that the specified value is true.
+//
+// a.Truef(myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
+ return Truef(a.t, value, msg, args...)
+}
// WithinDuration asserts that the two times are within duration delta of each other.
-//
-// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
-//
+//
+// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
}
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ return WithinDurationf(a.t, expected, actual, delta, msg, args...)
+}
// Zero asserts that i is the zero value for its type and returns the truth.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
return Zero(a.t, i, msgAndArgs...)
}
+
+// Zerof asserts that i is the zero value for its type and returns the truth.
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
+ return Zerof(a.t, i, msg, args...)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index b3f4e170d..9d387bc77 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -4,8 +4,10 @@ import (
"bufio"
"bytes"
"encoding/json"
+ "errors"
"fmt"
"math"
+ "os"
"reflect"
"regexp"
"runtime"
@@ -18,9 +20,7 @@ import (
"github.com/pmezard/go-difflib/difflib"
)
-func init() {
- spew.Config.SortKeys = true
-}
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl
// TestingT is an interface wrapper around *testing.T
type TestingT interface {
@@ -42,7 +42,15 @@ func ObjectsAreEqual(expected, actual interface{}) bool {
if expected == nil || actual == nil {
return expected == actual
}
-
+ if exp, ok := expected.([]byte); ok {
+ act, ok := actual.([]byte)
+ if !ok {
+ return false
+ } else if exp == nil || act == nil {
+ return exp == nil && act == nil
+ }
+ return bytes.Equal(exp, act)
+ }
return reflect.DeepEqual(expected, actual)
}
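
// With the new []byte fast path, byte slices are compared by content via
// bytes.Equal instead of falling through to reflect.DeepEqual. A behaviour
// sketch (ObjectsAreEqual is exported, so this is callable from a test):
//
//   ObjectsAreEqual([]byte("abc"), []byte("abc")) // true: equal contents
//   ObjectsAreEqual([]byte(nil), []byte{})        // false: only one side is nil
//   ObjectsAreEqual([]byte("abc"), "abc")         // false: actual is not a []byte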
@@ -112,10 +120,12 @@ func CallerInfo() []string {
}
parts := strings.Split(file, "/")
- dir := parts[len(parts)-2]
file = parts[len(parts)-1]
- if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
- callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ if len(parts) > 1 {
+ dir := parts[len(parts)-2]
+ if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ }
}
// Drop the package
@@ -157,7 +167,7 @@ func getWhitespaceString() string {
parts := strings.Split(file, "/")
file = parts[len(parts)-1]
- return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line)))
+ return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line)))
}
@@ -174,22 +184,18 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
return ""
}
-// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's
-// test printing (see inner comment for specifics)
-func indentMessageLines(message string, tabs int) string {
+// Aligns the provided message so that all lines after the first line start at the same location as the first line.
+// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
+// basis on which the alignment occurs).
+func indentMessageLines(message string, longestLabelLen int) string {
outBuf := new(bytes.Buffer)
for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+ // no need to align first line because it starts at the correct location (after the label)
if i != 0 {
- outBuf.WriteRune('\n')
- }
- for ii := 0; ii < tabs; ii++ {
- outBuf.WriteRune('\t')
- // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter
- // by 1 prematurely.
- if ii == 0 && i > 0 {
- ii++
- }
+ // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
+ outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
}
outBuf.WriteString(scanner.Text())
}
@@ -221,42 +227,70 @@ func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool
// Fail reports a failure through the TestingT interface.
func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ content := []labeledContent{
+ {"Error Trace", strings.Join(CallerInfo(), "\n\r\t\t\t")},
+ {"Error", failureMessage},
+ }
- message := messageFromMsgAndArgs(msgAndArgs...)
+ // Add test name if the Go version supports it
+ if n, ok := t.(interface {
+ Name() string
+ }); ok {
+ content = append(content, labeledContent{"Test", n.Name()})
+ }
- errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t")
+ message := messageFromMsgAndArgs(msgAndArgs...)
if len(message) > 0 {
- t.Errorf("\r%s\r\tError Trace:\t%s\n"+
- "\r\tError:%s\n"+
- "\r\tMessages:\t%s\n\r",
- getWhitespaceString(),
- errorTrace,
- indentMessageLines(failureMessage, 2),
- message)
- } else {
- t.Errorf("\r%s\r\tError Trace:\t%s\n"+
- "\r\tError:%s\n\r",
- getWhitespaceString(),
- errorTrace,
- indentMessageLines(failureMessage, 2))
+ content = append(content, labeledContent{"Messages", message})
}
+ t.Errorf("%s", "\r"+getWhitespaceString()+labeledOutput(content...))
+
return false
}
+type labeledContent struct {
+ label string
+ content string
+}
+
+// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
+//
+// \r\t{{label}}:{{align_spaces}}\t{{content}}\n
+//
+// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
+// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
+// alignment is achieved, "\t{{content}}\n" is added for the output.
+//
+// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line.
+func labeledOutput(content ...labeledContent) string {
+ longestLabel := 0
+ for _, v := range content {
+ if len(v.label) > longestLabel {
+ longestLabel = len(v.label)
+ }
+ }
+ var output string
+ for _, v := range content {
+ output += "\r\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
+ }
+ return output
+}
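+
+// Rendered sketch: with the labels "Error Trace" and "Error", longestLabel is
+// len("Error Trace") == 11, so labeledOutput pads the shorter label with six
+// spaces before the tab (trace and message values are illustrative; control
+// characters shown escaped):
+//
+//   \r\tError Trace:\tassertions_test.go:42\n
+//   \r\tError:      \tNot equal: ...\n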
+
// Implements asserts that an object implements the specified interface.
//
-// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
-
interfaceType := reflect.TypeOf(interfaceObject).Elem()
+ if object == nil {
+ return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...)
+ }
if !reflect.TypeOf(object).Implements(interfaceType) {
return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
}
return true
-
}
// IsType asserts that the specified objects are of the same type.
@@ -271,16 +305,25 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs
// Equal asserts that two objects are equal.
//
-// assert.Equal(t, 123, 123, "123 and 123 should be equal")
+// assert.Equal(t, 123, 123)
//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
if !ObjectsAreEqual(expected, actual) {
diff := diff(expected, actual)
expected, actual = formatUnequalValues(expected, actual)
- return Fail(t, fmt.Sprintf("Not equal: %s (expected)\n"+
- " != %s (actual)%s", expected, actual, diff), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
}
return true
@@ -294,51 +337,38 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
// with the type name, and the value will be enclosed in parenthesis similar
// to a type conversion in the Go grammar.
func formatUnequalValues(expected, actual interface{}) (e string, a string) {
- aType := reflect.TypeOf(expected)
- bType := reflect.TypeOf(actual)
-
- if aType != bType && isNumericType(aType) && isNumericType(bType) {
- return fmt.Sprintf("%v(%#v)", aType, expected),
- fmt.Sprintf("%v(%#v)", bType, actual)
+ if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+ return fmt.Sprintf("%T(%#v)", expected, expected),
+ fmt.Sprintf("%T(%#v)", actual, actual)
}
return fmt.Sprintf("%#v", expected),
fmt.Sprintf("%#v", actual)
}
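
// With the simplification above, any type mismatch (not only numeric ones)
// now gets the type-prefixed form. A quick sketch of the returned pairs:
//
//   formatUnequalValues(int32(1), int64(1)) // "int32(1)", "int64(1)"
//   formatUnequalValues("a", "b")           // `"a"`, `"b"` (same type, plain %#v)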
-func isNumericType(t reflect.Type) bool {
- switch t.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return true
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return true
- case reflect.Float32, reflect.Float64:
- return true
- }
-
- return false
-}
-
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
-// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
+// assert.EqualValues(t, uint32(123), int32(123))
//
// Returns whether the assertion was successful (true) or not (false).
func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if !ObjectsAreEqualValues(expected, actual) {
- return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
- " != %#v (actual)", expected, actual), msgAndArgs...)
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
}
return true
}
-// Exactly asserts that two objects are equal is value and type.
+// Exactly asserts that two objects are equal in value and type.
//
-// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
+// assert.Exactly(t, int32(123), int64(123))
//
// Returns whether the assertion was successful (true) or not (false).
func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
@@ -356,7 +386,7 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
// NotNil asserts that the specified object is not nil.
//
-// assert.NotNil(t, err, "err should be something")
+// assert.NotNil(t, err)
//
// Returns whether the assertion was successful (true) or not (false).
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
@@ -383,7 +413,7 @@ func isNil(object interface{}) bool {
// Nil asserts that the specified object is nil.
//
-// assert.Nil(t, err, "err should be nothing")
+// assert.Nil(t, err)
//
// Returns whether the assertion was successful (true) or not (false).
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
@@ -393,66 +423,32 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
}
-var numericZeros = []interface{}{
- int(0),
- int8(0),
- int16(0),
- int32(0),
- int64(0),
- uint(0),
- uint8(0),
- uint16(0),
- uint32(0),
- uint64(0),
- float32(0),
- float64(0),
-}
-
// isEmpty gets whether the specified object is considered empty or not.
func isEmpty(object interface{}) bool {
+ // get nil case out of the way
if object == nil {
return true
- } else if object == "" {
- return true
- } else if object == false {
- return true
- }
-
- for _, v := range numericZeros {
- if object == v {
- return true
- }
}
objValue := reflect.ValueOf(object)
switch objValue.Kind() {
- case reflect.Map:
- fallthrough
- case reflect.Slice, reflect.Chan:
- {
- return (objValue.Len() == 0)
- }
- case reflect.Struct:
- switch object.(type) {
- case time.Time:
- return object.(time.Time).IsZero()
- }
+ // collection types are empty when they have no element
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ return objValue.Len() == 0
+ // pointers are empty if nil or if the value they point to is empty
case reflect.Ptr:
- {
- if objValue.IsNil() {
- return true
- }
- switch object.(type) {
- case *time.Time:
- return object.(*time.Time).IsZero()
- default:
- return false
- }
+ if objValue.IsNil() {
+ return true
}
+ deref := objValue.Elem().Interface()
+ return isEmpty(deref)
+ // for all other types, compare against the zero value
+ default:
+ zero := reflect.Zero(objValue.Type())
+ return reflect.DeepEqual(object, zero.Interface())
}
- return false
}
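
The rewrite makes isEmpty recursive and zero-value driven instead of relying on a hard-coded list of numeric zeros. A sketch of the resulting behavior (the config type is hypothetical):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEmptyZeroValues(t *testing.T) {
	type config struct {
		Name  string
		Ports []int
	}
	var nilPtr *config

	assert.Empty(t, config{})  // any value equal to its type's zero value
	assert.Empty(t, nilPtr)    // nil pointer
	assert.Empty(t, &config{}) // pointer to an empty value (recursion)
	assert.Empty(t, []int{})   // zero-length collection

	assert.NotEmpty(t, config{Name: "svc"})
}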
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
@@ -506,7 +502,7 @@ func getLen(x interface{}) (ok bool, length int) {
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept.
//
-// assert.Len(t, mySlice, 3, "The size of slice is not 3")
+// assert.Len(t, mySlice, 3)
//
// Returns whether the assertion was successful (true) or not (false).
func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
@@ -523,7 +519,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
// True asserts that the specified value is true.
//
-// assert.True(t, myBool, "myBool should be true")
+// assert.True(t, myBool)
//
// Returns whether the assertion was successful (true) or not (false).
func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
@@ -538,7 +534,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
// False asserts that the specified value is false.
//
-// assert.False(t, myBool, "myBool should be false")
+// assert.False(t, myBool)
//
// Returns whether the assertion was successful (true) or not (false).
func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
@@ -553,10 +549,17 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
// NotEqual asserts that the specified values are NOT equal.
//
-// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
+// assert.NotEqual(t, obj1, obj2)
//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
if ObjectsAreEqual(expected, actual) {
return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
@@ -607,9 +610,9 @@ func includeElement(list interface{}, element interface{}) (ok, found bool) {
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
-// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
-// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
-// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
//
// Returns whether the assertion was successful (true) or not (false).
func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
@@ -629,9 +632,9 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
-// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
-// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
-// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
//
// Returns whether the assertion was successful (true) or not (false).
func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
@@ -648,6 +651,148 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
}
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if subset == nil {
+ return true // we consider nil to be equal to the nil set
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+ }
+ }
+
+ return true
+}
+
+// NotSubset asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if subset == nil {
+ return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return true
+ }
+ }
+
+ return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+}
+
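
Usage sketch for the new pair (hypothetical test): the argument order is (list, subset), and a nil subset counts as the empty set, so Subset accepts it while NotSubset can never pass with it.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetPair(t *testing.T) {
	assert.Subset(t, []int{1, 2, 3}, []int{3, 1}) // element order is irrelevant
	assert.Subset(t, []int{1, 2, 3}, nil)         // nil is the empty set

	assert.NotSubset(t, []int{1, 3, 4}, []int{1, 2})

	// The empty set is a subset of every set, so this always fails.
	mockT := new(testing.T)
	assert.False(t, assert.NotSubset(mockT, []string{"foo"}, nil))
}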
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+//
+// Returns whether the assertion was successful (true) or not (false).
+func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if isEmpty(listA) && isEmpty(listB) {
+ return true
+ }
+
+ aKind := reflect.TypeOf(listA).Kind()
+ bKind := reflect.TypeOf(listB).Kind()
+
+ if aKind != reflect.Array && aKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
+ }
+
+ if bKind != reflect.Array && bKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
+ }
+
+ aValue := reflect.ValueOf(listA)
+ bValue := reflect.ValueOf(listB)
+
+ aLen := aValue.Len()
+ bLen := bValue.Len()
+
+ if aLen != bLen {
+ return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...)
+ }
+
+ // Mark indexes in bValue that we already used
+ visited := make([]bool, bLen)
+ for i := 0; i < aLen; i++ {
+ element := aValue.Index(i).Interface()
+ found := false
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ if ObjectsAreEqual(bValue.Index(j).Interface(), element) {
+ visited[j] = true
+ found = true
+ break
+ }
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...)
+ }
+ }
+
+ return true
+}
+
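
ElementsMatch is effectively multiset equality: order is ignored, but duplicate counts must line up, which is what the visited bookkeeping above enforces. A hypothetical sketch:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestElementsMatchUsage(t *testing.T) {
	// Same elements, different order: passes.
	assert.ElementsMatch(t, []int{1, 3, 2, 3}, []int{3, 1, 3, 2})

	// Duplicate counts must match, so [1] vs [1, 1] fails.
	mockT := new(testing.T)
	assert.False(t, assert.ElementsMatch(mockT, []int{1}, []int{1, 1}))
}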
// Condition uses a Comparison to assert a complex condition.
func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
result := comp()
@@ -685,9 +830,7 @@ func didPanic(f PanicTestFunc) (bool, interface{}) {
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
-// assert.Panics(t, func(){
-// GoCrazy()
-// }, "Calling GoCrazy() should panic")
+// assert.Panics(t, func(){ GoCrazy() })
//
// Returns whether the assertion was successful (true) or not (false).
func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
@@ -699,11 +842,28 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
return true
}
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+
+ funcDidPanic, panicValue := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+ }
+ if panicValue != expected {
+ return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%v\n\r\tPanic value:\t%v", f, expected, panicValue), msgAndArgs...)
+ }
+
+ return true
+}
+
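
A sketch of the new assertion (hypothetical test): both the fact of panicking and the recovered value are checked.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestPanicsWithValueUsage(t *testing.T) {
	assert.PanicsWithValue(t, "boom", func() { panic("boom") })

	// Panicking is not enough; the recovered value must equal the
	// expected one.
	mockT := new(testing.T)
	assert.False(t, assert.PanicsWithValue(mockT, "boom", func() { panic("bang") }))
}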
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
-// assert.NotPanics(t, func(){
-// RemainCalm()
-// }, "Calling RemainCalm() should NOT panic")
+// assert.NotPanics(t, func(){ RemainCalm() })
//
// Returns whether the assertion was successful (true) or not (false).
func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
@@ -717,7 +877,7 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
// WithinDuration asserts that the two times are within duration delta of each other.
//
-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
//
// Returns whether the assertion was successful (true) or not (false).
func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
@@ -757,6 +917,8 @@ func toFloat(x interface{}) (float64, bool) {
xf = float64(xn)
case float64:
xf = float64(xn)
+ case time.Duration:
+ xf = float64(xn)
default:
xok = false
}
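
Since toFloat now understands time.Duration, the delta assertions accept durations directly; the delta is expressed in the duration's base unit, nanoseconds. A hypothetical sketch:

package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestInDeltaWithDurations(t *testing.T) {
	measured := time.Second + 5*time.Millisecond
	// 5ms of drift is within the 10ms (1e7 ns) tolerance.
	assert.InDelta(t, time.Second, measured, float64(10*time.Millisecond))
}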
@@ -779,7 +941,7 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs
}
if math.IsNaN(af) {
- return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
}
if math.IsNaN(bf) {
@@ -806,7 +968,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn
expectedSlice := reflect.ValueOf(expected)
for i := 0; i < actualSlice.Len(); i++ {
- result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta)
+ result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...)
if !result {
return result
}
@@ -815,6 +977,47 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn
return true
}
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Map ||
+ reflect.TypeOf(expected).Kind() != reflect.Map {
+ return Fail(t, "Arguments must be maps", msgAndArgs...)
+ }
+
+ expectedMap := reflect.ValueOf(expected)
+ actualMap := reflect.ValueOf(actual)
+
+ if expectedMap.Len() != actualMap.Len() {
+ return Fail(t, "Arguments must have the same numbe of keys", msgAndArgs...)
+ }
+
+ for _, k := range expectedMap.MapKeys() {
+ ev := expectedMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
+
+ if !ev.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...)
+ }
+
+ if !av.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...)
+ }
+
+ if !InDelta(
+ t,
+ ev.Interface(),
+ av.Interface(),
+ delta,
+ msgAndArgs...,
+ ) {
+ return false
+ }
+ }
+
+ return true
+}
+
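
A hypothetical sketch of InDeltaMapValues: the key sets must match exactly, and each pair of values must be within delta of each other.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestInDeltaMapValuesUsage(t *testing.T) {
	want := map[string]float64{"p50": 0.120, "p99": 0.480}
	got := map[string]float64{"p50": 0.118, "p99": 0.475}

	// Passes: every key exists in both maps and each value pair is
	// within 0.01 of its counterpart.
	assert.InDeltaMapValues(t, want, got, 0.01)
}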
func calcRelativeError(expected, actual interface{}) (float64, error) {
af, aok := toFloat(expected)
if !aok {
@@ -825,7 +1028,7 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
}
bf, bok := toFloat(actual)
if !bok {
- return 0, fmt.Errorf("expected value %q cannot be converted to float", actual)
+ return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
}
return math.Abs(af-bf) / math.Abs(af), nil
@@ -841,7 +1044,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
}
if actualEpsilon > epsilon {
return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
- " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...)
+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
}
return true
@@ -876,13 +1079,13 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
//
// actualObj, err := SomeFunction()
// if assert.NoError(t, err) {
-// assert.Equal(t, actualObj, expectedObj)
+// assert.Equal(t, expectedObj, actualObj)
// }
//
// Returns whether the assertion was successful (true) or not (false).
func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err != nil {
- return Fail(t, fmt.Sprintf("Received unexpected error %+v", err), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
}
return true
@@ -891,8 +1094,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// Error asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
-// if assert.Error(t, err, "An error was expected") {
-// assert.Equal(t, err, expectedError)
+// if assert.Error(t, err) {
+// assert.Equal(t, expectedError, err)
// }
//
// Returns whether the assertion was successful (true) or not (false).
@@ -909,18 +1112,22 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
-// assert.EqualError(t, err, expectedErrorString, "An error was expected")
+// assert.EqualError(t, err, expectedErrorString)
//
// Returns whether the assertion was successful (true) or not (false).
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
-
- message := messageFromMsgAndArgs(msgAndArgs...)
- if !NotNil(t, theError, "An error is expected but got nil. %s", message) {
+ if !Error(t, theError, msgAndArgs...) {
return false
}
- s := "An error with value \"%s\" is expected but got \"%s\". %s"
- return Equal(t, errString, theError.Error(),
- s, errString, theError.Error(), message)
+ expected := errString
+ actual := theError.Error()
+ // don't need to use deep equals here, we know they are both strings
+ if expected != actual {
+ return Fail(t, fmt.Sprintf("Error message not equal:\n"+
+ "expected: %q\n"+
+ "actual : %q", expected, actual), msgAndArgs...)
+ }
+ return true
}
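
EqualError now routes through Error() and then compares the message as a plain string, so a failure reports the expected and actual messages side by side. A hypothetical sketch:

package example_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEqualErrorUsage(t *testing.T) {
	err := errors.New("record not found")
	assert.EqualError(t, err, "record not found")
}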
// matchRegexp returns true if a specified regexp matches a string.
@@ -987,6 +1194,36 @@ func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
return true
}
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...)
+ }
+ return true
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if !info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...)
+ }
+ return true
+}
+
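
A self-contained sketch of the two new filesystem assertions, kept hermetic via a temp directory (names are hypothetical; io/ioutil matches the Go version of this era):

package example_test

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFileAndDirExist(t *testing.T) {
	dir, err := ioutil.TempDir("", "example")
	assert.NoError(t, err)
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "data.txt")
	assert.NoError(t, ioutil.WriteFile(path, []byte("hi"), 0600))

	assert.FileExists(t, path) // would fail if path were a directory
	assert.DirExists(t, dir)   // would fail if dir were a regular file
}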
// JSONEq asserts that two JSON strings are equivalent.
//
// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
@@ -1035,8 +1272,8 @@ func diff(expected interface{}, actual interface{}) string {
return ""
}
- e := spew.Sdump(expected)
- a := spew.Sdump(actual)
+ e := spewConfig.Sdump(expected)
+ a := spewConfig.Sdump(actual)
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
A: difflib.SplitLines(e),
@@ -1050,3 +1287,26 @@ func diff(expected interface{}, actual interface{}) string {
return "\n\nDiff:\n" + diff
}
+
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+ if isFunction(expected) || isFunction(actual) {
+ return errors.New("cannot take func type as argument")
+ }
+ return nil
+}
+
+func isFunction(arg interface{}) bool {
+ if arg == nil {
+ return false
+ }
+ return reflect.TypeOf(arg).Kind() == reflect.Func
+}
+
+var spewConfig = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+}
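
The shared spewConfig is what keeps failure diffs stable: SortKeys makes map dumps deterministic, and disabling pointer addresses and capacities strips per-run noise, so two dumps of equal values are byte-identical. A minimal standalone sketch:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

var cfg = spew.ConfigState{
	Indent:                  " ",
	DisablePointerAddresses: true,
	DisableCapacities:       true,
	SortKeys:                true,
}

func main() {
	// Without SortKeys, Go's randomized map iteration order would make
	// successive dumps differ and produce spurious diff lines.
	fmt.Print(cfg.Sdump(map[string]int{"b": 2, "a": 1}))
}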
diff --git a/vendor/github.com/stretchr/testify/assert/assertions_test.go b/vendor/github.com/stretchr/testify/assert/assertions_test.go
index ac9b70172..6757bd138 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions_test.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions_test.go
@@ -1,12 +1,16 @@
package assert
import (
+ "bytes"
"errors"
+ "fmt"
"io"
"math"
"os"
"reflect"
"regexp"
+ "runtime"
+ "strings"
"testing"
"time"
)
@@ -151,6 +155,9 @@ func TestImplements(t *testing.T) {
if Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) {
t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface")
}
+ if Implements(mockT, (*AssertionTesterInterface)(nil), nil) {
+ t.Error("Implements method should return false: nil does not implement AssertionTesterInterface")
+ }
}
@@ -192,7 +199,71 @@ func TestEqual(t *testing.T) {
if !Equal(mockT, uint64(123), uint64(123)) {
t.Error("Equal should return true")
}
+ if !Equal(mockT, &struct{}{}, &struct{}{}) {
+ t.Error("Equal should return true (pointer equality is based on equality of underlying value)")
+ }
+ var m map[string]interface{}
+ if Equal(mockT, m["bar"], "something") {
+ t.Error("Equal should return false")
+ }
+}
+// bufferT implements TestingT. Its implementation of Errorf writes the output that would be produced by
+// testing.T.Errorf to an internal bytes.Buffer.
+type bufferT struct {
+ buf bytes.Buffer
+}
+
+func (t *bufferT) Errorf(format string, args ...interface{}) {
+ // implementation of decorate is copied from testing.T
+ decorate := func(s string) string {
+ _, file, line, ok := runtime.Caller(3) // decorate + log + public function.
+ if ok {
+ // Truncate file name at last file name separator.
+ if index := strings.LastIndex(file, "/"); index >= 0 {
+ file = file[index+1:]
+ } else if index = strings.LastIndex(file, "\\"); index >= 0 {
+ file = file[index+1:]
+ }
+ } else {
+ file = "???"
+ line = 1
+ }
+ buf := new(bytes.Buffer)
+ // Every line is indented at least one tab.
+ buf.WriteByte('\t')
+ fmt.Fprintf(buf, "%s:%d: ", file, line)
+ lines := strings.Split(s, "\n")
+ if l := len(lines); l > 1 && lines[l-1] == "" {
+ lines = lines[:l-1]
+ }
+ for i, line := range lines {
+ if i > 0 {
+ // Second and subsequent lines are indented an extra tab.
+ buf.WriteString("\n\t\t")
+ }
+ buf.WriteString(line)
+ }
+ buf.WriteByte('\n')
+ return buf.String()
+ }
+ t.buf.WriteString(decorate(fmt.Sprintf(format, args...)))
+}
+
+func TestEqualFormatting(t *testing.T) {
+ for i, currCase := range []struct {
+ equalWant string
+ equalGot string
+ msgAndArgs []interface{}
+ want string
+ }{
+ {equalWant: "want", equalGot: "got", want: "\tassertions.go:[0-9]+: \r \r\tError Trace:\t\n\t\t\r\tError: \tNot equal: \n\t\t\r\t \texpected: \"want\"\n\t\t\r\t \tactual : \"got\"\n"},
+ {equalWant: "want", equalGot: "got", msgAndArgs: []interface{}{"hello, %v!", "world"}, want: "\tassertions.go:[0-9]+: \r \r\tError Trace:\t\n\t\t\r\tError: \tNot equal: \n\t\t\r\t \texpected: \"want\"\n\t\t\r\t \tactual : \"got\"\n\t\t\r\tMessages: \thello, world!\n"},
+ } {
+ mockT := &bufferT{}
+ Equal(mockT, currCase.equalWant, currCase.equalGot, currCase.msgAndArgs...)
+ Regexp(t, regexp.MustCompile(currCase.want), mockT.buf.String(), "Case %d", i)
+ }
}
func TestFormatUnequalValues(t *testing.T) {
@@ -208,6 +279,10 @@ func TestFormatUnequalValues(t *testing.T) {
Equal(t, `int64(123)`, expected, "value should include type")
Equal(t, `int32(123)`, actual, "value should include type")
+ expected, actual = formatUnequalValues(int64(123), nil)
+ Equal(t, `int64(123)`, expected, "value should include type")
+ Equal(t, `<nil>(<nil>)`, actual, "value should include type")
+
type testStructType struct {
Val string
}
@@ -324,8 +399,8 @@ func TestNotEqual(t *testing.T) {
}
funcA := func() int { return 23 }
funcB := func() int { return 42 }
- if !NotEqual(mockT, funcA, funcB) {
- t.Error("NotEqual should return true")
+ if NotEqual(mockT, funcA, funcB) {
+ t.Error("NotEqual should return false")
}
if NotEqual(mockT, "Hello World", "Hello World") {
@@ -343,6 +418,9 @@ func TestNotEqual(t *testing.T) {
if NotEqual(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) {
t.Error("NotEqual should return false")
}
+ if NotEqual(mockT, &struct{}{}, &struct{}{}) {
+ t.Error("NotEqual should return false")
+ }
}
type A struct {
@@ -418,6 +496,74 @@ func TestNotContains(t *testing.T) {
}
}
+func TestSubset(t *testing.T) {
+ mockT := new(testing.T)
+
+ if !Subset(mockT, []int{1, 2, 3}, nil) {
+ t.Error("Subset should return true: given subset is nil")
+ }
+ if !Subset(mockT, []int{1, 2, 3}, []int{}) {
+ t.Error("Subset should return true: any set contains the nil set")
+ }
+ if !Subset(mockT, []int{1, 2, 3}, []int{1, 2}) {
+ t.Error("Subset should return true: [1, 2, 3] contains [1, 2]")
+ }
+ if !Subset(mockT, []int{1, 2, 3}, []int{1, 2, 3}) {
+ t.Error("Subset should return true: [1, 2, 3] contains [1, 2, 3]")
+ }
+ if !Subset(mockT, []string{"hello", "world"}, []string{"hello"}) {
+ t.Error("Subset should return true: [\"hello\", \"world\"] contains [\"hello\"]")
+ }
+
+ if Subset(mockT, []string{"hello", "world"}, []string{"hello", "testify"}) {
+ t.Error("Subset should return false: [\"hello\", \"world\"] does not contain [\"hello\", \"testify\"]")
+ }
+ if Subset(mockT, []int{1, 2, 3}, []int{4, 5}) {
+ t.Error("Subset should return false: [1, 2, 3] does not contain [4, 5]")
+ }
+ if Subset(mockT, []int{1, 2, 3}, []int{1, 5}) {
+ t.Error("Subset should return false: [1, 2, 3] does not contain [1, 5]")
+ }
+}
+
+func TestNotSubset(t *testing.T) {
+ mockT := new(testing.T)
+
+ if NotSubset(mockT, []int{1, 2, 3}, nil) {
+ t.Error("NotSubset should return false: given subset is nil")
+ }
+ if NotSubset(mockT, []int{1, 2, 3}, []int{}) {
+ t.Error("NotSubset should return false: any set contains the nil set")
+ }
+ if NotSubset(mockT, []int{1, 2, 3}, []int{1, 2}) {
+ t.Error("NotSubset should return false: [1, 2, 3] contains [1, 2]")
+ }
+ if NotSubset(mockT, []int{1, 2, 3}, []int{1, 2, 3}) {
+ t.Error("NotSubset should return false: [1, 2, 3] contains [1, 2, 3]")
+ }
+ if NotSubset(mockT, []string{"hello", "world"}, []string{"hello"}) {
+ t.Error("NotSubset should return false: [\"hello\", \"world\"] contains [\"hello\"]")
+ }
+
+ if !NotSubset(mockT, []string{"hello", "world"}, []string{"hello", "testify"}) {
+ t.Error("NotSubset should return true: [\"hello\", \"world\"] does not contain [\"hello\", \"testify\"]")
+ }
+ if !NotSubset(mockT, []int{1, 2, 3}, []int{4, 5}) {
+ t.Error("NotSubset should return true: [1, 2, 3] does not contain [4, 5]")
+ }
+ if !NotSubset(mockT, []int{1, 2, 3}, []int{1, 5}) {
+ t.Error("NotSubset should return true: [1, 2, 3] does not contain [1, 5]")
+ }
+}
+
+func TestNotSubsetNil(t *testing.T) {
+ mockT := new(testing.T)
+ NotSubset(mockT, []string{"foo"}, nil)
+ if !mockT.Failed() {
+ t.Error("NotSubset on nil set should have failed the test")
+ }
+}
+
func Test_includeElement(t *testing.T) {
list1 := []string{"Foo", "Bar"}
@@ -469,6 +615,57 @@ func Test_includeElement(t *testing.T) {
False(t, found)
}
+func TestElementsMatch(t *testing.T) {
+ mockT := new(testing.T)
+
+ if !ElementsMatch(mockT, nil, nil) {
+ t.Error("ElementsMatch should return true")
+ }
+ if !ElementsMatch(mockT, []int{}, []int{}) {
+ t.Error("ElementsMatch should return true")
+ }
+ if !ElementsMatch(mockT, []int{1}, []int{1}) {
+ t.Error("ElementsMatch should return true")
+ }
+ if !ElementsMatch(mockT, []int{1, 1}, []int{1, 1}) {
+ t.Error("ElementsMatch should return true")
+ }
+ if !ElementsMatch(mockT, []int{1, 2}, []int{1, 2}) {
+ t.Error("ElementsMatch should return true")
+ }
+ if !ElementsMatch(mockT, []int{1, 2}, []int{2, 1}) {
+ t.Error("ElementsMatch should return true")
+ }
+ if !ElementsMatch(mockT, [2]int{1, 2}, [2]int{2, 1}) {
+ t.Error("ElementsMatch should return true")
+ }
+ if !ElementsMatch(mockT, []string{"hello", "world"}, []string{"world", "hello"}) {
+ t.Error("ElementsMatch should return true")
+ }
+ if !ElementsMatch(mockT, []string{"hello", "hello"}, []string{"hello", "hello"}) {
+ t.Error("ElementsMatch should return true")
+ }
+ if !ElementsMatch(mockT, []string{"hello", "hello", "world"}, []string{"hello", "world", "hello"}) {
+ t.Error("ElementsMatch should return true")
+ }
+ if !ElementsMatch(mockT, [3]string{"hello", "hello", "world"}, [3]string{"hello", "world", "hello"}) {
+ t.Error("ElementsMatch should return true")
+ }
+ if !ElementsMatch(mockT, []int{}, nil) {
+ t.Error("ElementsMatch should return true")
+ }
+
+ if ElementsMatch(mockT, []int{1}, []int{1, 1}) {
+ t.Error("ElementsMatch should return false")
+ }
+ if ElementsMatch(mockT, []int{1, 2}, []int{2, 2}) {
+ t.Error("ElementsMatch should return false")
+ }
+ if ElementsMatch(mockT, []string{"hello", "hello"}, []string{"hello"}) {
+ t.Error("ElementsMatch should return false")
+ }
+}
+
func TestCondition(t *testing.T) {
mockT := new(testing.T)
@@ -514,6 +711,28 @@ func TestPanics(t *testing.T) {
}
+func TestPanicsWithValue(t *testing.T) {
+
+ mockT := new(testing.T)
+
+ if !PanicsWithValue(mockT, "Panic!", func() {
+ panic("Panic!")
+ }) {
+ t.Error("PanicsWithValue should return true")
+ }
+
+ if PanicsWithValue(mockT, "Panic!", func() {
+ }) {
+ t.Error("PanicsWithValue should return false")
+ }
+
+ if PanicsWithValue(mockT, "at the disco", func() {
+ panic("Panic!")
+ }) {
+ t.Error("PanicsWithValue should return false")
+ }
+}
+
func TestNotPanics(t *testing.T) {
mockT := new(testing.T)
@@ -555,7 +774,7 @@ func TestNoError(t *testing.T) {
}()
if err == nil { // err is not nil here!
- t.Errorf("Error should be nil due to empty interface", err)
+ t.Errorf("Error should be nil due to empty interface: %s", err)
}
False(t, NoError(mockT, err), "NoError should fail with empty error interface")
@@ -579,6 +798,9 @@ func TestError(t *testing.T) {
True(t, Error(mockT, err), "Error with error should return True")
+ // go vet check
+ True(t, Errorf(mockT, err, "example with %s", "formatted message"), "Errorf with error should return True")
+
// returning an empty error interface
err = func() error {
var err *customError
@@ -589,7 +811,7 @@ func TestError(t *testing.T) {
}()
if err == nil { // err is not nil here!
- t.Errorf("Error should be nil due to empty interface", err)
+ t.Errorf("Error should be nil due to empty interface: %s", err)
}
True(t, Error(mockT, err), "Error should pass with empty error interface")
@@ -646,6 +868,15 @@ func TestEmpty(t *testing.T) {
var tiNP time.Time
var s *string
var f *os.File
+ sP := &s
+ x := 1
+ xP := &x
+
+ type TString string
+ type TStruct struct {
+ x int
+ s []int
+ }
True(t, Empty(mockT, ""), "Empty string is empty")
True(t, Empty(mockT, nil), "Nil is empty")
@@ -657,6 +888,9 @@ func TestEmpty(t *testing.T) {
True(t, Empty(mockT, f), "Nil os.File pointer is empty")
True(t, Empty(mockT, tiP), "Nil time.Time pointer is empty")
True(t, Empty(mockT, tiNP), "time.Time is empty")
+ True(t, Empty(mockT, TStruct{}), "struct with zero values is empty")
+ True(t, Empty(mockT, TString("")), "empty aliased string is empty")
+ True(t, Empty(mockT, sP), "ptr to nil value is empty")
False(t, Empty(mockT, "something"), "Non Empty string is not empty")
False(t, Empty(mockT, errors.New("something")), "Non nil object is not empty")
@@ -664,6 +898,9 @@ func TestEmpty(t *testing.T) {
False(t, Empty(mockT, 1), "Non-zero int value is not empty")
False(t, Empty(mockT, true), "True value is not empty")
False(t, Empty(mockT, chWithValue), "Channel with values is not empty")
+ False(t, Empty(mockT, TStruct{x: 1}), "struct with initialized values is not empty")
+ False(t, Empty(mockT, TString("abc")), "non-empty aliased string is not empty")
+ False(t, Empty(mockT, xP), "ptr to non-nil value is not empty")
}
func TestNotEmpty(t *testing.T) {
@@ -870,6 +1107,82 @@ func TestInDeltaSlice(t *testing.T) {
False(t, InDeltaSlice(mockT, "", nil, 1), "Expected non numeral slices to fail")
}
+func TestInDeltaMapValues(t *testing.T) {
+ mockT := new(testing.T)
+
+ for _, tc := range []struct {
+ title string
+ expect interface{}
+ actual interface{}
+ f func(TestingT, bool, ...interface{}) bool
+ delta float64
+ }{
+ {
+ title: "Within delta",
+ expect: map[string]float64{
+ "foo": 1.0,
+ "bar": 2.0,
+ },
+ actual: map[string]float64{
+ "foo": 1.01,
+ "bar": 1.99,
+ },
+ delta: 0.1,
+ f: True,
+ },
+ {
+ title: "Within delta",
+ expect: map[int]float64{
+ 1: 1.0,
+ 2: 2.0,
+ },
+ actual: map[int]float64{
+ 1: 1.0,
+ 2: 1.99,
+ },
+ delta: 0.1,
+ f: True,
+ },
+ {
+ title: "Different number of keys",
+ expect: map[int]float64{
+ 1: 1.0,
+ 2: 2.0,
+ },
+ actual: map[int]float64{
+ 1: 1.0,
+ },
+ delta: 0.1,
+ f: False,
+ },
+ {
+ title: "Within delta with zero value",
+ expect: map[string]float64{
+ "zero": 0.0,
+ },
+ actual: map[string]float64{
+ "zero": 0.0,
+ },
+ delta: 0.1,
+ f: True,
+ },
+ {
+ title: "With missing key with zero value",
+ expect: map[string]float64{
+ "zero": 0.0,
+ "foo": 0.0,
+ },
+ actual: map[string]float64{
+ "zero": 0.0,
+ "bar": 0.0,
+ },
+ f: False,
+ },
+ } {
+ tc.f(t, InDeltaMapValues(mockT, tc.expect, tc.actual, tc.delta), tc.title+"\n"+diff(tc.expect, tc.actual))
+ }
+}
+
func TestInEpsilon(t *testing.T) {
mockT := new(testing.T)
@@ -885,6 +1198,7 @@ func TestInEpsilon(t *testing.T) {
{uint64(100), uint8(101), 0.01},
{0.1, -0.1, 2},
{0.1, 0, 2},
+ {time.Second, time.Second + time.Millisecond, 0.002},
}
for _, tc := range cases {
@@ -903,6 +1217,7 @@ func TestInEpsilon(t *testing.T) {
{2.1, "bla-bla", 0},
{0.1, -0.1, 1.99},
{0, 0.1, 2}, // expected must be different to zero
+ {time.Second, time.Second + 10*time.Millisecond, 0.002},
}
for _, tc := range cases {
@@ -1006,6 +1321,28 @@ func TestNotZero(t *testing.T) {
}
}
+func TestFileExists(t *testing.T) {
+ mockT := new(testing.T)
+ True(t, FileExists(mockT, "assertions.go"))
+
+ mockT = new(testing.T)
+ False(t, FileExists(mockT, "random_file"))
+
+ mockT = new(testing.T)
+ False(t, FileExists(mockT, "../_codegen"))
+}
+
+func TestDirExists(t *testing.T) {
+ mockT := new(testing.T)
+ False(t, DirExists(mockT, "assertions.go"))
+
+ mockT = new(testing.T)
+ False(t, DirExists(mockT, "random_dir"))
+
+ mockT = new(testing.T)
+ True(t, DirExists(mockT, "../_codegen"))
+}
+
func TestJSONEq_EqualSONString(t *testing.T) {
mockT := new(testing.T)
True(t, JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`))
@@ -1208,3 +1545,37 @@ func TestFailNowWithFullTestingT(t *testing.T) {
FailNow(mockT, "failed")
}, "should call mockT.FailNow() rather than panicking")
}
+
+func TestBytesEqual(t *testing.T) {
+ var cases = []struct {
+ a, b []byte
+ }{
+ {make([]byte, 2), make([]byte, 2)},
+ {make([]byte, 2), make([]byte, 2, 3)},
+ {nil, make([]byte, 0)},
+ }
+ for i, c := range cases {
+ Equal(t, reflect.DeepEqual(c.a, c.b), ObjectsAreEqual(c.a, c.b), "case %d failed", i+1)
+ }
+}
+
+func BenchmarkBytesEqual(b *testing.B) {
+ const size = 1024 * 8
+ s := make([]byte, size)
+ for i := range s {
+ s[i] = byte(i % 255)
+ }
+ s2 := make([]byte, size)
+ copy(s2, s)
+
+ mockT := &mockFailNowTestingT{}
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Equal(mockT, s, s2)
+ }
+}
+
+func TestEqualArgsValidation(t *testing.T) {
+ err := validateEqualArgs(time.Now, time.Now)
+ EqualError(t, err, "cannot take func type as argument")
+}
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
index b867e95ea..9ad56851d 100644
--- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
}
}
-//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index fa7ab89b1..3101e78dd 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -8,16 +8,16 @@ import (
"strings"
)
-// httpCode is a helper that returns HTTP code of the response. It returns -1
-// if building a new request fails.
-func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int {
+// httpCode is a helper that returns the HTTP code of the response. It returns -1 and
+// an error if building a new request fails.
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
if err != nil {
- return -1
+ return -1, err
}
handler(w, req)
- return w.Code
+ return w.Code, nil
}
// HTTPSuccess asserts that a specified handler returns a success status code.
@@ -25,12 +25,19 @@ func httpCode(handler http.HandlerFunc, method, url string, values url.Values) i
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
- code := httpCode(handler, method, url, values)
- if code == -1 {
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
}
- return code >= http.StatusOK && code <= http.StatusPartialContent
+
+ isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
+ if !isSuccessCode {
+ Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isSuccessCode
}
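
With this change a failed HTTP assertion reports the status code it actually received instead of silently returning false. A hypothetical handler sketch:

package example_test

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func healthHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}

func TestHealthEndpoint(t *testing.T) {
	// On failure this now logs the URL queried and the code received.
	assert.HTTPSuccess(t, healthHandler, "GET", "/health", nil)
}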
// HTTPRedirect asserts that a specified handler returns a redirect status code.
@@ -38,12 +45,19 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
- code := httpCode(handler, method, url, values)
- if code == -1 {
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
}
- return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+
+ isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+ if !isRedirectCode {
+ Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isRedirectCode
}
// HTTPError asserts that a specified handler returns an error status code.
@@ -51,12 +65,19 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
- code := httpCode(handler, method, url, values)
- if code == -1 {
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
}
- return code >= http.StatusBadRequest
+
+ isErrorCode := code >= http.StatusBadRequest
+ if !isErrorCode {
+ Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isErrorCode
}
// HTTPBody is a helper that returns HTTP body of the response. It returns
@@ -77,7 +98,7 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) s
// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
body := HTTPBody(handler, method, url, values)
contains := strings.Contains(body, fmt.Sprint(str))
@@ -94,7 +115,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
body := HTTPBody(handler, method, url, values)
contains := strings.Contains(body, fmt.Sprint(str))
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions_test.go b/vendor/github.com/stretchr/testify/assert/http_assertions_test.go
index 684c2d5d1..3ab76830f 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions_test.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions_test.go
@@ -19,21 +19,52 @@ func httpError(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
}
-func TestHTTPStatuses(t *testing.T) {
+func TestHTTPSuccess(t *testing.T) {
+ assert := New(t)
+
+ mockT1 := new(testing.T)
+ assert.Equal(HTTPSuccess(mockT1, httpOK, "GET", "/", nil), true)
+ assert.False(mockT1.Failed())
+
+ mockT2 := new(testing.T)
+ assert.Equal(HTTPSuccess(mockT2, httpRedirect, "GET", "/", nil), false)
+ assert.True(mockT2.Failed())
+
+ mockT3 := new(testing.T)
+ assert.Equal(HTTPSuccess(mockT3, httpError, "GET", "/", nil), false)
+ assert.True(mockT3.Failed())
+}
+
+func TestHTTPRedirect(t *testing.T) {
+ assert := New(t)
+
+ mockT1 := new(testing.T)
+ assert.Equal(HTTPRedirect(mockT1, httpOK, "GET", "/", nil), false)
+ assert.True(mockT1.Failed())
+
+ mockT2 := new(testing.T)
+ assert.Equal(HTTPRedirect(mockT2, httpRedirect, "GET", "/", nil), true)
+ assert.False(mockT2.Failed())
+
+ mockT3 := new(testing.T)
+ assert.Equal(HTTPRedirect(mockT3, httpError, "GET", "/", nil), false)
+ assert.True(mockT3.Failed())
+}
+
+func TestHTTPError(t *testing.T) {
assert := New(t)
- mockT := new(testing.T)
- assert.Equal(HTTPSuccess(mockT, httpOK, "GET", "/", nil), true)
- assert.Equal(HTTPSuccess(mockT, httpRedirect, "GET", "/", nil), false)
- assert.Equal(HTTPSuccess(mockT, httpError, "GET", "/", nil), false)
+ mockT1 := new(testing.T)
+ assert.Equal(HTTPError(mockT1, httpOK, "GET", "/", nil), false)
+ assert.True(mockT1.Failed())
- assert.Equal(HTTPRedirect(mockT, httpOK, "GET", "/", nil), false)
- assert.Equal(HTTPRedirect(mockT, httpRedirect, "GET", "/", nil), true)
- assert.Equal(HTTPRedirect(mockT, httpError, "GET", "/", nil), false)
+ mockT2 := new(testing.T)
+ assert.Equal(HTTPError(mockT2, httpRedirect, "GET", "/", nil), false)
+ assert.True(mockT2.Failed())
- assert.Equal(HTTPError(mockT, httpOK, "GET", "/", nil), false)
- assert.Equal(HTTPError(mockT, httpRedirect, "GET", "/", nil), false)
- assert.Equal(HTTPError(mockT, httpError, "GET", "/", nil), true)
+ mockT3 := new(testing.T)
+ assert.Equal(HTTPError(mockT3, httpError, "GET", "/", nil), true)
+ assert.False(mockT3.Failed())
}
func TestHTTPStatusesWrapper(t *testing.T) {
diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go
index 20d7b8b1f..208b838a3 100644
--- a/vendor/github.com/stretchr/testify/mock/mock.go
+++ b/vendor/github.com/stretchr/testify/mock/mock.go
@@ -1,6 +1,7 @@
package mock
import (
+ "errors"
"fmt"
"reflect"
"regexp"
@@ -15,10 +16,6 @@ import (
"github.com/stretchr/testify/assert"
)
-func inin() {
- spew.Config.SortKeys = true
-}
-
// TestingT is an interface wrapper around *testing.T
type TestingT interface {
Logf(format string, args ...interface{})
@@ -52,10 +49,15 @@ type Call struct {
// Amount of times this call has been called
totalCalls int
+ // Call to this method can be optional
+ optional bool
+
// Holds a channel that will be used to block the Return until it either
// receives a message or is closed. nil means it returns immediately.
WaitFor <-chan time.Time
+ waitTime time.Duration
+
// Holds a handler used to manipulate arguments content that are passed by
// reference. It's useful when mocking methods such as unmarshalers or
// decoders.
@@ -134,7 +136,10 @@ func (c *Call) WaitUntil(w <-chan time.Time) *Call {
//
// Mock.On("MyMethod", arg1, arg2).After(time.Second)
func (c *Call) After(d time.Duration) *Call {
- return c.WaitUntil(time.After(d))
+ c.lock()
+ defer c.unlock()
+ c.waitTime = d
+ return c
}
// Run sets a handler to be called before returning. It can be used when
@@ -145,13 +150,22 @@ func (c *Call) After(d time.Duration) *Call {
// arg := args.Get(0).(*map[string]interface{})
// arg["foo"] = "bar"
// })
-func (c *Call) Run(fn func(Arguments)) *Call {
+func (c *Call) Run(fn func(args Arguments)) *Call {
c.lock()
defer c.unlock()
c.RunFn = fn
return c
}
+// Maybe allows the method call to be optional. Not calling an optional method
+// will not cause an error while asserting expectations.
+func (c *Call) Maybe() *Call {
+ c.lock()
+ defer c.unlock()
+ c.optional = true
+ return c
+}
+
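
A sketch of Maybe on a hypothetical mock: the expectation is registered, but AssertExpectations tolerates it never being exercised.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

type Cache struct{ mock.Mock }

func (c *Cache) Invalidate(key string) { c.Called(key) }

func TestMaybeUsage(t *testing.T) {
	c := new(Cache)
	// Without Maybe, this unused expectation would fail the test.
	c.On("Invalidate", "user:1").Return().Maybe()

	c.AssertExpectations(t)
}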
// On chains a new expectation description onto the mocked interface. This
// allows syntax like.
//
@@ -218,8 +232,6 @@ func (m *Mock) On(methodName string, arguments ...interface{}) *Call {
// */
func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
for i, call := range m.ExpectedCalls {
if call.Method == method && call.Repeatability > -1 {
@@ -283,7 +295,7 @@ func (m *Mock) Called(arguments ...interface{}) Arguments {
functionPath := runtime.FuncForPC(pc).Name()
//Next four lines are required to use GCCGO function naming conventions.
//For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock
- //uses inteface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree
+ //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree
//With GCCGO we need to remove interface information starting from pN<dd>.
re := regexp.MustCompile("\\.pN\\d+_")
if re.MatchString(functionPath) {
@@ -291,8 +303,16 @@ func (m *Mock) Called(arguments ...interface{}) Arguments {
}
parts := strings.Split(functionPath, ".")
functionName := parts[len(parts)-1]
+ return m.MethodCalled(functionName, arguments...)
+}
- found, call := m.findExpectedCall(functionName, arguments...)
+// MethodCalled tells the mock object that the given method has been called, and gets
+// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded
+// by appropriate .On .Return() calls)
+// If Call.WaitFor is set, blocks until the channel is closed or receives a message.
+func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments {
+ m.mutex.Lock()
+ found, call := m.findExpectedCall(methodName, arguments...)
if found < 0 {
// we have to fail here - because we don't know what to do
@@ -302,45 +322,47 @@ func (m *Mock) Called(arguments ...interface{}) Arguments {
// b) the arguments are not what was expected, or
// c) the developer has forgotten to add an accompanying On...Return pair.
- closestFound, closestCall := m.findClosestCall(functionName, arguments...)
+ closestFound, closestCall := m.findClosestCall(methodName, arguments...)
+ m.mutex.Unlock()
if closestFound {
- panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true), diffArguments(arguments, closestCall.Arguments)))
+ panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\n", callString(methodName, arguments, true), callString(methodName, closestCall.Arguments, true), diffArguments(closestCall.Arguments, arguments)))
} else {
- panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo()))
- }
- } else {
- m.mutex.Lock()
- switch {
- case call.Repeatability == 1:
- call.Repeatability = -1
- call.totalCalls++
-
- case call.Repeatability > 1:
- call.Repeatability--
- call.totalCalls++
-
- case call.Repeatability == 0:
- call.totalCalls++
+ panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()))
}
- m.mutex.Unlock()
}
+ if call.Repeatability == 1 {
+ call.Repeatability = -1
+ } else if call.Repeatability > 1 {
+ call.Repeatability--
+ }
+ call.totalCalls++
+
// add the call
- m.mutex.Lock()
- m.Calls = append(m.Calls, *newCall(m, functionName, arguments...))
+ m.Calls = append(m.Calls, *newCall(m, methodName, arguments...))
m.mutex.Unlock()
// block if specified
if call.WaitFor != nil {
<-call.WaitFor
+ } else {
+ time.Sleep(call.waitTime)
}
- if call.RunFn != nil {
- call.RunFn(arguments)
+ m.mutex.Lock()
+ runFn := call.RunFn
+ m.mutex.Unlock()
+
+ if runFn != nil {
+ runFn(arguments)
}
- return call.ReturnArguments
+ m.mutex.Lock()
+ returnArgs := call.ReturnArguments
+ m.mutex.Unlock()
+
+ return returnArgs
}
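
MethodCalled is the explicit-name entry point that Called now delegates to; it is useful when the method name cannot be recovered from the call stack, e.g. mocks invoked through wrappers. A hypothetical sketch:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

type Store struct{ mock.Mock }

func (s *Store) Get(key string) (string, error) {
	// Name the method explicitly instead of relying on the runtime
	// function-name lookup performed by Called.
	args := s.MethodCalled("Get", key)
	return args.String(0), args.Error(1)
}

func TestMethodCalledUsage(t *testing.T) {
	s := new(Store)
	s.On("Get", "id").Return("value", nil)

	v, err := s.Get("id")
	if v != "value" || err != nil {
		t.Fatalf("unexpected result: %q, %v", v, err)
	}
	s.AssertExpectations(t)
}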
/*
@@ -372,25 +394,25 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool {
// AssertExpectations asserts that everything specified with On and Return was
// in fact called as expected. Calls may have occurred in any order.
func (m *Mock) AssertExpectations(t TestingT) bool {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
var somethingMissing bool
var failedExpectations int
// iterate through each expectation
expectedCalls := m.expectedCalls()
for _, expectedCall := range expectedCalls {
- if !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 {
+ if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 {
somethingMissing = true
failedExpectations++
t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String())
} else {
- m.mutex.Lock()
if expectedCall.Repeatability > 0 {
somethingMissing = true
failedExpectations++
} else {
t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String())
}
- m.mutex.Unlock()
}
}
@@ -403,6 +425,8 @@ func (m *Mock) AssertExpectations(t TestingT) bool {
// AssertNumberOfCalls asserts that the method was called expectedCalls times.
func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
var actualCalls int
for _, call := range m.calls() {
if call.Method == methodName {
@@ -415,6 +439,8 @@ func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls
// AssertCalled asserts that the method was called.
// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method.
func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) {
t.Logf("%v", m.expectedCalls())
return false
@@ -425,6 +451,8 @@ func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interfac
// AssertNotCalled asserts that the method was not called.
// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method.
func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) {
t.Logf("%v", m.expectedCalls())
return false
@@ -450,14 +478,10 @@ func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool {
}
func (m *Mock) expectedCalls() []*Call {
- m.mutex.Lock()
- defer m.mutex.Unlock()
return append([]*Call{}, m.ExpectedCalls...)
}
func (m *Mock) calls() []Call {
- m.mutex.Lock()
- defer m.mutex.Unlock()
return append([]Call{}, m.Calls...)
}
@@ -496,9 +520,25 @@ type argumentMatcher struct {
func (f argumentMatcher) Matches(argument interface{}) bool {
expectType := f.fn.Type().In(0)
+ expectTypeNilSupported := false
+ switch expectType.Kind() {
+ case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr:
+ expectTypeNilSupported = true
+ }
- if reflect.TypeOf(argument).AssignableTo(expectType) {
- result := f.fn.Call([]reflect.Value{reflect.ValueOf(argument)})
+ argType := reflect.TypeOf(argument)
+ var arg reflect.Value
+ if argType == nil {
+ arg = reflect.New(expectType).Elem()
+ } else {
+ arg = reflect.ValueOf(argument)
+ }
+
+ if argType == nil && !expectTypeNilSupported {
+ panic(errors.New("attempting to call matcher with nil for non-nil expected type"))
+ }
+ if argType == nil || argType.AssignableTo(expectType) {
+ result := f.fn.Call([]reflect.Value{arg})
return result[0].Bool()
}
return false
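
The nil handling above lets an untyped nil argument reach a matcher whose parameter type can hold nil, instead of panicking on a nil reflect.Type. A hypothetical sketch:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

type Notifier struct{ mock.Mock }

func (n *Notifier) Send(payload interface{}) error {
	return n.Called(payload).Error(0)
}

func TestMatchedByNilArgument(t *testing.T) {
	n := new(Notifier)
	// payload is an interface, so nil is a legal value for the matcher.
	n.On("Send", mock.MatchedBy(func(payload interface{}) bool {
		return payload == nil
	})).Return(nil)

	assert.NoError(t, n.Send(nil))
}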
@@ -518,7 +558,7 @@ func (f argumentMatcher) String() string {
//
// |fn|, must be a function accepting a single argument (of the expected type)
// which returns a bool. If |fn| doesn't match the required signature,
-// MathedBy() panics.
+// MatchedBy() panics.
func MatchedBy(fn interface{}) argumentMatcher {
fnType := reflect.TypeOf(fn)
@@ -719,6 +759,10 @@ func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
}
func diffArguments(expected Arguments, actual Arguments) string {
+ if len(expected) != len(actual) {
+ return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual))
+ }
+
for x := range expected {
if diffString := diff(expected[x], actual[x]); diffString != "" {
return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString)
@@ -746,8 +790,8 @@ func diff(expected interface{}, actual interface{}) string {
return ""
}
- e := spew.Sdump(expected)
- a := spew.Sdump(actual)
+ e := spewConfig.Sdump(expected)
+ a := spewConfig.Sdump(actual)
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
A: difflib.SplitLines(e),
@@ -761,3 +805,10 @@ func diff(expected interface{}, actual interface{}) string {
return diff
}
+
+var spewConfig = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+}
diff --git a/vendor/github.com/stretchr/testify/mock/mock_test.go b/vendor/github.com/stretchr/testify/mock/mock_test.go
index 8cb4615db..cb245ba59 100644
--- a/vendor/github.com/stretchr/testify/mock/mock_test.go
+++ b/vendor/github.com/stretchr/testify/mock/mock_test.go
@@ -2,10 +2,13 @@ package mock
import (
"errors"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
+ "fmt"
+ "sync"
"testing"
"time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
/*
@@ -40,6 +43,26 @@ func (i *TestExampleImplementation) TheExampleMethod3(et *ExampleType) error {
return args.Error(0)
}
+func (i *TestExampleImplementation) TheExampleMethod4(v ExampleInterface) error {
+ args := i.Called(v)
+ return args.Error(0)
+}
+
+func (i *TestExampleImplementation) TheExampleMethod5(ch chan struct{}) error {
+ args := i.Called(ch)
+ return args.Error(0)
+}
+
+func (i *TestExampleImplementation) TheExampleMethod6(m map[string]bool) error {
+ args := i.Called(m)
+ return args.Error(0)
+}
+
+func (i *TestExampleImplementation) TheExampleMethod7(slice []bool) error {
+ args := i.Called(slice)
+ return args.Error(0)
+}
+
func (i *TestExampleImplementation) TheExampleMethodFunc(fn func(string) error) error {
args := i.Called(fn)
return args.Error(0)
@@ -55,6 +78,11 @@ func (i *TestExampleImplementation) TheExampleMethodVariadicInterface(a ...inter
return args.Error(0)
}
+func (i *TestExampleImplementation) TheExampleMethodMixedVariadic(a int, b ...int) error {
+ args := i.Called(a, b)
+ return args.Error(0)
+}
+
type ExampleFuncType func(string) error
func (i *TestExampleImplementation) TheExampleMethodFuncType(fn ExampleFuncType) error {
@@ -174,15 +202,20 @@ func Test_Mock_On_WithPtrArgMatcher(t *testing.T) {
var mockedService TestExampleImplementation
mockedService.On("TheExampleMethod3",
- MatchedBy(func(a *ExampleType) bool { return a.ran == true }),
+ MatchedBy(func(a *ExampleType) bool { return a != nil && a.ran == true }),
).Return(nil)
mockedService.On("TheExampleMethod3",
- MatchedBy(func(a *ExampleType) bool { return a.ran == false }),
+ MatchedBy(func(a *ExampleType) bool { return a != nil && a.ran == false }),
).Return(errors.New("error"))
+ mockedService.On("TheExampleMethod3",
+ MatchedBy(func(a *ExampleType) bool { return a == nil }),
+ ).Return(errors.New("error2"))
+
assert.Equal(t, mockedService.TheExampleMethod3(&ExampleType{true}), nil)
assert.EqualError(t, mockedService.TheExampleMethod3(&ExampleType{false}), "error")
+ assert.EqualError(t, mockedService.TheExampleMethod3(nil), "error2")
}
func Test_Mock_On_WithFuncArgMatcher(t *testing.T) {
@@ -191,17 +224,62 @@ func Test_Mock_On_WithFuncArgMatcher(t *testing.T) {
fixture1, fixture2 := errors.New("fixture1"), errors.New("fixture2")
mockedService.On("TheExampleMethodFunc",
- MatchedBy(func(a func(string) error) bool { return a("string") == fixture1 }),
+ MatchedBy(func(a func(string) error) bool { return a != nil && a("string") == fixture1 }),
).Return(errors.New("fixture1"))
mockedService.On("TheExampleMethodFunc",
- MatchedBy(func(a func(string) error) bool { return a("string") == fixture2 }),
+ MatchedBy(func(a func(string) error) bool { return a != nil && a("string") == fixture2 }),
).Return(errors.New("fixture2"))
+ mockedService.On("TheExampleMethodFunc",
+ MatchedBy(func(a func(string) error) bool { return a == nil }),
+ ).Return(errors.New("fixture3"))
+
assert.EqualError(t, mockedService.TheExampleMethodFunc(
func(string) error { return fixture1 }), "fixture1")
assert.EqualError(t, mockedService.TheExampleMethodFunc(
func(string) error { return fixture2 }), "fixture2")
+ assert.EqualError(t, mockedService.TheExampleMethodFunc(nil), "fixture3")
+}
+
+func Test_Mock_On_WithInterfaceArgMatcher(t *testing.T) {
+ var mockedService TestExampleImplementation
+
+ mockedService.On("TheExampleMethod4",
+ MatchedBy(func(a ExampleInterface) bool { return a == nil }),
+ ).Return(errors.New("fixture1"))
+
+ assert.EqualError(t, mockedService.TheExampleMethod4(nil), "fixture1")
+}
+
+func Test_Mock_On_WithChannelArgMatcher(t *testing.T) {
+ var mockedService TestExampleImplementation
+
+ mockedService.On("TheExampleMethod5",
+ MatchedBy(func(ch chan struct{}) bool { return ch == nil }),
+ ).Return(errors.New("fixture1"))
+
+ assert.EqualError(t, mockedService.TheExampleMethod5(nil), "fixture1")
+}
+
+func Test_Mock_On_WithMapArgMatcher(t *testing.T) {
+ var mockedService TestExampleImplementation
+
+ mockedService.On("TheExampleMethod6",
+ MatchedBy(func(m map[string]bool) bool { return m == nil }),
+ ).Return(errors.New("fixture1"))
+
+ assert.EqualError(t, mockedService.TheExampleMethod6(nil), "fixture1")
+}
+
+func Test_Mock_On_WithSliceArgMatcher(t *testing.T) {
+ var mockedService TestExampleImplementation
+
+ mockedService.On("TheExampleMethod7",
+ MatchedBy(func(slice []bool) bool { return slice == nil }),
+ ).Return(errors.New("fixture1"))
+
+ assert.EqualError(t, mockedService.TheExampleMethod7(nil), "fixture1")
}
func Test_Mock_On_WithVariadicFunc(t *testing.T) {
@@ -226,6 +304,29 @@ func Test_Mock_On_WithVariadicFunc(t *testing.T) {
}
+func Test_Mock_On_WithMixedVariadicFunc(t *testing.T) {
+
+ // make a test impl object
+ var mockedService = new(TestExampleImplementation)
+
+ c := mockedService.
+ On("TheExampleMethodMixedVariadic", 1, []int{2, 3, 4}).
+ Return(nil)
+
+ assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls)
+ assert.Equal(t, 2, len(c.Arguments))
+ assert.Equal(t, 1, c.Arguments[0])
+ assert.Equal(t, []int{2, 3, 4}, c.Arguments[1])
+
+ assert.NotPanics(t, func() {
+ mockedService.TheExampleMethodMixedVariadic(1, 2, 3, 4)
+ })
+ assert.Panics(t, func() {
+ mockedService.TheExampleMethodMixedVariadic(1, 2, 3, 5)
+ })
+
+}
+
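
The test above depends on how mixed variadic methods are recorded: TheExampleMethodMixedVariadic forwards i.Called(a, b), so the variadic tail arrives as a single []int argument and the expectation is declared against a slice rather than four separate values. A hedged sketch of the same convention with hypothetical names:

package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

type notifier struct{ mock.Mock }

// Notify forwards the variadic tail as a single slice argument, so
// the matching expectation below also uses a slice.
func (n *notifier) Notify(level int, ids ...int) error {
	args := n.Called(level, ids)
	return args.Error(0)
}

func TestNotifySketch(t *testing.T) {
	n := new(notifier)
	n.On("Notify", 1, []int{2, 3}).Return(nil)

	if err := n.Notify(1, 2, 3); err != nil {
		t.Fatal(err)
	}
	n.AssertExpectations(t)
}
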
func Test_Mock_On_WithVariadicFuncWithInterface(t *testing.T) {
// make a test impl object
@@ -726,7 +827,7 @@ func Test_AssertExpectationsForObjects_Helper(t *testing.T) {
mockedService2.Called(2)
mockedService3.Called(3)
- assert.True(t, AssertExpectationsForObjects(t, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock))
+ assert.True(t, AssertExpectationsForObjects(t, &mockedService1.Mock, &mockedService2.Mock, &mockedService3.Mock))
assert.True(t, AssertExpectationsForObjects(t, mockedService1, mockedService2, mockedService3))
}
@@ -745,7 +846,7 @@ func Test_AssertExpectationsForObjects_Helper_Failed(t *testing.T) {
mockedService3.Called(3)
tt := new(testing.T)
- assert.False(t, AssertExpectationsForObjects(tt, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock))
+ assert.False(t, AssertExpectationsForObjects(tt, &mockedService1.Mock, &mockedService2.Mock, &mockedService3.Mock))
assert.False(t, AssertExpectationsForObjects(tt, mockedService1, mockedService2, mockedService3))
}
@@ -969,6 +1070,31 @@ func Test_Mock_AssertNotCalled(t *testing.T) {
}
+func Test_Mock_AssertOptional(t *testing.T) {
+ // Optional called
+ var ms1 = new(TestExampleImplementation)
+ ms1.On("TheExampleMethod", 1, 2, 3).Maybe().Return(4, nil)
+ ms1.TheExampleMethod(1, 2, 3)
+
+ tt1 := new(testing.T)
+ assert.Equal(t, true, ms1.AssertExpectations(tt1))
+
+ // Optional not called
+ var ms2 = new(TestExampleImplementation)
+ ms2.On("TheExampleMethod", 1, 2, 3).Maybe().Return(4, nil)
+
+ tt2 := new(testing.T)
+ assert.Equal(t, true, ms2.AssertExpectations(tt2))
+
+ // Non-optional called
+ var ms3 = new(TestExampleImplementation)
+ ms3.On("TheExampleMethod", 1, 2, 3).Return(4, nil)
+ ms3.TheExampleMethod(1, 2, 3)
+
+ tt3 := new(testing.T)
+ assert.Equal(t, true, ms3.AssertExpectations(tt3))
+}
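
Maybe(), exercised above, marks an expectation as optional: AssertExpectations passes whether or not the optional call happened, while ordinary expectations still have to be met. A short sketch under those semantics, with hypothetical method names:

package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

func TestMaybeSketch(t *testing.T) {
	m := new(mock.Mock)

	// Optional: AssertExpectations succeeds even if "Ping" is never called.
	m.On("Ping").Maybe().Return(nil)

	// Required: must be exercised before AssertExpectations.
	m.On("Close").Return(nil)

	m.MethodCalled("Close")
	m.AssertExpectations(t) // passes: "Ping" was optional
}
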
+
/*
Arguments helper methods
*/
@@ -1130,3 +1256,97 @@ func Test_Arguments_Bool(t *testing.T) {
assert.Equal(t, true, args.Bool(2))
}
+
+func Test_WaitUntil_Parallel(t *testing.T) {
+
+ // make a test impl object
+ var mockedService *TestExampleImplementation = new(TestExampleImplementation)
+
+ ch1 := make(chan time.Time)
+ ch2 := make(chan time.Time)
+
+ mockedService.Mock.On("TheExampleMethod2", true).Return().WaitUntil(ch2).Run(func(args Arguments) {
+ ch1 <- time.Now()
+ })
+
+ mockedService.Mock.On("TheExampleMethod2", false).Return().WaitUntil(ch1)
+
+ // Lock both goroutines on the .WaitUntil method
+ go func() {
+ mockedService.TheExampleMethod2(false)
+ }()
+ go func() {
+ mockedService.TheExampleMethod2(true)
+ }()
+
+ // Allow the first call to execute, so the second one executes afterwards
+ ch2 <- time.Now()
+}
+
+func Test_MockMethodCalled(t *testing.T) {
+ m := new(Mock)
+ m.On("foo", "hello").Return("world")
+
+ retArgs := m.MethodCalled("foo", "hello")
+ require.True(t, len(retArgs) == 1)
+ require.Equal(t, "world", retArgs[0])
+ m.AssertExpectations(t)
+}
+
+// Test to validate fix for racy concurrent call access in MethodCalled()
+func Test_MockReturnAndCalledConcurrent(t *testing.T) {
+ iterations := 1000
+ m := &Mock{}
+ call := m.On("ConcurrencyTestMethod")
+
+ wg := sync.WaitGroup{}
+ wg.Add(2)
+
+ go func() {
+ for i := 0; i < iterations; i++ {
+ call.Return(10)
+ }
+ wg.Done()
+ }()
+ go func() {
+ for i := 0; i < iterations; i++ {
+ ConcurrencyTestMethod(m)
+ }
+ wg.Done()
+ }()
+ wg.Wait()
+}
+
+type timer struct{ Mock }
+
+func (s *timer) GetTime(i int) string {
+ return s.Called(i).Get(0).(string)
+}
+
+func TestAfterTotalWaitTimeWhileExecution(t *testing.T) {
+ waitDuration := 1
+ total, waitMs := 5, time.Millisecond*time.Duration(waitDuration)
+ aTimer := new(timer)
+ for i := 0; i < total; i++ {
+ aTimer.On("GetTime", i).After(waitMs).Return(fmt.Sprintf("Time%d", i)).Once()
+ }
+ time.Sleep(waitMs)
+ start := time.Now()
+ var results []string
+
+ for i := 0; i < total; i++ {
+ results = append(results, aTimer.GetTime(i))
+ }
+
+ end := time.Now()
+ elapsedTime := end.Sub(start)
+ assert.True(t, elapsedTime > waitMs, fmt.Sprintf("Total elapsed time %v should be greater than %v", elapsedTime, waitMs))
+ assert.Equal(t, total, len(results))
+ for i := range results {
+ assert.Equal(t, fmt.Sprintf("Time%d", i), results[i], "Return value of the method should be the same")
+ }
+}
+
+func ConcurrencyTestMethod(m *Mock) {
+ m.Called()
+}
diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go
index d3c2ab9bc..ac71d4058 100644
--- a/vendor/github.com/stretchr/testify/require/forward_requirements.go
+++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go
@@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
}
}
-//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl
+//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index 1bcfcb0d9..a21d02f81 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -1,464 +1,979 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
-*/
+ */
package require
import (
-
assert "github.com/stretchr/testify/assert"
http "net/http"
url "net/url"
time "time"
)
-
// Condition uses a Comparison to assert a complex condition.
func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
- if !assert.Condition(t, comp, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Condition(t, comp, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interface{}) {
+ if !assert.Conditionf(t, comp, msg, args...) {
+ t.FailNow()
+ }
+}
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
-//
-// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
-// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
-// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
-//
+//
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
+//
// Returns whether the assertion was successful (true) or not (false).
func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
- if !assert.Contains(t, s, contains, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Contains(t, s, contains, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) {
+ if !assert.Containsf(t, s, contains, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory, or if there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) {
+ if !assert.DirExists(t, path, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory, or if there is an error checking whether it exists.
+func DirExistsf(t TestingT, path string, msg string, args ...interface{}) {
+ if !assert.DirExistsf(t, path, msg, args...) {
+ t.FailNow()
+ }
}
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+//
+// Returns whether the assertion was successful (true) or not (false).
+func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
+ if !assert.ElementsMatch(t, listA, listB, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) {
+ if !assert.ElementsMatchf(t, listA, listB, msg, args...) {
+ t.FailNow()
+ }
+}
// Empty asserts that the specified object is empty, i.e. nil, "", false, 0, or
// a slice or a channel with len == 0.
-//
+//
// assert.Empty(t, obj)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if !assert.Empty(t, object, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Empty(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// Emptyf asserts that the specified object is empty, i.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// assert.Emptyf(t, obj, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if !assert.Emptyf(t, object, msg, args...) {
+ t.FailNow()
+ }
+}
// Equal asserts that two objects are equal.
-//
-// assert.Equal(t, 123, 123, "123 and 123 should be equal")
-//
+//
+// assert.Equal(t, 123, 123)
+//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if !assert.Equal(t, expected, actual, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Equal(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
}
-
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
-//
+//
// actualObj, err := SomeFunction()
-// if assert.Error(t, err, "An error was expected") {
-// assert.Equal(t, err, expectedError)
-// }
-//
+// assert.EqualError(t, err, expectedErrorString)
+//
// Returns whether the assertion was successful (true) or not (false).
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
- if !assert.EqualError(t, theError, errString, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.EqualError(t, theError, errString, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) {
+ if !assert.EqualErrorf(t, theError, errString, msg, args...) {
+ t.FailNow()
+ }
+}
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
-//
-// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
-//
+//
+// assert.EqualValues(t, uint32(123), int32(123))
+//
// Returns whether the assertion was successful (true) or not (false).
func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if !assert.EqualValues(t, expected, actual, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.EqualValues(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if !assert.EqualValuesf(t, expected, actual, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Equalf asserts that two objects are equal.
+//
+// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if !assert.Equalf(t, expected, actual, msg, args...) {
+ t.FailNow()
+ }
+}
// Error asserts that a function returned an error (i.e. not `nil`).
-//
+//
// actualObj, err := SomeFunction()
-// if assert.Error(t, err, "An error was expected") {
-// assert.Equal(t, err, expectedError)
+// if assert.Error(t, err) {
+// assert.Equal(t, expectedError, err)
// }
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func Error(t TestingT, err error, msgAndArgs ...interface{}) {
- if !assert.Error(t, err, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Error(t, err, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Errorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Errorf(t TestingT, err error, msg string, args ...interface{}) {
+ if !assert.Errorf(t, err, msg, args...) {
+ t.FailNow()
+ }
+}
-// Exactly asserts that two objects are equal is value and type.
-//
-// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
-//
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(t, int32(123), int64(123))
+//
// Returns whether the assertion was successful (true) or not (false).
func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if !assert.Exactly(t, expected, actual, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Exactly(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if !assert.Exactlyf(t, expected, actual, msg, args...) {
+ t.FailNow()
+ }
+}
// Fail reports a failure through
func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
- if !assert.Fail(t, failureMessage, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Fail(t, failureMessage, msgAndArgs...) {
+ t.FailNow()
+ }
}
-
// FailNow fails test
func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
- if !assert.FailNow(t, failureMessage, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.FailNow(t, failureMessage, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// FailNowf fails test
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) {
+ if !assert.FailNowf(t, failureMessage, msg, args...) {
+ t.FailNow()
+ }
}
+// Failf reports a failure through
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) {
+ if !assert.Failf(t, failureMessage, msg, args...) {
+ t.FailNow()
+ }
+}
// False asserts that the specified value is false.
-//
-// assert.False(t, myBool, "myBool should be false")
-//
+//
+// assert.False(t, myBool)
+//
// Returns whether the assertion was successful (true) or not (false).
func False(t TestingT, value bool, msgAndArgs ...interface{}) {
- if !assert.False(t, value, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.False(t, value, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// Falsef asserts that the specified value is false.
+//
+// assert.Falsef(t, myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) {
+ if !assert.Falsef(t, value, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) {
+ if !assert.FileExists(t, path, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExistsf(t TestingT, path string, msg string, args ...interface{}) {
+ if !assert.FileExistsf(t, path, msg, args...) {
+ t.FailNow()
+ }
+}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
-//
+//
// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
- if !assert.HTTPBodyContains(t, handler, method, url, values, str) {
- t.FailNow()
- }
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+ if !assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+ if !assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) {
+ t.FailNow()
+ }
+}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
-//
+//
// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
- if !assert.HTTPBodyNotContains(t, handler, method, url, values, str) {
- t.FailNow()
- }
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+ if !assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+ if !assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) {
+ t.FailNow()
+ }
+}
// HTTPError asserts that a specified handler returns an error status code.
-//
+//
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
- if !assert.HTTPError(t, handler, method, url, values) {
- t.FailNow()
- }
+func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ if !assert.HTTPError(t, handler, method, url, values, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ if !assert.HTTPErrorf(t, handler, method, url, values, msg, args...) {
+ t.FailNow()
+ }
+}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
-//
+//
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
- if !assert.HTTPRedirect(t, handler, method, url, values) {
- t.FailNow()
- }
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ if !assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ if !assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) {
+ t.FailNow()
+ }
+}
// HTTPSuccess asserts that a specified handler returns a success status code.
-//
+//
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
- if !assert.HTTPSuccess(t, handler, method, url, values) {
- t.FailNow()
- }
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ if !assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ if !assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) {
+ t.FailNow()
+ }
+}
// Implements asserts that an object implements the specified interface.
-//
-// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
- if !assert.Implements(t, interfaceObject, object, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Implements(t, interfaceObject, object, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// Implementsf asserts that an object implements the specified interface.
+//
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
+ if !assert.Implementsf(t, interfaceObject, object, msg, args...) {
+ t.FailNow()
+ }
+}
// InDelta asserts that the two numerals are within delta of each other.
-//
+//
// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ if !assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ if !assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) {
+ t.FailNow()
+ }
+}
// InDeltaSlice is the same as InDelta, except it compares two slices.
func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ if !assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) {
+ t.FailNow()
+ }
}
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// assert.InDeltaf(t, math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ if !assert.InDeltaf(t, expected, actual, delta, msg, args...) {
+ t.FailNow()
+ }
+}
// InEpsilon asserts that expected and actual have a relative error less than epsilon
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
- if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
+ if !assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) {
+ t.FailNow()
+ }
+}
-// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
-func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- if !assert.InEpsilonSlice(t, expected, actual, delta, msgAndArgs...) {
- t.FailNow()
- }
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+ if !assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) {
+ t.FailNow()
+ }
}
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+ if !assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) {
+ t.FailNow()
+ }
+}
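
InEpsilon bounds the relative error |expected - actual| / |expected| by epsilon, whereas InDelta bounds the absolute difference. A worked example of the distinction (values chosen purely for illustration):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEpsilonVsDelta(t *testing.T) {
	// Relative error: |100 - 101| / 100 = 0.01, below epsilon 0.02.
	require.InEpsilon(t, 100.0, 101.0, 0.02)

	// Absolute error: |100 - 101| = 1, within delta 1.5.
	require.InDelta(t, 100.0, 101.0, 1.5)
}
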
// IsType asserts that the specified objects are of the same type.
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
- if !assert.IsType(t, expectedType, object, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.IsType(t, expectedType, object, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// IsTypef asserts that the specified objects are of the same type.
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) {
+ if !assert.IsTypef(t, expectedType, object, msg, args...) {
+ t.FailNow()
+ }
+}
// JSONEq asserts that two JSON strings are equivalent.
-//
+//
// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
- if !assert.JSONEq(t, expected, actual, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.JSONEq(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
+ if !assert.JSONEqf(t, expected, actual, msg, args...) {
+ t.FailNow()
+ }
+}
// Len asserts that the specified object has a specific length.
// Len also fails if the object has a type that len() does not accept.
-//
-// assert.Len(t, mySlice, 3, "The size of slice is not 3")
-//
+//
+// assert.Len(t, mySlice, 3)
+//
// Returns whether the assertion was successful (true) or not (false).
func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) {
- if !assert.Len(t, object, length, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Len(t, object, length, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) {
+ if !assert.Lenf(t, object, length, msg, args...) {
+ t.FailNow()
+ }
+}
// Nil asserts that the specified object is nil.
-//
-// assert.Nil(t, err, "err should be nothing")
-//
+//
+// assert.Nil(t, err)
+//
// Returns whether the assertion was successful (true) or not (false).
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if !assert.Nil(t, object, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Nil(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// Nilf asserts that the specified object is nil.
+//
+// assert.Nilf(t, err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if !assert.Nilf(t, object, msg, args...) {
+ t.FailNow()
+ }
+}
// NoError asserts that a function returned no error (i.e. `nil`).
-//
+//
// actualObj, err := SomeFunction()
// if assert.NoError(t, err) {
-// assert.Equal(t, actualObj, expectedObj)
+// assert.Equal(t, expectedObj, actualObj)
// }
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
- if !assert.NoError(t, err, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.NoError(t, err, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) {
+ if !assert.NoErrorf(t, err, msg, args...) {
+ t.FailNow()
+ }
+}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
-//
-// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
-// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
-// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
-//
+//
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
+//
// Returns whether the assertion was successful (true) or not (false).
func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
- if !assert.NotContains(t, s, contains, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.NotContains(t, s, contains, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) {
+ if !assert.NotContainsf(t, s, contains, msg, args...) {
+ t.FailNow()
+ }
+}
// NotEmpty asserts that the specified object is NOT empty, i.e. not nil, "", false, 0, or
// a slice or a channel with len == 0.
-//
+//
// if assert.NotEmpty(t, obj) {
// assert.Equal(t, "two", obj[1])
// }
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if !assert.NotEmpty(t, object, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.NotEmpty(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// NotEmptyf asserts that the specified object is NOT empty, i.e. not nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if !assert.NotEmptyf(t, object, msg, args...) {
+ t.FailNow()
+ }
+}
// NotEqual asserts that the specified values are NOT equal.
-//
-// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
-//
+//
+// assert.NotEqual(t, obj1, obj2)
+//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if !assert.NotEqual(t, expected, actual, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.NotEqual(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if !assert.NotEqualf(t, expected, actual, msg, args...) {
+ t.FailNow()
+ }
+}
// NotNil asserts that the specified object is not nil.
-//
-// assert.NotNil(t, err, "err should be something")
-//
+//
+// assert.NotNil(t, err)
+//
// Returns whether the assertion was successful (true) or not (false).
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if !assert.NotNil(t, object, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.NotNil(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// NotNilf asserts that the specified object is not nil.
+//
+// assert.NotNilf(t, err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) {
+ if !assert.NotNilf(t, object, msg, args...) {
+ t.FailNow()
+ }
+}
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// assert.NotPanics(t, func(){
-// RemainCalm()
-// }, "Calling RemainCalm() should NOT panic")
-//
+//
+// assert.NotPanics(t, func(){ RemainCalm() })
+//
// Returns whether the assertion was successful (true) or not (false).
func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if !assert.NotPanics(t, f, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.NotPanics(t, f, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ if !assert.NotPanicsf(t, f, msg, args...) {
+ t.FailNow()
+ }
+}
// NotRegexp asserts that a specified regexp does not match a string.
-//
+//
// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
// assert.NotRegexp(t, "^start", "it's not starting")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
- if !assert.NotRegexp(t, rx, str, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.NotRegexp(t, rx, str, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
+ if !assert.NotRegexpf(t, rx, str, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// NotSubset asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
+ if !assert.NotSubset(t, list, subset, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
+ if !assert.NotSubsetf(t, list, subset, msg, args...) {
+ t.FailNow()
+ }
+}
// NotZero asserts that i is not the zero value for its type and returns the truth.
func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
- if !assert.NotZero(t, i, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.NotZero(t, i, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// NotZerof asserts that i is not the zero value for its type and returns the truth.
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) {
+ if !assert.NotZerof(t, i, msg, args...) {
+ t.FailNow()
+ }
+}
// Panics asserts that the code inside the specified PanicTestFunc panics.
-//
-// assert.Panics(t, func(){
-// GoCrazy()
-// }, "Calling GoCrazy() should panic")
-//
+//
+// assert.Panics(t, func(){ GoCrazy() })
+//
// Returns whether the assertion was successful (true) or not (false).
func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if !assert.Panics(t, f, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Panics(t, f, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ if !assert.PanicsWithValue(t, expected, f, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ if !assert.PanicsWithValuef(t, expected, f, msg, args...) {
+ t.FailNow()
+ }
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ if !assert.Panicsf(t, f, msg, args...) {
+ t.FailNow()
+ }
+}
// Regexp asserts that a specified regexp matches a string.
-//
+//
// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
// assert.Regexp(t, "start...$", "it's not starting")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
- if !assert.Regexp(t, rx, str, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Regexp(t, rx, str, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
+ if !assert.Regexpf(t, rx, str, msg, args...) {
+ t.FailNow()
+ }
}
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
+ if !assert.Subset(t, list, subset, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
+ if !assert.Subsetf(t, list, subset, msg, args...) {
+ t.FailNow()
+ }
+}
// True asserts that the specified value is true.
-//
-// assert.True(t, myBool, "myBool should be true")
-//
+//
+// assert.True(t, myBool)
+//
// Returns whether the assertion was successful (true) or not (false).
func True(t TestingT, value bool, msgAndArgs ...interface{}) {
- if !assert.True(t, value, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.True(t, value, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// Truef asserts that the specified value is true.
+//
+// assert.Truef(t, myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Truef(t TestingT, value bool, msg string, args ...interface{}) {
+ if !assert.Truef(t, value, msg, args...) {
+ t.FailNow()
+ }
+}
// WithinDuration asserts that the two times are within duration delta of each other.
-//
-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
-//
+//
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+//
// Returns whether the assertion was successful (true) or not (false).
func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
- if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) {
+ t.FailNow()
+ }
}
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) {
+ if !assert.WithinDurationf(t, expected, actual, delta, msg, args...) {
+ t.FailNow()
+ }
+}
// Zero asserts that i is the zero value for its type and returns the truth.
func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
- if !assert.Zero(t, i, msgAndArgs...) {
- t.FailNow()
- }
+ if !assert.Zero(t, i, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Zerof asserts that i is the zero value for its type and returns the truth.
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) {
+ if !assert.Zerof(t, i, msg, args...) {
+ t.FailNow()
+ }
}
diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl
index ab1b1e9fd..d2c38f6f2 100644
--- a/vendor/github.com/stretchr/testify/require/require.go.tmpl
+++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl
@@ -1,6 +1,6 @@
{{.Comment}}
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
- if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) {
- t.FailNow()
- }
+ if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) {
+ t.FailNow()
+ }
}
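
Each require function is produced by instantiating this template once per assert function; the whitespace change above only switches the generated bodies to tab indentation. For illustration, the expansion for one entry (Condition), matching the generated require.go earlier in this diff:

// {{.Comment}} becomes the copied doc comment:
// Condition uses a Comparison to assert a complex condition.
func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
	if !assert.Condition(t, comp, msgAndArgs...) {
		t.FailNow()
	}
}
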
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index 58324f105..769408503 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -1,388 +1,799 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
-*/
+ */
package require
import (
-
assert "github.com/stretchr/testify/assert"
http "net/http"
url "net/url"
time "time"
)
-
// Condition uses a Comparison to assert a complex condition.
func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
Condition(a.t, comp, msgAndArgs...)
}
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...interface{}) {
+ Conditionf(a.t, comp, msg, args...)
+}
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
-//
-// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
-// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
-// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
-//
+//
+// a.Contains("Hello World", "World")
+// a.Contains(["Hello", "World"], "World")
+// a.Contains({"Hello": "World"}, "Hello")
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
Contains(a.t, s, contains, msgAndArgs...)
}
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Containsf("Hello World", "World", "error message %s", "formatted")
+// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) {
+ Containsf(a.t, s, contains, msg, args...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory, or if there is an error checking whether it exists.
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) {
+ DirExists(a.t, path, msgAndArgs...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory, or if there is an error checking whether it exists.
+func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) {
+ DirExistsf(a.t, path, msg, args...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
+ ElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) {
+ ElementsMatchf(a.t, listA, listB, msg, args...)
+}
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
-//
+//
// a.Empty(obj)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
Empty(a.t, object, msgAndArgs...)
}
+// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// a.Emptyf(obj, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) {
+ Emptyf(a.t, object, msg, args...)
+}
// Equal asserts that two objects are equal.
-//
-// a.Equal(123, 123, "123 and 123 should be equal")
-//
+//
+// a.Equal(123, 123)
+//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
Equal(a.t, expected, actual, msgAndArgs...)
}
-
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
-//
+//
// actualObj, err := SomeFunction()
-// if assert.Error(t, err, "An error was expected") {
-// assert.Equal(t, err, expectedError)
-// }
-//
+// a.EqualError(err, expectedErrorString)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
EqualError(a.t, theError, errString, msgAndArgs...)
}
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) {
+ EqualErrorf(a.t, theError, errString, msg, args...)
+}
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
-//
-// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
-//
+//
+// a.EqualValues(uint32(123), int32(123))
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
EqualValues(a.t, expected, actual, msgAndArgs...)
}
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ Equalf(a.t, expected, actual, msg, args...)
+}
// Error asserts that a function returned an error (i.e. not `nil`).
-//
+//
// actualObj, err := SomeFunction()
-// if a.Error(err, "An error was expected") {
-// assert.Equal(t, err, expectedError)
+// if a.Error(err) {
+// assert.Equal(t, expectedError, err)
// }
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
Error(a.t, err, msgAndArgs...)
}
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Errorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) {
+ Errorf(a.t, err, msg, args...)
+}
-// Exactly asserts that two objects are equal is value and type.
-//
-// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
-//
+// Exactly asserts that two objects are equal in value and type.
+//
+// a.Exactly(int32(123), int64(123))
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
Exactly(a.t, expected, actual, msgAndArgs...)
}
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ Exactlyf(a.t, expected, actual, msg, args...)
+}
// Fail reports a failure through
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) {
Fail(a.t, failureMessage, msgAndArgs...)
}
-
// FailNow fails test
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) {
FailNow(a.t, failureMessage, msgAndArgs...)
}
+// FailNowf fails test
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) {
+ FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure through
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) {
+ Failf(a.t, failureMessage, msg, args...)
+}
// False asserts that the specified value is false.
-//
-// a.False(myBool, "myBool should be false")
-//
+//
+// a.False(myBool)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) {
False(a.t, value, msgAndArgs...)
}
+// Falsef asserts that the specified value is false.
+//
+// a.Falsef(myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) {
+ Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) {
+ FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) {
+ FileExistsf(a.t, path, msg, args...)
+}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
-//
+//
// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
- HTTPBodyContains(a.t, handler, method, url, values, str)
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+ HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
}
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+ HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
-//
+//
// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
- HTTPBodyNotContains(a.t, handler, method, url, values, str)
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+ HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
}
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+ HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
// HTTPError asserts that a specified handler returns an error status code.
-//
+//
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) {
- HTTPError(a.t, handler, method, url, values)
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ HTTPError(a.t, handler, method, url, values, msgAndArgs...)
}
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
-//
+//
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) {
- HTTPRedirect(a.t, handler, method, url, values)
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
}
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
// HTTPSuccess asserts that a specified handler returns a success status code.
-//
+//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) {
- HTTPSuccess(a.t, handler, method, url, values)
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+ HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
}
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
// Implements asserts that an object is implemented by the specified interface.
-//
-// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
+//
+// a.Implements((*MyInterface)(nil), new(MyObject))
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
Implements(a.t, interfaceObject, object, msgAndArgs...)
}
+// Implementsf asserts that an object is implemented by the specified interface.
+//
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
+ Implementsf(a.t, interfaceObject, object, msg, args...)
+}
// InDelta asserts that the two numerals are within delta of each other.
-//
+//
// a.InDelta(math.Pi, (22 / 7.0), 0.01)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
InDelta(a.t, expected, actual, delta, msgAndArgs...)
}
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
// InDeltaSlice is the same as InDelta, except it compares two slices.
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
}
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
// InEpsilon asserts that expected and actual have a relative error less than epsilon
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
}
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
+ InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
-// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
-func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+ InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
}
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+ InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
IsType(a.t, expectedType, object, msgAndArgs...)
}
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) {
+ IsTypef(a.t, expectedType, object, msg, args...)
+}
// JSONEq asserts that two JSON strings are equivalent.
-//
+//
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) {
JSONEq(a.t, expected, actual, msgAndArgs...)
}
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) {
+ JSONEqf(a.t, expected, actual, msg, args...)
+}
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() does not accept.
-//
-// a.Len(mySlice, 3, "The size of slice is not 3")
-//
+//
+// a.Len(mySlice, 3)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) {
Len(a.t, object, length, msgAndArgs...)
}
+// Lenf asserts that the specified object has specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) {
+ Lenf(a.t, object, length, msg, args...)
+}
// Nil asserts that the specified object is nil.
-//
-// a.Nil(err, "err should be nothing")
-//
+//
+// a.Nil(err)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) {
Nil(a.t, object, msgAndArgs...)
}
+// Nilf asserts that the specified object is nil.
+//
+// a.Nilf(err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) {
+ Nilf(a.t, object, msg, args...)
+}
// NoError asserts that a function returned no error (i.e. `nil`).
-//
+//
// actualObj, err := SomeFunction()
// if a.NoError(err) {
-// assert.Equal(t, actualObj, expectedObj)
+// assert.Equal(t, expectedObj, actualObj)
// }
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) {
NoError(a.t, err, msgAndArgs...)
}
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoErrorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) {
+ NoErrorf(a.t, err, msg, args...)
+}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
-//
-// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
-// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
-// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
-//
+//
+// a.NotContains("Hello World", "Earth")
+// a.NotContains(["Hello", "World"], "Earth")
+// a.NotContains({"Hello": "World"}, "Earth")
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
NotContains(a.t, s, contains, msgAndArgs...)
}
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) {
+ NotContainsf(a.t, s, contains, msg, args...)
+}
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
-//
+//
// if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1])
// }
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
NotEmpty(a.t, object, msgAndArgs...)
}
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) {
+ NotEmptyf(a.t, object, msg, args...)
+}
// NotEqual asserts that the specified values are NOT equal.
-//
-// a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
-//
+//
+// a.NotEqual(obj1, obj2)
+//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
NotEqual(a.t, expected, actual, msgAndArgs...)
}
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ NotEqualf(a.t, expected, actual, msg, args...)
+}
// NotNil asserts that the specified object is not nil.
-//
-// a.NotNil(err, "err should be something")
-//
+//
+// a.NotNil(err)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) {
NotNil(a.t, object, msgAndArgs...)
}
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) {
+ NotNilf(a.t, object, msg, args...)
+}
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// a.NotPanics(func(){
-// RemainCalm()
-// }, "Calling RemainCalm() should NOT panic")
-//
+//
+// a.NotPanics(func(){ RemainCalm() })
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
NotPanics(a.t, f, msgAndArgs...)
}
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...interface{}) {
+ NotPanicsf(a.t, f, msg, args...)
+}
// NotRegexp asserts that a specified regexp does not match a string.
-//
+//
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
// a.NotRegexp("^start", "it's not starting")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
NotRegexp(a.t, rx, str, msgAndArgs...)
}
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
+ NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
+ NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
+ NotSubsetf(a.t, list, subset, msg, args...)
+}
// NotZero asserts that i is not the zero value for its type and returns the truth.
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) {
NotZero(a.t, i, msgAndArgs...)
}
+// NotZerof asserts that i is not the zero value for its type and returns the truth.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) {
+ NotZerof(a.t, i, msg, args...)
+}
// Panics asserts that the code inside the specified PanicTestFunc panics.
-//
-// a.Panics(func(){
-// GoCrazy()
-// }, "Calling GoCrazy() should panic")
-//
+//
+// a.Panics(func(){ GoCrazy() })
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
Panics(a.t, f, msgAndArgs...)
}
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) {
+ PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interface{}) {
+ Panicsf(a.t, f, msg, args...)
+}
// Regexp asserts that a specified regexp matches a string.
-//
+//
// a.Regexp(regexp.MustCompile("start"), "it's starting")
// a.Regexp("start...$", "it's not starting")
-//
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
Regexp(a.t, rx, str, msgAndArgs...)
}
+// Regexpf asserts that a specified regexp matches a string.
+//
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
+ Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
+ Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
+ Subsetf(a.t, list, subset, msg, args...)
+}
// True asserts that the specified value is true.
-//
-// a.True(myBool, "myBool should be true")
-//
+//
+// a.True(myBool)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) {
True(a.t, value, msgAndArgs...)
}
+// Truef asserts that the specified value is true.
+//
+// a.Truef(myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) {
+ Truef(a.t, value, msg, args...)
+}
// WithinDuration asserts that the two times are within duration delta of each other.
-//
-// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
-//
+//
+// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
}
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) {
+ WithinDurationf(a.t, expected, actual, delta, msg, args...)
+}
// Zero asserts that i is the zero value for its type and returns the truth.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
Zero(a.t, i, msgAndArgs...)
}
+
+// Zerof asserts that i is the zero value for its type and returns the truth.
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) {
+ Zerof(a.t, i, msg, args...)
+}
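
The file above is generated: every method on Assertions forwards to the package-level require function of the same name, and this upgrade roughly doubles the surface with format-string (*f) variants plus new assertions such as DirExists, ElementsMatch, and Subset. A minimal usage sketch, with the test name invented for illustration:

```Go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestForwardedAssertions exercises the wrapper API: require.New binds a
// *testing.T once, and each method forwards to the matching package-level
// function. The *f variants take a format string plus args instead of the
// looser msgAndArgs form.
func TestForwardedAssertions(t *testing.T) {
	a := require.New(t)

	a.Equal(123, 123)                                     // classic msgAndArgs form
	a.Equalf(123, 123, "unexpected value: %d", 123)       // new format-string form
	a.ElementsMatch([]int{1, 3, 2, 3}, []int{1, 3, 3, 2}) // order-insensitive, new here
	a.Subset([]int{1, 2, 3}, []int{1, 2})                 // also new in this upgrade
}
```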
diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go
index 41147562d..e404f016d 100644
--- a/vendor/github.com/stretchr/testify/require/requirements.go
+++ b/vendor/github.com/stretchr/testify/require/requirements.go
@@ -6,4 +6,4 @@ type TestingT interface {
FailNow()
}
-//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl
+//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs
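
The new -include-format-funcs flag is what makes the codegen emit the *f variants above. The generated functions only need the two-method TestingT contract from this file, so they can also be driven outside the standard harness; a hedged sketch, with recordingT and smokeCheck as invented names:

```Go
package example

import (
	"fmt"
	"os"

	"github.com/stretchr/testify/require"
)

// recordingT satisfies require.TestingT (Errorf plus FailNow), so the
// require assertions can run in a plain binary, e.g. a smoke check.
type recordingT struct{ failed bool }

func (r *recordingT) Errorf(format string, args ...interface{}) {
	r.failed = true
	fmt.Fprintf(os.Stderr, format+"\n", args...)
}

func (r *recordingT) FailNow() {
	os.Exit(1) // a real test framework would stop only the current goroutine
}

func smokeCheck() {
	t := &recordingT{}
	require.NotEmpty(t, os.Args, "os.Args should never be empty")
}
```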
diff --git a/vendor/github.com/stretchr/testify/suite/interfaces.go b/vendor/github.com/stretchr/testify/suite/interfaces.go
index 20969472c..b37cb0409 100644
--- a/vendor/github.com/stretchr/testify/suite/interfaces.go
+++ b/vendor/github.com/stretchr/testify/suite/interfaces.go
@@ -32,3 +32,15 @@ type TearDownAllSuite interface {
type TearDownTestSuite interface {
TearDownTest()
}
+
+// BeforeTest has a function to be executed right before the test
+// starts and receives the suite and test names as input
+type BeforeTest interface {
+ BeforeTest(suiteName, testName string)
+}
+
+// AfterTest has a function to be executed right after the test
+// finishes and receives the suite and test names as input
+type AfterTest interface {
+ AfterTest(suiteName, testName string)
+}
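
Unlike SetupTest/TearDownTest, these hooks receive the suite and test names, and (per the suite.Run changes below) they run just inside the setup/teardown pair: SetupTest, BeforeTest, the test method, AfterTest, TearDownTest. A small illustrative suite, with HookSuite as an invented name:

```Go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// HookSuite records the per-test hook calls added by this change.
type HookSuite struct {
	suite.Suite
	log []string
}

func (s *HookSuite) BeforeTest(suiteName, testName string) {
	s.log = append(s.log, "before "+suiteName+"/"+testName)
}

func (s *HookSuite) AfterTest(suiteName, testName string) {
	s.log = append(s.log, "after "+suiteName+"/"+testName)
}

func (s *HookSuite) TestSomething() {
	s.Equal(1, 1)
}

func TestHookSuite(t *testing.T) {
	suite.Run(t, new(HookSuite))
}
```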
diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go
index db7413000..e20afbc21 100644
--- a/vendor/github.com/stretchr/testify/suite/suite.go
+++ b/vendor/github.com/stretchr/testify/suite/suite.go
@@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/require"
)
+var allTestsFilter = func(_, _ string) (bool, error) { return true, nil }
var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run")
// Suite is a basic testing suite with methods for storing and
@@ -86,7 +87,13 @@ func Run(t *testing.T, suite TestingSuite) {
if setupTestSuite, ok := suite.(SetupTestSuite); ok {
setupTestSuite.SetupTest()
}
+ if beforeTestSuite, ok := suite.(BeforeTest); ok {
+ beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name)
+ }
defer func() {
+ if afterTestSuite, ok := suite.(AfterTest); ok {
+ afterTestSuite.AfterTest(methodFinder.Elem().Name(), method.Name)
+ }
if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok {
tearDownTestSuite.TearDownTest()
}
@@ -98,10 +105,20 @@ func Run(t *testing.T, suite TestingSuite) {
tests = append(tests, test)
}
}
+ runTests(t, tests)
+}
+
+func runTests(t testing.TB, tests []testing.InternalTest) {
+ r, ok := t.(runner)
+ if !ok { // backwards compatibility with Go 1.6 and below
+ if !testing.RunTests(allTestsFilter, tests) {
+ t.Fail()
+ }
+ return
+ }
- if !testing.RunTests(func(_, _ string) (bool, error) { return true, nil },
- tests) {
- t.Fail()
+ for _, test := range tests {
+ r.Run(test.Name, test.F)
}
}
@@ -113,3 +130,7 @@ func methodFilter(name string) (bool, error) {
}
return regexp.MatchString(*matchMethod, name)
}
+
+type runner interface {
+ Run(name string, f func(t *testing.T)) bool
+}
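
The runner indirection is how the suite adopts Go 1.7 subtests without breaking older toolchains: *testing.T's own Run method matches the interface, so runTests dispatches through it when the type assertion succeeds, and falls back to testing.RunTests otherwise. A sketch of what this buys a caller, with MySuite invented for illustration:

```Go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type MySuite struct{ suite.Suite }

func (s *MySuite) TestOne() { s.True(true) }

// On Go 1.7+ each suite method becomes a named subtest, so a single method
// can be selected from the command line:
//
//	go test -run 'TestMySuite/TestOne'
//
// On Go 1.6 and below the same call runs everything via testing.RunTests.
func TestMySuite(t *testing.T) {
	suite.Run(t, new(MySuite))
}
```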
diff --git a/vendor/github.com/stretchr/testify/suite/suite_test.go b/vendor/github.com/stretchr/testify/suite/suite_test.go
index c7c4e88f7..b75fa4ac1 100644
--- a/vendor/github.com/stretchr/testify/suite/suite_test.go
+++ b/vendor/github.com/stretchr/testify/suite/suite_test.go
@@ -5,8 +5,10 @@ import (
"io/ioutil"
"os"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
// SuiteRequireTwice is intended to test the usage of suite.Require in two
@@ -18,7 +20,7 @@ type SuiteRequireTwice struct{ Suite }
// A regression would result on these tests panicking rather than failing.
func TestSuiteRequireTwice(t *testing.T) {
ok := testing.RunTests(
- func(_, _ string) (bool, error) { return true, nil },
+ allTestsFilter,
[]testing.InternalTest{{
Name: "TestSuiteRequireTwice",
F: func(t *testing.T) {
@@ -58,6 +60,15 @@ type SuiteTester struct {
TestOneRunCount int
TestTwoRunCount int
NonTestMethodRunCount int
+
+ SuiteNameBefore []string
+ TestNameBefore []string
+
+ SuiteNameAfter []string
+ TestNameAfter []string
+
+ TimeBefore []time.Time
+ TimeAfter []time.Time
}
type SuiteSkipTester struct {
@@ -75,6 +86,18 @@ func (suite *SuiteTester) SetupSuite() {
suite.SetupSuiteRunCount++
}
+func (suite *SuiteTester) BeforeTest(suiteName, testName string) {
+ suite.SuiteNameBefore = append(suite.SuiteNameBefore, suiteName)
+ suite.TestNameBefore = append(suite.TestNameBefore, testName)
+ suite.TimeBefore = append(suite.TimeBefore, time.Now())
+}
+
+func (suite *SuiteTester) AfterTest(suiteName, testName string) {
+ suite.SuiteNameAfter = append(suite.SuiteNameAfter, suiteName)
+ suite.TestNameAfter = append(suite.TestNameAfter, testName)
+ suite.TimeAfter = append(suite.TimeAfter, time.Now())
+}
+
func (suite *SuiteSkipTester) SetupSuite() {
suite.SetupSuiteRunCount++
suite.T().Skip()
@@ -145,6 +168,35 @@ func TestRunSuite(t *testing.T) {
assert.Equal(t, suiteTester.SetupSuiteRunCount, 1)
assert.Equal(t, suiteTester.TearDownSuiteRunCount, 1)
+ assert.Equal(t, len(suiteTester.SuiteNameAfter), 3)
+ assert.Equal(t, len(suiteTester.SuiteNameBefore), 3)
+ assert.Equal(t, len(suiteTester.TestNameAfter), 3)
+ assert.Equal(t, len(suiteTester.TestNameBefore), 3)
+
+ assert.Contains(t, suiteTester.TestNameAfter, "TestOne")
+ assert.Contains(t, suiteTester.TestNameAfter, "TestTwo")
+ assert.Contains(t, suiteTester.TestNameAfter, "TestSkip")
+
+ assert.Contains(t, suiteTester.TestNameBefore, "TestOne")
+ assert.Contains(t, suiteTester.TestNameBefore, "TestTwo")
+ assert.Contains(t, suiteTester.TestNameBefore, "TestSkip")
+
+ for _, suiteName := range suiteTester.SuiteNameAfter {
+ assert.Equal(t, "SuiteTester", suiteName)
+ }
+
+ for _, suiteName := range suiteTester.SuiteNameBefore {
+ assert.Equal(t, "SuiteTester", suiteName)
+ }
+
+ for _, when := range suiteTester.TimeAfter {
+ assert.False(t, when.IsZero())
+ }
+
+ for _, when := range suiteTester.TimeBefore {
+ assert.False(t, when.IsZero())
+ }
+
// There are three test methods (TestOne, TestTwo, and TestSkip), so
// the SetupTest and TearDownTest methods (which should be run once for
// each test) should have been run three times.
@@ -216,16 +268,19 @@ func (sc *StdoutCapture) StopCapture() (string, error) {
}
func TestSuiteLogging(t *testing.T) {
- testT := testing.T{}
-
suiteLoggingTester := new(SuiteLoggingTester)
-
capture := StdoutCapture{}
+ internalTest := testing.InternalTest{
+ Name: "SomeTest",
+ F: func(subT *testing.T) {
+ Run(subT, suiteLoggingTester)
+ },
+ }
capture.StartCapture()
- Run(&testT, suiteLoggingTester)
+ testing.RunTests(allTestsFilter, []testing.InternalTest{internalTest})
output, err := capture.StopCapture()
-
- assert.Nil(t, err, "Got an error trying to capture stdout!")
+ require.NoError(t, err, "Got an error trying to capture stdout and stderr!")
+ require.NotEmpty(t, output, "output content must not be empty")
// Failed tests' output is always printed
assert.Contains(t, output, "TESTLOGFAIL")
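
The TestSuiteLogging rewrite replaces a zero-value testing.T, which was never safe to hand to Run, with a proper testing.InternalTest executed through testing.RunTests. The same pattern in isolation, with runIsolated as an invented helper name:

```Go
package example

import "testing"

// allTestsFilter mirrors the helper added in suite.go: it accepts every
// test name that testing.RunTests asks about.
var allTestsFilter = func(_, _ string) (bool, error) { return true, nil }

// runIsolated runs f under the real testing machinery, with its own
// pass/fail state, and reports whether it passed.
func runIsolated(name string, f func(*testing.T)) bool {
	return testing.RunTests(allTestsFilter, []testing.InternalTest{{
		Name: name,
		F:    f,
	}})
}
```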
diff --git a/vendor/github.com/cpanato/html2text/.gitignore b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/.gitignore
index daf913b1b..00268614f 100644
--- a/vendor/github.com/cpanato/html2text/.gitignore
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/.gitignore
@@ -20,5 +20,3 @@ _cgo_export.*
_testmain.go
*.exe
-*.test
-*.prof
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/.travis.yml
new file mode 100644
index 000000000..984e0736e
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+go:
+ - 1.5.4
+ - 1.6.3
+ - 1.7
+install:
+ - go get -v golang.org/x/tools/cmd/cover
+script:
+ - go test -v -tags=safe ./spew
+ - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
+after_success:
+ - go get -v github.com/mattn/goveralls
+ - export PATH=$PATH:$HOME/gopath/bin
+ - goveralls -coverprofile=profile.cov -service=travis-ci
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE
index bb6733231..c83641619 100644
--- a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE
@@ -1,6 +1,6 @@
ISC License
-Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/README.md
new file mode 100644
index 000000000..262430449
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/README.md
@@ -0,0 +1,205 @@
+go-spew
+=======
+
+[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)](https://travis-ci.org/davecgh/go-spew)
+[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
+[![Coverage Status](https://img.shields.io/coveralls/davecgh/go-spew.svg)](https://coveralls.io/r/davecgh/go-spew?branch=master)
+
+
+Go-spew implements a deep pretty printer for Go data structures to aid in
+debugging. A comprehensive suite of tests with 100% test coverage is provided
+to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
+report. Go-spew is licensed under the liberal ISC license, so it may be used in
+open source or commercial projects.
+
+If you're interested in reading about how this package came to life and some
+of the challenges involved in providing a deep pretty printer, there is a blog
+post about it
+[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
+
+## Documentation
+
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/davecgh/go-spew/spew)
+
+Full `go doc` style documentation for the project can be viewed online without
+installing this package by using the excellent GoDoc site here:
+http://godoc.org/github.com/davecgh/go-spew/spew
+
+You can also view the documentation locally once the package is installed with
+the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
+http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
+
+## Installation
+
+```bash
+$ go get -u github.com/davecgh/go-spew/spew
+```
+
+## Quick Start
+
+Add this import line to the file you're working in:
+
+```Go
+import "github.com/davecgh/go-spew/spew"
+```
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+
+```Go
+spew.Dump(myVar1, myVar2, ...)
+spew.Fdump(someWriter, myVar1, myVar2, ...)
+str := spew.Sdump(myVar1, myVar2, ...)
+```
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
+compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
+and pointer addresses):
+
+```Go
+spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+```
+
+## Debugging a Web Application Example
+
+Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
+
+```Go
+package main
+
+import (
+ "fmt"
+ "html"
+ "net/http"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/html")
+ fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
+ fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
+}
+
+func main() {
+ http.HandleFunc("/", handler)
+ http.ListenAndServe(":8080", nil)
+}
+```
+
+## Sample Dump Output
+
+```
+(main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) {
+ (string) "one": (bool) true
+ }
+}
+([]uint8) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+}
+```
+
+## Sample Formatter Output
+
+Double pointer to a uint8:
+```
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+```
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+```
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+```
+
+## Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available via the
+spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+```
+* Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+* MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+* DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+* DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables. This option
+ relies on access to the unsafe package, so it will not have any effect when
+ running in environments without access to the unsafe package such as Google
+ App Engine or with the "safe" build tag specified.
+ Pointer method invocation is enabled by default.
+
+* DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+* DisableCapacities
+ DisableCapacities specifies whether to disable the printing of capacities
+ for arrays, slices, maps and channels. This is useful when diffing data
+ structures in tests.
+
+* ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+* SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are supported,
+ with other types sorted according to the reflect.Value.String() output
+ which guarantees display stability. Natural map order is used by
+ default.
+
+* SpewKeys
+ SpewKeys specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only considered
+ if SortKeys is true.
+
+```
+
+## Unsafe Package Dependency
+
+This package relies on the unsafe package to perform some of the more advanced
+features, however it also supports a "limited" mode which allows it to work in
+environments where the unsafe package is not available. By default, it will
+operate in this mode on Google App Engine and when compiled with GopherJS. The
+"safe" build tag may also be specified to force the package to build without
+using the unsafe package.
+
+## License
+
+Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.
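
The options listed above all live on spew.ConfigState; besides mutating the global spew.Config, a local instance exposes the same API with isolated settings. A short sketch that uses only options documented in this README:

```Go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// A local ConfigState tuned for deterministic, diff-friendly output.
	cfg := spew.ConfigState{
		Indent:                  "\t", // the README's "popular alternative"
		SortKeys:                true, // stable map ordering
		DisablePointerAddresses: true, // addresses change run to run
		DisableCapacities:       true, // capacities are noise when diffing
	}
	fmt.Print(cfg.Sdump(map[string]int{"b": 2, "a": 1}))
}
```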
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/cov_report.sh
new file mode 100644
index 000000000..9579497e4
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/cov_report.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# This script uses gocov to generate a test coverage report.
+# The gocov tool may be obtained with the following command:
+# go get github.com/axw/gocov/gocov
+#
+# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
+
+# Check for gocov.
+if ! type gocov >/dev/null 2>&1; then
+ echo >&2 "This script requires the gocov tool."
+ echo >&2 "You may obtain it with the following command:"
+ echo >&2 "go get github.com/axw/gocov/gocov"
+ exit 1
+fi
+
+# Only run the cgo tests if gcc is installed.
+if type gcc >/dev/null 2>&1; then
+ (cd spew && gocov test -tags testcgo | gocov report)
+else
+ (cd spew && gocov test | gocov report)
+fi
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go
index d42a0bc4a..8a4a6589a 100644
--- a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2015 Dave Collins <dave@davec.name>
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
index e47a4e795..1fe3cf3d5 100644
--- a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2015 Dave Collins <dave@davec.name>
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go
index 14f02dc15..7c519ff47 100644
--- a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common_test.go
new file mode 100644
index 000000000..0f5ce47dc
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common_test.go
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew_test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+// custom type to test Stringer interface on non-pointer receiver.
+type stringer string
+
+// String implements the Stringer interface for testing invocation of custom
+// stringers on types with non-pointer receivers.
+func (s stringer) String() string {
+ return "stringer " + string(s)
+}
+
+// custom type to test Stringer interface on pointer receiver.
+type pstringer string
+
+// String implements the Stringer interface for testing invocation of custom
+// stringers on types with only pointer receivers.
+func (s *pstringer) String() string {
+ return "stringer " + string(*s)
+}
+
+// xref1 and xref2 are cross referencing structs for testing circular reference
+// detection.
+type xref1 struct {
+ ps2 *xref2
+}
+type xref2 struct {
+ ps1 *xref1
+}
+
+// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
+// reference for testing detection.
+type indirCir1 struct {
+ ps2 *indirCir2
+}
+type indirCir2 struct {
+ ps3 *indirCir3
+}
+type indirCir3 struct {
+ ps1 *indirCir1
+}
+
+// embed is used to test embedded structures.
+type embed struct {
+ a string
+}
+
+// embedwrap is used to test embedded structures.
+type embedwrap struct {
+ *embed
+ e *embed
+}
+
+// panicer is used to intentionally cause a panic for testing that spew
+// properly handles such panics.
+type panicer int
+
+func (p panicer) String() string {
+ panic("test panic")
+}
+
+// customError is used to test custom error interface invocation.
+type customError int
+
+func (e customError) Error() string {
+ return fmt.Sprintf("error: %d", int(e))
+}
+
+// stringizeWants converts a slice of wanted test output into a format suitable
+// for a test error message.
+func stringizeWants(wants []string) string {
+ s := ""
+ for i, want := range wants {
+ if i > 0 {
+ s += fmt.Sprintf("want%d: %s", i+1, want)
+ } else {
+ s += "want: " + want
+ }
+ }
+ return s
+}
+
+// testFailed returns whether or not a test failed by checking if the result
+// of the test is in the slice of wanted strings.
+func testFailed(result string, wants []string) bool {
+ for _, want := range wants {
+ if result == want {
+ return false
+ }
+ }
+ return true
+}
+
+type sortableStruct struct {
+ x int
+}
+
+func (ss sortableStruct) String() string {
+ return fmt.Sprintf("ss.%d", ss.x)
+}
+
+type unsortableStruct struct {
+ x int
+}
+
+type sortTestCase struct {
+ input []reflect.Value
+ expected []reflect.Value
+}
+
+func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
+ getInterfaces := func(values []reflect.Value) []interface{} {
+ interfaces := []interface{}{}
+ for _, v := range values {
+ interfaces = append(interfaces, v.Interface())
+ }
+ return interfaces
+ }
+
+ for _, test := range tests {
+ spew.SortValues(test.input, cs)
+ // reflect.DeepEqual cannot really make sense of reflect.Value,
+ // probably because of all the pointer tricks. For instance,
+ // v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{}
+ // instead.
+ input := getInterfaces(test.input)
+ expected := getInterfaces(test.expected)
+ if !reflect.DeepEqual(input, expected) {
+ t.Errorf("Sort mismatch:\n %v != %v", input, expected)
+ }
+ }
+}
+
+// TestSortValues ensures the sort functionality for reflect.Value based sorting
+// works as intended.
+func TestSortValues(t *testing.T) {
+ v := reflect.ValueOf
+
+ a := v("a")
+ b := v("b")
+ c := v("c")
+ embedA := v(embed{"a"})
+ embedB := v(embed{"b"})
+ embedC := v(embed{"c"})
+ tests := []sortTestCase{
+ // No values.
+ {
+ []reflect.Value{},
+ []reflect.Value{},
+ },
+ // Bools.
+ {
+ []reflect.Value{v(false), v(true), v(false)},
+ []reflect.Value{v(false), v(false), v(true)},
+ },
+ // Ints.
+ {
+ []reflect.Value{v(2), v(1), v(3)},
+ []reflect.Value{v(1), v(2), v(3)},
+ },
+ // Uints.
+ {
+ []reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
+ []reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
+ },
+ // Floats.
+ {
+ []reflect.Value{v(2.0), v(1.0), v(3.0)},
+ []reflect.Value{v(1.0), v(2.0), v(3.0)},
+ },
+ // Strings.
+ {
+ []reflect.Value{b, a, c},
+ []reflect.Value{a, b, c},
+ },
+ // Array
+ {
+ []reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
+ []reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
+ },
+ // Uintptrs.
+ {
+ []reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
+ []reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
+ },
+ // SortableStructs.
+ {
+ // Note: not sorted - DisableMethods is set.
+ []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+ []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+ },
+ // UnsortableStructs.
+ {
+ // Note: not sorted - SpewKeys is false.
+ []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+ []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+ },
+ // Invalid.
+ {
+ []reflect.Value{embedB, embedA, embedC},
+ []reflect.Value{embedB, embedA, embedC},
+ },
+ }
+ cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
+ helpTestSortValues(tests, &cs, t)
+}
+
+// TestSortValuesWithMethods ensures the sort functionality for reflect.Value
+// based sorting works as intended when using string methods.
+func TestSortValuesWithMethods(t *testing.T) {
+ v := reflect.ValueOf
+
+ a := v("a")
+ b := v("b")
+ c := v("c")
+ tests := []sortTestCase{
+ // Ints.
+ {
+ []reflect.Value{v(2), v(1), v(3)},
+ []reflect.Value{v(1), v(2), v(3)},
+ },
+ // Strings.
+ {
+ []reflect.Value{b, a, c},
+ []reflect.Value{a, b, c},
+ },
+ // SortableStructs.
+ {
+ []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+ []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
+ },
+ // UnsortableStructs.
+ {
+ // Note: not sorted - SpewKeys is false.
+ []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+ []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+ },
+ }
+ cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
+ helpTestSortValues(tests, &cs, t)
+}
+
+// TestSortValuesWithSpew ensures the sort functionality for reflect.Value
+// based sorting works as intended when using spew to stringify keys.
+func TestSortValuesWithSpew(t *testing.T) {
+ v := reflect.ValueOf
+
+ a := v("a")
+ b := v("b")
+ c := v("c")
+ tests := []sortTestCase{
+ // Ints.
+ {
+ []reflect.Value{v(2), v(1), v(3)},
+ []reflect.Value{v(1), v(2), v(3)},
+ },
+ // Strings.
+ {
+ []reflect.Value{b, a, c},
+ []reflect.Value{a, b, c},
+ },
+ // SortableStructs.
+ {
+ []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+ []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
+ },
+ // UnsortableStructs.
+ {
+ []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+ []reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
+ },
+ }
+ cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
+ helpTestSortValues(tests, &cs, t)
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go
index 555282723..2e3d22f31 100644
--- a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -67,6 +67,15 @@ type ConfigState struct {
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go
index 5be0c4060..aacaac6f1 100644
--- a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -91,6 +91,15 @@ The following configuration options are available:
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go
index a0ff95e27..df1d582a7 100644
--- a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
d.w.Write(closeParenBytes)
// Display pointer information.
- if len(pointerChain) > 0 {
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
@@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) {
case reflect.Map, reflect.String:
valueLen = v.Len()
}
- if valueLen != 0 || valueCap != 0 {
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
- if valueCap != 0 {
+ if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump_test.go
new file mode 100644
index 000000000..5aad9c7af
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump_test.go
@@ -0,0 +1,1042 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Test Summary:
+NOTE: For each test, a nil pointer, a single pointer and double pointer to the
+base test element are also tested to ensure proper indirection across all types.
+
+- Max int8, int16, int32, int64, int
+- Max uint8, uint16, uint32, uint64, uint
+- Boolean true and false
+- Standard complex64 and complex128
+- Array containing standard ints
+- Array containing type with custom formatter on pointer receiver only
+- Array containing interfaces
+- Array containing bytes
+- Slice containing standard float32 values
+- Slice containing type with custom formatter on pointer receiver only
+- Slice containing interfaces
+- Slice containing bytes
+- Nil slice
+- Standard string
+- Nil interface
+- Sub-interface
+- Map with string keys and int vals
+- Map with custom formatter type on pointer receiver only keys and vals
+- Map with interface keys and values
+- Map with nil interface value
+- Struct with primitives
+- Struct that contains another struct
+- Struct that contains custom type with Stringer pointer interface via both
+ exported and unexported fields
+- Struct that contains embedded struct and field to same struct
+- Uintptr to 0 (null pointer)
+- Uintptr address of real variable
+- Unsafe.Pointer to 0 (null pointer)
+- Unsafe.Pointer to address of real variable
+- Nil channel
+- Standard int channel
+- Function with no params and no returns
+- Function with param and no returns
+- Function with multiple params and multiple returns
+- Struct that is circular through self referencing
+- Structs that are circular through cross referencing
+- Structs that are indirectly circular
+- Type that panics in its Stringer interface
+*/
+
+package spew_test
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+ "unsafe"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+// dumpTest is used to describe a test to be performed against the Dump method.
+type dumpTest struct {
+ in interface{}
+ wants []string
+}
+
+// dumpTests houses all of the tests to be performed against the Dump method.
+var dumpTests = make([]dumpTest, 0)
+
+// addDumpTest is a helper function to append the passed input and desired
+// result to dumpTests.
+func addDumpTest(in interface{}, wants ...string) {
+ test := dumpTest{in, wants}
+ dumpTests = append(dumpTests, test)
+}
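+
+// As a rough sketch of the output shape these tests assert (illustrative
+// only; the expected strings below embed the real runtime addresses):
+//
+//   v := int8(127)
+//   spew.Dump(v)  // prints: (int8) 127
+//   spew.Dump(&v) // prints: (*int8)(0x...)(127)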
+
+func addIntDumpTests() {
+ // Max int8.
+ v := int8(127)
+ nv := (*int8)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "int8"
+ vs := "127"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+ // Max int16.
+ v2 := int16(32767)
+ nv2 := (*int16)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "int16"
+ v2s := "32767"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+
+ // Max int32.
+ v3 := int32(2147483647)
+ nv3 := (*int32)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "int32"
+ v3s := "2147483647"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+ addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+
+ // Max int64.
+ v4 := int64(9223372036854775807)
+ nv4 := (*int64)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "int64"
+ v4s := "9223372036854775807"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+ addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+ addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+ addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
+
+ // Max int.
+ v5 := int(2147483647)
+ nv5 := (*int)(nil)
+ pv5 := &v5
+ v5Addr := fmt.Sprintf("%p", pv5)
+ pv5Addr := fmt.Sprintf("%p", &pv5)
+ v5t := "int"
+ v5s := "2147483647"
+ addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+ addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
+ addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
+ addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
+}
+
+func addUintDumpTests() {
+ // Max uint8.
+ v := uint8(255)
+ nv := (*uint8)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "uint8"
+ vs := "255"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+ // Max uint16.
+ v2 := uint16(65535)
+ nv2 := (*uint16)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uint16"
+ v2s := "65535"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+
+ // Max uint32.
+ v3 := uint32(4294967295)
+ nv3 := (*uint32)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "uint32"
+ v3s := "4294967295"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+ addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+
+ // Max uint64.
+ v4 := uint64(18446744073709551615)
+ nv4 := (*uint64)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "uint64"
+ v4s := "18446744073709551615"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+ addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+ addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+ addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
+
+ // Max uint.
+ v5 := uint(4294967295)
+ nv5 := (*uint)(nil)
+ pv5 := &v5
+ v5Addr := fmt.Sprintf("%p", pv5)
+ pv5Addr := fmt.Sprintf("%p", &pv5)
+ v5t := "uint"
+ v5s := "4294967295"
+ addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+ addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
+ addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
+ addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
+}
+
+func addBoolDumpTests() {
+ // Boolean true.
+ v := bool(true)
+ nv := (*bool)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "bool"
+ vs := "true"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+ // Boolean false.
+ v2 := bool(false)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "bool"
+ v2s := "false"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+}
+
+func addFloatDumpTests() {
+ // Standard float32.
+ v := float32(3.1415)
+ nv := (*float32)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "float32"
+ vs := "3.1415"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+ // Standard float64.
+ v2 := float64(3.1415926)
+ nv2 := (*float64)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "float64"
+ v2s := "3.1415926"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+}
+
+func addComplexDumpTests() {
+ // Standard complex64.
+ v := complex(float32(6), -2)
+ nv := (*complex64)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "complex64"
+ vs := "(6-2i)"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+ // Standard complex128.
+ v2 := complex(float64(-6), 2)
+ nv2 := (*complex128)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "complex128"
+ v2s := "(-6+2i)"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+}
+
+func addArrayDumpTests() {
+ // Array containing standard ints.
+ v := [3]int{1, 2, 3}
+ vLen := fmt.Sprintf("%d", len(v))
+ vCap := fmt.Sprintf("%d", cap(v))
+ nv := (*[3]int)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "int"
+ vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" +
+ vt + ") 2,\n (" + vt + ") 3\n}"
+ addDumpTest(v, "([3]"+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*[3]"+vt+")(<nil>)\n")
+
+ // Array containing type with custom formatter on pointer receiver only.
+ v2i0 := pstringer("1")
+ v2i1 := pstringer("2")
+ v2i2 := pstringer("3")
+ v2 := [3]pstringer{v2i0, v2i1, v2i2}
+ v2i0Len := fmt.Sprintf("%d", len(v2i0))
+ v2i1Len := fmt.Sprintf("%d", len(v2i1))
+ v2i2Len := fmt.Sprintf("%d", len(v2i2))
+ v2Len := fmt.Sprintf("%d", len(v2))
+ v2Cap := fmt.Sprintf("%d", cap(v2))
+ nv2 := (*[3]pstringer)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "spew_test.pstringer"
+ v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t +
+ ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t +
+ ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t +
+ ") (len=" + v2i2Len + ") " + "stringer 3\n}"
+ v2s := v2sp
+ if spew.UnsafeDisabled {
+ v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t +
+ ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" +
+ v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len +
+ ") " + "\"3\"\n}"
+ }
+ addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n")
+ addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n")
+ addDumpTest(nv2, "(*[3]"+v2t+")(<nil>)\n")
+
+ // Array containing interfaces.
+ v3i0 := "one"
+ v3 := [3]interface{}{v3i0, int(2), uint(3)}
+ v3i0Len := fmt.Sprintf("%d", len(v3i0))
+ v3Len := fmt.Sprintf("%d", len(v3))
+ v3Cap := fmt.Sprintf("%d", cap(v3))
+ nv3 := (*[3]interface{})(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "[3]interface {}"
+ v3t2 := "string"
+ v3t3 := "int"
+ v3t4 := "uint"
+ v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
+ "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
+ v3t4 + ") 3\n}"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+ addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+
+ // Array containing bytes.
+ v4 := [34]byte{
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
+ 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x31, 0x32,
+ }
+ v4Len := fmt.Sprintf("%d", len(v4))
+ v4Cap := fmt.Sprintf("%d", cap(v4))
+ nv4 := (*[34]byte)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "[34]uint8"
+ v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
+ "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" +
+ " |............... |\n" +
+ " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" +
+ " |!\"#$%&'()*+,-./0|\n" +
+ " 00000020 31 32 " +
+ " |12|\n}"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+ addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+ addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+ addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
+}
+
+func addSliceDumpTests() {
+ // Slice containing standard float32 values.
+ v := []float32{3.14, 6.28, 12.56}
+ vLen := fmt.Sprintf("%d", len(v))
+ vCap := fmt.Sprintf("%d", cap(v))
+ nv := (*[]float32)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "float32"
+ vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" +
+ vt + ") 6.28,\n (" + vt + ") 12.56\n}"
+ addDumpTest(v, "([]"+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*[]"+vt+")(<nil>)\n")
+
+ // Slice containing type with custom formatter on pointer receiver only.
+ v2i0 := pstringer("1")
+ v2i1 := pstringer("2")
+ v2i2 := pstringer("3")
+ v2 := []pstringer{v2i0, v2i1, v2i2}
+ v2i0Len := fmt.Sprintf("%d", len(v2i0))
+ v2i1Len := fmt.Sprintf("%d", len(v2i1))
+ v2i2Len := fmt.Sprintf("%d", len(v2i2))
+ v2Len := fmt.Sprintf("%d", len(v2))
+ v2Cap := fmt.Sprintf("%d", cap(v2))
+ nv2 := (*[]pstringer)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "spew_test.pstringer"
+ v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" +
+ v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len +
+ ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " +
+ "stringer 3\n}"
+ addDumpTest(v2, "([]"+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*[]"+v2t+")(<nil>)\n")
+
+ // Slice containing interfaces.
+ v3i0 := "one"
+ v3 := []interface{}{v3i0, int(2), uint(3), nil}
+ v3i0Len := fmt.Sprintf("%d", len(v3i0))
+ v3Len := fmt.Sprintf("%d", len(v3))
+ v3Cap := fmt.Sprintf("%d", cap(v3))
+ nv3 := (*[]interface{})(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "[]interface {}"
+ v3t2 := "string"
+ v3t3 := "int"
+ v3t4 := "uint"
+ v3t5 := "interface {}"
+ v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
+ "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
+ v3t4 + ") 3,\n (" + v3t5 + ") <nil>\n}"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+ addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+
+ // Slice containing bytes.
+ v4 := []byte{
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
+ 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x31, 0x32,
+ }
+ v4Len := fmt.Sprintf("%d", len(v4))
+ v4Cap := fmt.Sprintf("%d", cap(v4))
+ nv4 := (*[]byte)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "[]uint8"
+ v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
+ "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" +
+ " |............... |\n" +
+ " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" +
+ " |!\"#$%&'()*+,-./0|\n" +
+ " 00000020 31 32 " +
+ " |12|\n}"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+ addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+ addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+ addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
+
+ // Nil slice.
+ v5 := []int(nil)
+ nv5 := (*[]int)(nil)
+ pv5 := &v5
+ v5Addr := fmt.Sprintf("%p", pv5)
+ pv5Addr := fmt.Sprintf("%p", &pv5)
+ v5t := "[]int"
+ v5s := "<nil>"
+ addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+ addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
+ addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
+ addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
+}
+
+func addStringDumpTests() {
+ // Standard string.
+ v := "test"
+ vLen := fmt.Sprintf("%d", len(v))
+ nv := (*string)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "string"
+ vs := "(len=" + vLen + ") \"test\""
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+}
+
+func addInterfaceDumpTests() {
+ // Nil interface.
+ var v interface{}
+ nv := (*interface{})(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "interface {}"
+ vs := "<nil>"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+ // Sub-interface.
+ v2 := interface{}(uint16(65535))
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uint16"
+ v2s := "65535"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+}
+
+func addMapDumpTests() {
+ // Map with string keys and int vals.
+ k := "one"
+ kk := "two"
+ m := map[string]int{k: 1, kk: 2}
+ klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up
+ kkLen := fmt.Sprintf("%d", len(kk))
+ mLen := fmt.Sprintf("%d", len(m))
+ nilMap := map[string]int(nil)
+ nm := (*map[string]int)(nil)
+ pm := &m
+ mAddr := fmt.Sprintf("%p", pm)
+ pmAddr := fmt.Sprintf("%p", &pm)
+ mt := "map[string]int"
+ mt1 := "string"
+ mt2 := "int"
+ ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " +
+ "\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen +
+ ") \"two\": (" + mt2 + ") 2\n}"
+ ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " +
+ "\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen +
+ ") \"one\": (" + mt2 + ") 1\n}"
+ addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n")
+ addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n",
+ "(*"+mt+")("+mAddr+")("+ms2+")\n")
+ addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n",
+ "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n")
+ addDumpTest(nm, "(*"+mt+")(<nil>)\n")
+ addDumpTest(nilMap, "("+mt+") <nil>\n")
+
+ // Map with custom formatter type on pointer receiver only keys and vals.
+ k2 := pstringer("one")
+ v2 := pstringer("1")
+ m2 := map[pstringer]pstringer{k2: v2}
+ k2Len := fmt.Sprintf("%d", len(k2))
+ v2Len := fmt.Sprintf("%d", len(v2))
+ m2Len := fmt.Sprintf("%d", len(m2))
+ nilMap2 := map[pstringer]pstringer(nil)
+ nm2 := (*map[pstringer]pstringer)(nil)
+ pm2 := &m2
+ m2Addr := fmt.Sprintf("%p", pm2)
+ pm2Addr := fmt.Sprintf("%p", &pm2)
+ m2t := "map[spew_test.pstringer]spew_test.pstringer"
+ m2t1 := "spew_test.pstringer"
+ m2t2 := "spew_test.pstringer"
+ m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " +
+ "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}"
+ if spew.UnsafeDisabled {
+ m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len +
+ ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len +
+ ") \"1\"\n}"
+ }
+ addDumpTest(m2, "("+m2t+") "+m2s+"\n")
+ addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n")
+ addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n")
+ addDumpTest(nm2, "(*"+m2t+")(<nil>)\n")
+ addDumpTest(nilMap2, "("+m2t+") <nil>\n")
+
+ // Map with interface keys and values.
+ k3 := "one"
+ k3Len := fmt.Sprintf("%d", len(k3))
+ m3 := map[interface{}]interface{}{k3: 1}
+ m3Len := fmt.Sprintf("%d", len(m3))
+ nilMap3 := map[interface{}]interface{}(nil)
+ nm3 := (*map[interface{}]interface{})(nil)
+ pm3 := &m3
+ m3Addr := fmt.Sprintf("%p", pm3)
+ pm3Addr := fmt.Sprintf("%p", &pm3)
+ m3t := "map[interface {}]interface {}"
+ m3t1 := "string"
+ m3t2 := "int"
+ m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " +
+ "\"one\": (" + m3t2 + ") 1\n}"
+ addDumpTest(m3, "("+m3t+") "+m3s+"\n")
+ addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n")
+ addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n")
+ addDumpTest(nm3, "(*"+m3t+")(<nil>)\n")
+ addDumpTest(nilMap3, "("+m3t+") <nil>\n")
+
+ // Map with nil interface value.
+ k4 := "nil"
+ k4Len := fmt.Sprintf("%d", len(k4))
+ m4 := map[string]interface{}{k4: nil}
+ m4Len := fmt.Sprintf("%d", len(m4))
+ nilMap4 := map[string]interface{}(nil)
+ nm4 := (*map[string]interface{})(nil)
+ pm4 := &m4
+ m4Addr := fmt.Sprintf("%p", pm4)
+ pm4Addr := fmt.Sprintf("%p", &pm4)
+ m4t := "map[string]interface {}"
+ m4t1 := "string"
+ m4t2 := "interface {}"
+ m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" +
+ " \"nil\": (" + m4t2 + ") <nil>\n}"
+ addDumpTest(m4, "("+m4t+") "+m4s+"\n")
+ addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n")
+ addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n")
+ addDumpTest(nm4, "(*"+m4t+")(<nil>)\n")
+ addDumpTest(nilMap4, "("+m4t+") <nil>\n")
+}
+
+func addStructDumpTests() {
+ // Struct with primitives.
+ type s1 struct {
+ a int8
+ b uint8
+ }
+ v := s1{127, 255}
+ nv := (*s1)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.s1"
+ vt2 := "int8"
+ vt3 := "uint8"
+ vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+ // Struct that contains another struct.
+ type s2 struct {
+ s1 s1
+ b bool
+ }
+ v2 := s2{s1{127, 255}, true}
+ nv2 := (*s2)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "spew_test.s2"
+ v2t2 := "spew_test.s1"
+ v2t3 := "int8"
+ v2t4 := "uint8"
+ v2t5 := "bool"
+ v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" +
+ v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+
+ // Struct that contains custom type with Stringer pointer interface via both
+ // exported and unexported fields.
+ type s3 struct {
+ s pstringer
+ S pstringer
+ }
+ v3 := s3{"test", "test2"}
+ nv3 := (*s3)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "spew_test.s3"
+ v3t2 := "spew_test.pstringer"
+ v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 +
+ ") (len=5) stringer test2\n}"
+ v3sp := v3s
+ if spew.UnsafeDisabled {
+ v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" +
+ v3t2 + ") (len=5) \"test2\"\n}"
+ v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" +
+ v3t2 + ") (len=5) stringer test2\n}"
+ }
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n")
+ addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+
+ // Struct that contains embedded struct and field to same struct.
+ e := embed{"embedstr"}
+ eLen := fmt.Sprintf("%d", len("embedstr"))
+ v4 := embedwrap{embed: &e, e: &e}
+ nv4 := (*embedwrap)(nil)
+ pv4 := &v4
+ eAddr := fmt.Sprintf("%p", &e)
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "spew_test.embedwrap"
+ v4t2 := "spew_test.embed"
+ v4t3 := "string"
+ v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 +
+ ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 +
+ ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" +
+ " \"embedstr\"\n })\n}"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+ addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
+ addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
+ addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
+}
+
+func addUintptrDumpTests() {
+ // Null pointer.
+ v := uintptr(0)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "uintptr"
+ vs := "<nil>"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+
+ // Address of real variable.
+ i := 1
+ v2 := uintptr(unsafe.Pointer(&i))
+ nv2 := (*uintptr)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uintptr"
+ v2s := fmt.Sprintf("%p", &i)
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+}
+
+func addUnsafePointerDumpTests() {
+ // Null pointer.
+ v := unsafe.Pointer(uintptr(0))
+ nv := (*unsafe.Pointer)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "unsafe.Pointer"
+ vs := "<nil>"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+ // Address of real variable.
+ i := 1
+ v2 := unsafe.Pointer(&i)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "unsafe.Pointer"
+ v2s := fmt.Sprintf("%p", &i)
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+}
+
+func addChanDumpTests() {
+ // Nil channel.
+ var v chan int
+ pv := &v
+ nv := (*chan int)(nil)
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "chan int"
+ vs := "<nil>"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+ // Real channel.
+ v2 := make(chan int)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "chan int"
+ v2s := fmt.Sprintf("%p", v2)
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+}
+
+func addFuncDumpTests() {
+ // Function with no params and no returns.
+ v := addIntDumpTests
+ nv := (*func())(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "func()"
+ vs := fmt.Sprintf("%p", v)
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+ // Function with param and no returns.
+ v2 := TestDump
+ nv2 := (*func(*testing.T))(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "func(*testing.T)"
+ v2s := fmt.Sprintf("%p", v2)
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+ addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
+
+ // Function with multiple params and multiple returns.
+ var v3 = func(i int, s string) (b bool, err error) {
+ return true, nil
+ }
+ nv3 := (*func(int, string) (bool, error))(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "func(int, string) (bool, error)"
+ v3s := fmt.Sprintf("%p", v3)
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
+ addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
+}
+
+func addCircularDumpTests() {
+ // Struct that is circular through self referencing.
+ type circular struct {
+ c *circular
+ }
+ v := circular{nil}
+ v.c = &v
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.circular"
+ vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" +
+ vAddr + ")(<already shown>)\n })\n}"
+ vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")(<already shown>)\n}"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n")
+
+ // Structs that are circular through cross referencing.
+ v2 := xref1{nil}
+ ts2 := xref2{&v2}
+ v2.ps2 = &ts2
+ pv2 := &v2
+ ts2Addr := fmt.Sprintf("%p", &ts2)
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "spew_test.xref1"
+ v2t2 := "spew_test.xref2"
+ v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
+ ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr +
+ ")(<already shown>)\n })\n })\n}"
+ v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
+ ")(" + v2Addr + ")(<already shown>)\n })\n}"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+ addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n")
+ addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n")
+
+ // Structs that are indirectly circular.
+ v3 := indirCir1{nil}
+ tic2 := indirCir2{nil}
+ tic3 := indirCir3{&v3}
+ tic2.ps3 = &tic3
+ v3.ps2 = &tic2
+ pv3 := &v3
+ tic2Addr := fmt.Sprintf("%p", &tic2)
+ tic3Addr := fmt.Sprintf("%p", &tic3)
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "spew_test.indirCir1"
+ v3t2 := "spew_test.indirCir2"
+ v3t3 := "spew_test.indirCir3"
+ v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
+ ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
+ ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr +
+ ")(<already shown>)\n })\n })\n })\n}"
+ v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
+ ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
+ ")(<already shown>)\n })\n })\n}"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+ addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n")
+ addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n")
+}
+
+func addPanicDumpTests() {
+ // Type that panics in its Stringer interface.
+ v := panicer(127)
+ nv := (*panicer)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.panicer"
+ vs := "(PANIC=test panic)127"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+}
+
+func addErrorDumpTests() {
+ // Type that has a custom Error interface.
+ v := customError(127)
+ nv := (*customError)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.customError"
+ vs := "error: 127"
+ addDumpTest(v, "("+vt+") "+vs+"\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+ addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+}
+
+// TestDump executes all of the tests described by dumpTests.
+func TestDump(t *testing.T) {
+ // Setup tests.
+ addIntDumpTests()
+ addUintDumpTests()
+ addBoolDumpTests()
+ addFloatDumpTests()
+ addComplexDumpTests()
+ addArrayDumpTests()
+ addSliceDumpTests()
+ addStringDumpTests()
+ addInterfaceDumpTests()
+ addMapDumpTests()
+ addStructDumpTests()
+ addUintptrDumpTests()
+ addUnsafePointerDumpTests()
+ addChanDumpTests()
+ addFuncDumpTests()
+ addCircularDumpTests()
+ addPanicDumpTests()
+ addErrorDumpTests()
+ addCgoDumpTests()
+
+ t.Logf("Running %d tests", len(dumpTests))
+ for i, test := range dumpTests {
+ buf := new(bytes.Buffer)
+ spew.Fdump(buf, test.in)
+ s := buf.String()
+ if testFailed(s, test.wants) {
+ t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants))
+ continue
+ }
+ }
+}
+
+func TestDumpSortedKeys(t *testing.T) {
+ cfg := spew.ConfigState{SortKeys: true}
+ s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"})
+ expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " +
+ "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " +
+ "(len=1) \"3\"\n" +
+ "}\n"
+ if s != expected {
+ t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
+ }
+
+ s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2})
+ expected = "(map[spew_test.stringer]int) (len=3) {\n" +
+ "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" +
+ "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" +
+ "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" +
+ "}\n"
+ if s != expected {
+ t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
+ }
+
+ s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2})
+ expected = "(map[spew_test.pstringer]int) (len=3) {\n" +
+ "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" +
+ "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" +
+ "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" +
+ "}\n"
+ if spew.UnsafeDisabled {
+ expected = "(map[spew_test.pstringer]int) (len=3) {\n" +
+ "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" +
+ "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" +
+ "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" +
+ "}\n"
+ }
+ if s != expected {
+ t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
+ }
+
+ s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})
+ expected = "(map[spew_test.customError]int) (len=3) {\n" +
+ "(spew_test.customError) error: 1: (int) 1,\n" +
+ "(spew_test.customError) error: 2: (int) 2,\n" +
+ "(spew_test.customError) error: 3: (int) 3\n" +
+ "}\n"
+ if s != expected {
+ t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
+ }
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
new file mode 100644
index 000000000..6ab180809
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when both cgo is supported and "-tags testcgo" is added to the go test
+// command line. This means the cgo tests are only added (and hence run) when
+// specifically requested. This configuration is used because spew itself
+// does not require cgo to run even though it does handle certain cgo types
+// specially. Rather than forcing all clients to require cgo and an external
+// C compiler just to run the tests, this scheme makes them optional.
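+//
+// A typical invocation that enables these tests might look like (an assumed
+// example; exact flags are up to the caller):
+//
+//   go test -tags testcgo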
+// +build cgo,testcgo
+
+package spew_test
+
+import (
+ "fmt"
+
+ "github.com/davecgh/go-spew/spew/testdata"
+)
+
+func addCgoDumpTests() {
+ // C char pointer.
+ v := testdata.GetCgoCharPointer()
+ nv := testdata.GetCgoNullCharPointer()
+ pv := &v
+ vcAddr := fmt.Sprintf("%p", v)
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "*testdata._Ctype_char"
+ vs := "116"
+ addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
+ addDumpTest(nv, "("+vt+")(<nil>)\n")
+
+ // C char array.
+ v2, v2l, v2c := testdata.GetCgoCharArray()
+ v2Len := fmt.Sprintf("%d", v2l)
+ v2Cap := fmt.Sprintf("%d", v2c)
+ v2t := "[6]testdata._Ctype_char"
+ v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
+ "{\n 00000000 74 65 73 74 32 00 " +
+ " |test2.|\n}"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+
+ // C unsigned char array.
+ v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
+ v3Len := fmt.Sprintf("%d", v3l)
+ v3Cap := fmt.Sprintf("%d", v3c)
+ v3t := "[6]testdata._Ctype_unsignedchar"
+ v3t2 := "[6]testdata._Ctype_uchar"
+ v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
+ "{\n 00000000 74 65 73 74 33 00 " +
+ " |test3.|\n}"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n")
+
+ // C signed char array.
+ v4, v4l, v4c := testdata.GetCgoSignedCharArray()
+ v4Len := fmt.Sprintf("%d", v4l)
+ v4Cap := fmt.Sprintf("%d", v4c)
+ v4t := "[6]testdata._Ctype_schar"
+ v4t2 := "testdata._Ctype_schar"
+ v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
+ "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
+ ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
+ ") 0\n}"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+
+ // C uint8_t array.
+ v5, v5l, v5c := testdata.GetCgoUint8tArray()
+ v5Len := fmt.Sprintf("%d", v5l)
+ v5Cap := fmt.Sprintf("%d", v5c)
+ v5t := "[6]testdata._Ctype_uint8_t"
+ v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
+ "{\n 00000000 74 65 73 74 35 00 " +
+ " |test5.|\n}"
+ addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+
+ // C typedefed unsigned char array.
+ v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
+ v6Len := fmt.Sprintf("%d", v6l)
+ v6Cap := fmt.Sprintf("%d", v6c)
+ v6t := "[6]testdata._Ctype_custom_uchar_t"
+ v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
+ "{\n 00000000 74 65 73 74 36 00 " +
+ " |test6.|\n}"
+ addDumpTest(v6, "("+v6t+") "+v6s+"\n")
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
new file mode 100644
index 000000000..52a0971fb
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2013 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when either cgo is not supported or "-tags testcgo" is not added to the go
+// test command line. This file intentionally does not set up any cgo tests in
+// this scenario.
+// +build !cgo !testcgo
+
+package spew_test
+
+func addCgoDumpTests() {
+ // Don't add any tests for cgo since this file is only compiled when
+ // there should not be any cgo tests.
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/example_test.go
new file mode 100644
index 000000000..c6ec8c6d5
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/example_test.go
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew_test
+
+import (
+ "fmt"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+type Flag int
+
+const (
+ flagOne Flag = iota
+ flagTwo
+)
+
+var flagStrings = map[Flag]string{
+ flagOne: "flagOne",
+ flagTwo: "flagTwo",
+}
+
+func (f Flag) String() string {
+ if s, ok := flagStrings[f]; ok {
+ return s
+ }
+ return fmt.Sprintf("Unknown flag (%d)", int(f))
+}
+
+type Bar struct {
+ data uintptr
+}
+
+type Foo struct {
+ unexportedField Bar
+ ExportedField map[interface{}]interface{}
+}
+
+// This example demonstrates how to use Dump to dump variables to stdout.
+func ExampleDump() {
+ // The following package level declarations are assumed for this example:
+ /*
+ type Flag int
+
+ const (
+ flagOne Flag = iota
+ flagTwo
+ )
+
+ var flagStrings = map[Flag]string{
+ flagOne: "flagOne",
+ flagTwo: "flagTwo",
+ }
+
+ func (f Flag) String() string {
+ if s, ok := flagStrings[f]; ok {
+ return s
+ }
+ return fmt.Sprintf("Unknown flag (%d)", int(f))
+ }
+
+ type Bar struct {
+ data uintptr
+ }
+
+ type Foo struct {
+ unexportedField Bar
+ ExportedField map[interface{}]interface{}
+ }
+ */
+
+ // Setup some sample data structures for the example.
+ bar := Bar{uintptr(0)}
+ s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
+ f := Flag(5)
+ b := []byte{
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
+ 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x31, 0x32,
+ }
+
+ // Dump!
+ spew.Dump(s1, f, b)
+
+ // Output:
+ // (spew_test.Foo) {
+ // unexportedField: (spew_test.Bar) {
+ // data: (uintptr) <nil>
+ // },
+ // ExportedField: (map[interface {}]interface {}) (len=1) {
+ // (string) (len=3) "one": (bool) true
+ // }
+ // }
+ // (spew_test.Flag) Unknown flag (5)
+ // ([]uint8) (len=34 cap=34) {
+ // 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ // 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ // 00000020 31 32 |12|
+ // }
+ //
+}
+
+// This example demonstrates how to use Printf to display a variable with a
+// format string and inline formatting.
+func ExamplePrintf() {
+ // Create a double pointer to a uint8.
+ ui8 := uint8(5)
+ pui8 := &ui8
+ ppui8 := &pui8
+
+ // Create a circular data type.
+ type circular struct {
+ ui8 uint8
+ c *circular
+ }
+ c := circular{ui8: 1}
+ c.c = &c
+
+ // Print!
+ spew.Printf("ppui8: %v\n", ppui8)
+ spew.Printf("circular: %v\n", c)
+
+ // Output:
+ // ppui8: <**>5
+ // circular: {1 <*>{1 <*><shown>}}
+}
+
+// This example demonstrates how to use a ConfigState.
+func ExampleConfigState() {
+ // Modify the indent level of the ConfigState only. The global
+ // configuration is not modified.
+ scs := spew.ConfigState{Indent: "\t"}
+
+ // Output using the ConfigState instance.
+ v := map[string]int{"one": 1}
+ scs.Printf("v: %v\n", v)
+ scs.Dump(v)
+
+ // Output:
+ // v: map[one:1]
+ // (map[string]int) (len=1) {
+ // (string) (len=3) "one": (int) 1
+ // }
+}
+
+// This example demonstrates how to use ConfigState.Dump to dump variables to
+// stdout.
+func ExampleConfigState_Dump() {
+ // See the top-level Dump example for details on the types used in this
+ // example.
+
+ // Create two ConfigState instances with different indentation.
+ scs := spew.ConfigState{Indent: "\t"}
+ scs2 := spew.ConfigState{Indent: " "}
+
+ // Setup some sample data structures for the example.
+ bar := Bar{uintptr(0)}
+ s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
+
+ // Dump using the ConfigState instances.
+ scs.Dump(s1)
+ scs2.Dump(s1)
+
+ // Output:
+ // (spew_test.Foo) {
+ // unexportedField: (spew_test.Bar) {
+ // data: (uintptr) <nil>
+ // },
+ // ExportedField: (map[interface {}]interface {}) (len=1) {
+ // (string) (len=3) "one": (bool) true
+ // }
+ // }
+ // (spew_test.Foo) {
+ // unexportedField: (spew_test.Bar) {
+ // data: (uintptr) <nil>
+ // },
+ // ExportedField: (map[interface {}]interface {}) (len=1) {
+ // (string) (len=3) "one": (bool) true
+ // }
+ // }
+ //
+}
+
+// This example demonstrates how to use ConfigState.Printf to display a variable
+// with a format string and inline formatting.
+func ExampleConfigState_Printf() {
+ // See the top-level Dump example for details on the types used in this
+ // example.
+
+ // Create two ConfigState instances and modify the method handling of the
+ // first ConfigState only.
+ scs := spew.NewDefaultConfig()
+ scs2 := spew.NewDefaultConfig()
+ scs.DisableMethods = true
+
+ // Alternatively
+ // scs := spew.ConfigState{Indent: " ", DisableMethods: true}
+ // scs2 := spew.ConfigState{Indent: " "}
+
+ // This is of type Flag which implements a Stringer and has raw value 1.
+ f := flagTwo
+
+ // Dump using the ConfigState instances.
+ scs.Printf("f: %v\n", f)
+ scs2.Printf("f: %v\n", f)
+
+ // Output:
+ // f: 1
+ // f: flagTwo
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go
index ecf3b80e2..c49875bac 100644
--- a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format_test.go
new file mode 100644
index 000000000..f9b93abe8
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format_test.go
@@ -0,0 +1,1558 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Test Summary:
+NOTE: For each test, a nil pointer, a single pointer, and a double pointer to
+the base test element are also tested to ensure proper indirection across all
+types.
+
+- Max int8, int16, int32, int64, int
+- Max uint8, uint16, uint32, uint64, uint
+- Boolean true and false
+- Standard complex64 and complex128
+- Array containing standard ints
+- Array containing type with custom formatter on pointer receiver only
+- Array containing interfaces
+- Slice containing standard float32 values
+- Slice containing type with custom formatter on pointer receiver only
+- Slice containing interfaces
+- Nil slice
+- Standard string
+- Nil interface
+- Sub-interface
+- Map with string keys and int vals
+- Map with keys and vals of a custom formatter type (pointer receiver only)
+- Map with interface keys and values
+- Map with nil interface value
+- Struct with primitives
+- Struct that contains another struct
+- Struct that contains custom type with Stringer pointer interface via both
+ exported and unexported fields
+- Struct that contains embedded struct and field to same struct
+- Uintptr to 0 (null pointer)
+- Uintptr address of real variable
+- Unsafe.Pointer to 0 (null pointer)
+- Unsafe.Pointer to address of real variable
+- Nil channel
+- Standard int channel
+- Function with no params and no returns
+- Function with param and no returns
+- Function with multiple params and multiple returns
+- Struct that is circular through self referencing
+- Structs that are circular through cross referencing
+- Structs that are indirectly circular
+- Type that panics in its Stringer interface
+- Type that has a custom Error interface
+- %x passthrough with uint
+- %#x passthrough with uint
+- %f passthrough with precision
+- %f passthrough with width and precision
+- %d passthrough with width
+- %q passthrough with string
+*/
+
+package spew_test
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+ "unsafe"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+// formatterTest is used to describe a test to be performed against NewFormatter.
+type formatterTest struct {
+ format string
+ in interface{}
+ wants []string
+}
+
+// formatterTests houses all of the tests to be performed against NewFormatter.
+var formatterTests = make([]formatterTest, 0)
+
+// addFormatterTest is a helper function to append the passed input and desired
+// result to formatterTests.
+func addFormatterTest(format string, in interface{}, wants ...string) {
+ test := formatterTest{format, in, wants}
+ formatterTests = append(formatterTests, test)
+}
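+
+// The registry above enables a compact table-driven style: each call appends
+// one entry, and supplying several "wants" values lets any of multiple
+// acceptable renderings (e.g. nondeterministic map ordering) satisfy the
+// test. A minimal sketch with hypothetical values, not one of the registered
+// tables:
+//
+//	addFormatterTest("%v", map[string]int{"a": 1, "b": 2},
+//		"map[a:1 b:2]", "map[b:2 a:1]")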
+
+func addIntFormatterTests() {
+ // Max int8.
+ v := int8(127)
+ nv := (*int8)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "int8"
+ vs := "127"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Max int16.
+ v2 := int16(32767)
+ nv2 := (*int16)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "int16"
+ v2s := "32767"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%v", nv2, "<nil>")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+ // Max int32.
+ v3 := int32(2147483647)
+ nv3 := (*int32)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "int32"
+ v3s := "2147483647"
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s)
+ addFormatterTest("%v", &pv3, "<**>"+v3s)
+ addFormatterTest("%v", nv3, "<nil>")
+ addFormatterTest("%+v", v3, v3s)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+
+ // Max int64.
+ v4 := int64(9223372036854775807)
+ nv4 := (*int64)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "int64"
+ v4s := "9223372036854775807"
+ addFormatterTest("%v", v4, v4s)
+ addFormatterTest("%v", pv4, "<*>"+v4s)
+ addFormatterTest("%v", &pv4, "<**>"+v4s)
+ addFormatterTest("%v", nv4, "<nil>")
+ addFormatterTest("%+v", v4, v4s)
+ addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+ addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%+v", nv4, "<nil>")
+ addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
+ addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
+ addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
+ addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
+
+ // Max int.
+ v5 := int(2147483647)
+ nv5 := (*int)(nil)
+ pv5 := &v5
+ v5Addr := fmt.Sprintf("%p", pv5)
+ pv5Addr := fmt.Sprintf("%p", &pv5)
+ v5t := "int"
+ v5s := "2147483647"
+ addFormatterTest("%v", v5, v5s)
+ addFormatterTest("%v", pv5, "<*>"+v5s)
+ addFormatterTest("%v", &pv5, "<**>"+v5s)
+ addFormatterTest("%v", nv5, "<nil>")
+ addFormatterTest("%+v", v5, v5s)
+ addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
+ addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
+ addFormatterTest("%+v", nv5, "<nil>")
+ addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
+ addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
+ addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
+ addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
+ addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
+ addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
+ addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
+ addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"<nil>")
+}
+
+func addUintFormatterTests() {
+ // Max uint8.
+ v := uint8(255)
+ nv := (*uint8)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "uint8"
+ vs := "255"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Max uint16.
+ v2 := uint16(65535)
+ nv2 := (*uint16)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uint16"
+ v2s := "65535"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%v", nv2, "<nil>")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+ // Max uint32.
+ v3 := uint32(4294967295)
+ nv3 := (*uint32)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "uint32"
+ v3s := "4294967295"
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s)
+ addFormatterTest("%v", &pv3, "<**>"+v3s)
+ addFormatterTest("%v", nv3, "<nil>")
+ addFormatterTest("%+v", v3, v3s)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+
+ // Max uint64.
+ v4 := uint64(18446744073709551615)
+ nv4 := (*uint64)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "uint64"
+ v4s := "18446744073709551615"
+ addFormatterTest("%v", v4, v4s)
+ addFormatterTest("%v", pv4, "<*>"+v4s)
+ addFormatterTest("%v", &pv4, "<**>"+v4s)
+ addFormatterTest("%v", nv4, "<nil>")
+ addFormatterTest("%+v", v4, v4s)
+ addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+ addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%+v", nv4, "<nil>")
+ addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
+ addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
+ addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
+ addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
+
+ // Max uint.
+ v5 := uint(4294967295)
+ nv5 := (*uint)(nil)
+ pv5 := &v5
+ v5Addr := fmt.Sprintf("%p", pv5)
+ pv5Addr := fmt.Sprintf("%p", &pv5)
+ v5t := "uint"
+ v5s := "4294967295"
+ addFormatterTest("%v", v5, v5s)
+ addFormatterTest("%v", pv5, "<*>"+v5s)
+ addFormatterTest("%v", &pv5, "<**>"+v5s)
+ addFormatterTest("%v", nv5, "<nil>")
+ addFormatterTest("%+v", v5, v5s)
+ addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
+ addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
+ addFormatterTest("%+v", nv5, "<nil>")
+ addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
+ addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
+ addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
+ addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
+ addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
+ addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
+ addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
+ addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
+}
+
+func addBoolFormatterTests() {
+ // Boolean true.
+ v := bool(true)
+ nv := (*bool)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "bool"
+ vs := "true"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Boolean false.
+ v2 := bool(false)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "bool"
+ v2s := "false"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addFloatFormatterTests() {
+ // Standard float32.
+ v := float32(3.1415)
+ nv := (*float32)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "float32"
+ vs := "3.1415"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Standard float64.
+ v2 := float64(3.1415926)
+ nv2 := (*float64)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "float64"
+ v2s := "3.1415926"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+}
+
+func addComplexFormatterTests() {
+ // Standard complex64.
+ v := complex(float32(6), -2)
+ nv := (*complex64)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "complex64"
+ vs := "(6-2i)"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Standard complex128.
+ v2 := complex(float64(-6), 2)
+ nv2 := (*complex128)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "complex128"
+ v2s := "(-6+2i)"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+}
+
+func addArrayFormatterTests() {
+ // Array containing standard ints.
+ v := [3]int{1, 2, 3}
+ nv := (*[3]int)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "[3]int"
+ vs := "[1 2 3]"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Array containing type with custom formatter on pointer receiver only.
+ v2 := [3]pstringer{"1", "2", "3"}
+ nv2 := (*[3]pstringer)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "[3]spew_test.pstringer"
+ v2sp := "[stringer 1 stringer 2 stringer 3]"
+ v2s := v2sp
+ if spew.UnsafeDisabled {
+ v2s = "[1 2 3]"
+ }
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2sp)
+ addFormatterTest("%v", &pv2, "<**>"+v2sp)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+ // Array containing interfaces.
+ v3 := [3]interface{}{"one", int(2), uint(3)}
+ nv3 := (*[3]interface{})(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "[3]interface {}"
+ v3t2 := "string"
+ v3t3 := "int"
+ v3t4 := "uint"
+ v3s := "[one 2 3]"
+ v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]"
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s)
+ addFormatterTest("%v", &pv3, "<**>"+v3s)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%+v", v3, v3s)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
+}
+
+func addSliceFormatterTests() {
+ // Slice containing standard float32 values.
+ v := []float32{3.14, 6.28, 12.56}
+ nv := (*[]float32)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "[]float32"
+ vs := "[3.14 6.28 12.56]"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Slice containing type with custom formatter on pointer receiver only.
+ v2 := []pstringer{"1", "2", "3"}
+ nv2 := (*[]pstringer)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "[]spew_test.pstringer"
+ v2s := "[stringer 1 stringer 2 stringer 3]"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+ // Slice containing interfaces.
+ v3 := []interface{}{"one", int(2), uint(3), nil}
+ nv3 := (*[]interface{})(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "[]interface {}"
+ v3t2 := "string"
+ v3t3 := "int"
+ v3t4 := "uint"
+ v3t5 := "interface {}"
+ v3s := "[one 2 3 <nil>]"
+ v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 +
+ ")<nil>]"
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s)
+ addFormatterTest("%v", &pv3, "<**>"+v3s)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%+v", v3, v3s)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
+
+ // Nil slice.
+ var v4 []int
+ nv4 := (*[]int)(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "[]int"
+ v4s := "<nil>"
+ addFormatterTest("%v", v4, v4s)
+ addFormatterTest("%v", pv4, "<*>"+v4s)
+ addFormatterTest("%v", &pv4, "<**>"+v4s)
+ addFormatterTest("%+v", nv4, "<nil>")
+ addFormatterTest("%+v", v4, v4s)
+ addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+ addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%+v", nv4, "<nil>")
+ addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
+ addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
+ addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
+ addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
+ addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
+}
+
+func addStringFormatterTests() {
+ // Standard string.
+ v := "test"
+ nv := (*string)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "string"
+ vs := "test"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+}
+
+func addInterfaceFormatterTests() {
+ // Nil interface.
+ var v interface{}
+ nv := (*interface{})(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "interface {}"
+ vs := "<nil>"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Sub-interface.
+ v2 := interface{}(uint16(65535))
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uint16"
+ v2s := "65535"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addMapFormatterTests() {
+ // Map with string keys and int vals.
+ v := map[string]int{"one": 1, "two": 2}
+ nilMap := map[string]int(nil)
+ nv := (*map[string]int)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "map[string]int"
+ vs := "map[one:1 two:2]"
+ vs2 := "map[two:2 one:1]"
+ addFormatterTest("%v", v, vs, vs2)
+ addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2)
+ addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2)
+ addFormatterTest("%+v", nilMap, "<nil>")
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs, vs2)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs,
+ "<**>("+pvAddr+"->"+vAddr+")"+vs2)
+ addFormatterTest("%+v", nilMap, "<nil>")
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2)
+ addFormatterTest("%#v", nilMap, "("+vt+")"+"<nil>")
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs,
+ "(*"+vt+")("+vAddr+")"+vs2)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs,
+ "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2)
+ addFormatterTest("%#+v", nilMap, "("+vt+")"+"<nil>")
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+	// Map with keys and vals of a custom formatter type (pointer receiver only).
+ v2 := map[pstringer]pstringer{"one": "1"}
+ nv2 := (*map[pstringer]pstringer)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "map[spew_test.pstringer]spew_test.pstringer"
+ v2s := "map[stringer one:stringer 1]"
+ if spew.UnsafeDisabled {
+ v2s = "map[one:1]"
+ }
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+ // Map with interface keys and values.
+ v3 := map[interface{}]interface{}{"one": 1}
+ nv3 := (*map[interface{}]interface{})(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "map[interface {}]interface {}"
+ v3t1 := "string"
+ v3t2 := "int"
+ v3s := "map[one:1]"
+ v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]"
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s)
+ addFormatterTest("%v", &pv3, "<**>"+v3s)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%+v", v3, v3s)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
+ addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
+
+	// Map with a nil interface value.
+ v4 := map[string]interface{}{"nil": nil}
+ nv4 := (*map[string]interface{})(nil)
+ pv4 := &v4
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "map[string]interface {}"
+ v4t1 := "interface {}"
+ v4s := "map[nil:<nil>]"
+ v4s2 := "map[nil:(" + v4t1 + ")<nil>]"
+ addFormatterTest("%v", v4, v4s)
+ addFormatterTest("%v", pv4, "<*>"+v4s)
+ addFormatterTest("%v", &pv4, "<**>"+v4s)
+ addFormatterTest("%+v", nv4, "<nil>")
+ addFormatterTest("%+v", v4, v4s)
+ addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
+ addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
+ addFormatterTest("%+v", nv4, "<nil>")
+ addFormatterTest("%#v", v4, "("+v4t+")"+v4s2)
+ addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2)
+ addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2)
+ addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
+ addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2)
+ addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2)
+ addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2)
+ addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
+}
+
+func addStructFormatterTests() {
+ // Struct with primitives.
+ type s1 struct {
+ a int8
+ b uint8
+ }
+ v := s1{127, 255}
+ nv := (*s1)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.s1"
+ vt2 := "int8"
+ vt3 := "uint8"
+ vs := "{127 255}"
+ vs2 := "{a:127 b:255}"
+ vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs2)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs3)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs3)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs3)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Struct that contains another struct.
+ type s2 struct {
+ s1 s1
+ b bool
+ }
+ v2 := s2{s1{127, 255}, true}
+ nv2 := (*s2)(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "spew_test.s2"
+ v2t2 := "spew_test.s1"
+ v2t3 := "int8"
+ v2t4 := "uint8"
+ v2t5 := "bool"
+ v2s := "{{127 255} true}"
+ v2s2 := "{s1:{a:127 b:255} b:true}"
+ v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" +
+ v2t5 + ")true}"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%+v", v2, v2s2)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s3)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+ // Struct that contains custom type with Stringer pointer interface via both
+ // exported and unexported fields.
+ type s3 struct {
+ s pstringer
+ S pstringer
+ }
+ v3 := s3{"test", "test2"}
+ nv3 := (*s3)(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "spew_test.s3"
+ v3t2 := "spew_test.pstringer"
+ v3s := "{stringer test stringer test2}"
+ v3sp := v3s
+ v3s2 := "{s:stringer test S:stringer test2}"
+ v3s2p := v3s2
+ v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}"
+ v3s3p := v3s3
+ if spew.UnsafeDisabled {
+ v3s = "{test test2}"
+ v3sp = "{test stringer test2}"
+ v3s2 = "{s:test S:test2}"
+ v3s2p = "{s:test S:stringer test2}"
+ v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}"
+ v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}"
+ }
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3sp)
+ addFormatterTest("%v", &pv3, "<**>"+v3sp)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%+v", v3, v3s2)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s3)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p)
+ addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
+
+ // Struct that contains embedded struct and field to same struct.
+ e := embed{"embedstr"}
+ v4 := embedwrap{embed: &e, e: &e}
+ nv4 := (*embedwrap)(nil)
+ pv4 := &v4
+ eAddr := fmt.Sprintf("%p", &e)
+ v4Addr := fmt.Sprintf("%p", pv4)
+ pv4Addr := fmt.Sprintf("%p", &pv4)
+ v4t := "spew_test.embedwrap"
+ v4t2 := "spew_test.embed"
+ v4t3 := "string"
+ v4s := "{<*>{embedstr} <*>{embedstr}}"
+ v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr +
+ "){a:embedstr}}"
+ v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 +
+ "){a:(" + v4t3 + ")embedstr}}"
+ v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 +
+ ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}"
+ addFormatterTest("%v", v4, v4s)
+ addFormatterTest("%v", pv4, "<*>"+v4s)
+ addFormatterTest("%v", &pv4, "<**>"+v4s)
+ addFormatterTest("%+v", nv4, "<nil>")
+ addFormatterTest("%+v", v4, v4s2)
+ addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2)
+ addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2)
+ addFormatterTest("%+v", nv4, "<nil>")
+ addFormatterTest("%#v", v4, "("+v4t+")"+v4s3)
+ addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3)
+ addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3)
+ addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
+ addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4)
+ addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4)
+ addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4)
+ addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
+}
+
+func addUintptrFormatterTests() {
+ // Null pointer.
+ v := uintptr(0)
+ nv := (*uintptr)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "uintptr"
+ vs := "<nil>"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Address of real variable.
+ i := 1
+ v2 := uintptr(unsafe.Pointer(&i))
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "uintptr"
+ v2s := fmt.Sprintf("%p", &i)
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addUnsafePointerFormatterTests() {
+ // Null pointer.
+ v := unsafe.Pointer(uintptr(0))
+ nv := (*unsafe.Pointer)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "unsafe.Pointer"
+ vs := "<nil>"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Address of real variable.
+ i := 1
+ v2 := unsafe.Pointer(&i)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "unsafe.Pointer"
+ v2s := fmt.Sprintf("%p", &i)
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addChanFormatterTests() {
+ // Nil channel.
+ var v chan int
+ pv := &v
+ nv := (*chan int)(nil)
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "chan int"
+ vs := "<nil>"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Real channel.
+ v2 := make(chan int)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "chan int"
+ v2s := fmt.Sprintf("%p", v2)
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+}
+
+func addFuncFormatterTests() {
+ // Function with no params and no returns.
+ v := addIntFormatterTests
+ nv := (*func())(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "func()"
+ vs := fmt.Sprintf("%p", v)
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+
+ // Function with param and no returns.
+ v2 := TestFormatter
+ nv2 := (*func(*testing.T))(nil)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "func(*testing.T)"
+ v2s := fmt.Sprintf("%p", v2)
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s)
+ addFormatterTest("%v", &pv2, "<**>"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%+v", v2, v2s)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%+v", nv2, "<nil>")
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
+ addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
+ addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
+
+ // Function with multiple params and multiple returns.
+ var v3 = func(i int, s string) (b bool, err error) {
+ return true, nil
+ }
+ nv3 := (*func(int, string) (bool, error))(nil)
+ pv3 := &v3
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "func(int, string) (bool, error)"
+ v3s := fmt.Sprintf("%p", v3)
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s)
+ addFormatterTest("%v", &pv3, "<**>"+v3s)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%+v", v3, v3s)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%+v", nv3, "<nil>")
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
+ addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
+ addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
+}
+
+func addCircularFormatterTests() {
+ // Struct that is circular through self referencing.
+ type circular struct {
+ c *circular
+ }
+ v := circular{nil}
+ v.c = &v
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.circular"
+ vs := "{<*>{<*><shown>}}"
+ vs2 := "{<*><shown>}"
+ vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")<shown>}}"
+ vs4 := "{c:<*>(" + vAddr + ")<shown>}"
+ vs5 := "{c:(*" + vt + "){c:(*" + vt + ")<shown>}}"
+ vs6 := "{c:(*" + vt + ")<shown>}"
+ vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr +
+ ")<shown>}}"
+ vs8 := "{c:(*" + vt + ")(" + vAddr + ")<shown>}"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs2)
+ addFormatterTest("%v", &pv, "<**>"+vs2)
+ addFormatterTest("%+v", v, vs3)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4)
+ addFormatterTest("%#v", v, "("+vt+")"+vs5)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs6)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6)
+ addFormatterTest("%#+v", v, "("+vt+")"+vs7)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8)
+
+ // Structs that are circular through cross referencing.
+ v2 := xref1{nil}
+ ts2 := xref2{&v2}
+ v2.ps2 = &ts2
+ pv2 := &v2
+ ts2Addr := fmt.Sprintf("%p", &ts2)
+ v2Addr := fmt.Sprintf("%p", pv2)
+ pv2Addr := fmt.Sprintf("%p", &pv2)
+ v2t := "spew_test.xref1"
+ v2t2 := "spew_test.xref2"
+ v2s := "{<*>{<*>{<*><shown>}}}"
+ v2s2 := "{<*>{<*><shown>}}"
+ v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" +
+ ts2Addr + ")<shown>}}}"
+ v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")<shown>}}"
+ v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 +
+ ")<shown>}}}"
+ v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")<shown>}}"
+ v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t +
+ ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr +
+ ")<shown>}}}"
+ v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t +
+ ")(" + v2Addr + ")<shown>}}"
+ addFormatterTest("%v", v2, v2s)
+ addFormatterTest("%v", pv2, "<*>"+v2s2)
+ addFormatterTest("%v", &pv2, "<**>"+v2s2)
+ addFormatterTest("%+v", v2, v2s3)
+ addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4)
+ addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4)
+ addFormatterTest("%#v", v2, "("+v2t+")"+v2s5)
+ addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6)
+ addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6)
+ addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7)
+ addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8)
+ addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8)
+
+ // Structs that are indirectly circular.
+ v3 := indirCir1{nil}
+ tic2 := indirCir2{nil}
+ tic3 := indirCir3{&v3}
+ tic2.ps3 = &tic3
+ v3.ps2 = &tic2
+ pv3 := &v3
+ tic2Addr := fmt.Sprintf("%p", &tic2)
+ tic3Addr := fmt.Sprintf("%p", &tic3)
+ v3Addr := fmt.Sprintf("%p", pv3)
+ pv3Addr := fmt.Sprintf("%p", &pv3)
+ v3t := "spew_test.indirCir1"
+ v3t2 := "spew_test.indirCir2"
+ v3t3 := "spew_test.indirCir3"
+ v3s := "{<*>{<*>{<*>{<*><shown>}}}}"
+ v3s2 := "{<*>{<*>{<*><shown>}}}"
+ v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" +
+ v3Addr + "){ps2:<*>(" + tic2Addr + ")<shown>}}}}"
+ v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" +
+ v3Addr + ")<shown>}}}"
+ v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t +
+ "){ps2:(*" + v3t2 + ")<shown>}}}}"
+ v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t +
+ ")<shown>}}}"
+ v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" +
+ tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 +
+ ")(" + tic2Addr + ")<shown>}}}}"
+ v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" +
+ tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")<shown>}}}"
+ addFormatterTest("%v", v3, v3s)
+ addFormatterTest("%v", pv3, "<*>"+v3s2)
+ addFormatterTest("%v", &pv3, "<**>"+v3s2)
+ addFormatterTest("%+v", v3, v3s3)
+ addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4)
+ addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4)
+ addFormatterTest("%#v", v3, "("+v3t+")"+v3s5)
+ addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6)
+ addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6)
+ addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7)
+ addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8)
+ addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8)
+}
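+
+// The "<shown>" marker in the expected strings above is spew's cycle guard:
+// once a pointer has already been displayed, revisiting it prints <shown>
+// rather than recursing forever. A minimal hypothetical sketch:
+//
+//	type node struct{ c *node }
+//	n := node{}
+//	n.c = &n
+//	spew.Printf("%v\n", n) // {<*>{<*><shown>}}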
+
+func addPanicFormatterTests() {
+ // Type that panics in its Stringer interface.
+ v := panicer(127)
+ nv := (*panicer)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.panicer"
+ vs := "(PANIC=test panic)127"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+}
+
+func addErrorFormatterTests() {
+ // Type that has a custom Error interface.
+ v := customError(127)
+ nv := (*customError)(nil)
+ pv := &v
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "spew_test.customError"
+ vs := "error: 127"
+ addFormatterTest("%v", v, vs)
+ addFormatterTest("%v", pv, "<*>"+vs)
+ addFormatterTest("%v", &pv, "<**>"+vs)
+ addFormatterTest("%v", nv, "<nil>")
+ addFormatterTest("%+v", v, vs)
+ addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+ addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%+v", nv, "<nil>")
+ addFormatterTest("%#v", v, "("+vt+")"+vs)
+ addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+ addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+ addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+ addFormatterTest("%#+v", v, "("+vt+")"+vs)
+ addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+ addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+ addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+}
+
+func addPassthroughFormatterTests() {
+ // %x passthrough with uint.
+ v := uint(4294967295)
+ pv := &v
+ vAddr := fmt.Sprintf("%x", pv)
+ pvAddr := fmt.Sprintf("%x", &pv)
+ vs := "ffffffff"
+ addFormatterTest("%x", v, vs)
+ addFormatterTest("%x", pv, vAddr)
+ addFormatterTest("%x", &pv, pvAddr)
+
+ // %#x passthrough with uint.
+ v2 := int(2147483647)
+ pv2 := &v2
+ v2Addr := fmt.Sprintf("%#x", pv2)
+ pv2Addr := fmt.Sprintf("%#x", &pv2)
+ v2s := "0x7fffffff"
+ addFormatterTest("%#x", v2, v2s)
+ addFormatterTest("%#x", pv2, v2Addr)
+ addFormatterTest("%#x", &pv2, pv2Addr)
+
+ // %f passthrough with precision.
+ addFormatterTest("%.2f", 3.1415, "3.14")
+ addFormatterTest("%.3f", 3.1415, "3.142")
+ addFormatterTest("%.4f", 3.1415, "3.1415")
+
+ // %f passthrough with width and precision.
+ addFormatterTest("%5.2f", 3.1415, " 3.14")
+ addFormatterTest("%6.3f", 3.1415, " 3.142")
+ addFormatterTest("%7.4f", 3.1415, " 3.1415")
+
+ // %d passthrough with width.
+ addFormatterTest("%3d", 127, "127")
+ addFormatterTest("%4d", 127, " 127")
+ addFormatterTest("%5d", 127, " 127")
+
+ // %q passthrough with string.
+ addFormatterTest("%q", "test", "\"test\"")
+}
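+
+// For the passthrough cases above, spew defers entirely to the fmt package,
+// so its output is expected to match fmt verb-for-verb. A minimal sketch,
+// illustrative rather than one of the registered tests:
+//
+//	same := spew.Sprintf("%.2f", 3.1415) == fmt.Sprintf("%.2f", 3.1415)
+//	fmt.Println(same) // true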
+
+// TestFormatter executes all of the tests described by formatterTests.
+func TestFormatter(t *testing.T) {
+ // Setup tests.
+ addIntFormatterTests()
+ addUintFormatterTests()
+ addBoolFormatterTests()
+ addFloatFormatterTests()
+ addComplexFormatterTests()
+ addArrayFormatterTests()
+ addSliceFormatterTests()
+ addStringFormatterTests()
+ addInterfaceFormatterTests()
+ addMapFormatterTests()
+ addStructFormatterTests()
+ addUintptrFormatterTests()
+ addUnsafePointerFormatterTests()
+ addChanFormatterTests()
+ addFuncFormatterTests()
+ addCircularFormatterTests()
+ addPanicFormatterTests()
+ addErrorFormatterTests()
+ addPassthroughFormatterTests()
+
+ t.Logf("Running %d tests", len(formatterTests))
+ for i, test := range formatterTests {
+ buf := new(bytes.Buffer)
+ spew.Fprintf(buf, test.format, test.in)
+ s := buf.String()
+ if testFailed(s, test.wants) {
+ t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s,
+ stringizeWants(test.wants))
+ continue
+ }
+ }
+}
+
+type testStruct struct {
+ x int
+}
+
+func (ts testStruct) String() string {
+ return fmt.Sprintf("ts.%d", ts.x)
+}
+
+type testStructP struct {
+ x int
+}
+
+func (ts *testStructP) String() string {
+ return fmt.Sprintf("ts.%d", ts.x)
+}
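+
+// Note the receiver difference between the two types above: testStruct
+// satisfies fmt.Stringer by value, while testStructP does so only through a
+// pointer, which is why the pointer-receiver map case below is guarded by
+// spew.UnsafeDisabled. A compile-time sketch of the distinction:
+//
+//	var _ fmt.Stringer = testStruct{}        // value receiver suffices
+//	var _ fmt.Stringer = (*testStructP)(nil) // only *testStructP implements it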
+
+func TestPrintSortedKeys(t *testing.T) {
+ cfg := spew.ConfigState{SortKeys: true}
+ s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"})
+ expected := "map[1:1 2:2 3:3]"
+ if s != expected {
+ t.Errorf("Sorted keys mismatch 1:\n %v %v", s, expected)
+ }
+
+ s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2})
+ expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]"
+ if s != expected {
+ t.Errorf("Sorted keys mismatch 2:\n %v %v", s, expected)
+ }
+
+ s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2})
+ expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]"
+ if spew.UnsafeDisabled {
+ expected = "map[1:1 2:2 3:3]"
+ }
+ if s != expected {
+ t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected)
+ }
+
+ s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2})
+ expected = "map[ts.1:1 ts.2:2 ts.3:3]"
+ if s != expected {
+ t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected)
+ }
+
+ if !spew.UnsafeDisabled {
+ s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2})
+ expected = "map[ts.1:1 ts.2:2 ts.3:3]"
+ if s != expected {
+ t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected)
+ }
+ }
+
+ s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})
+ expected = "map[error: 1:1 error: 2:2 error: 3:3]"
+ if s != expected {
+ t.Errorf("Sorted keys mismatch 6:\n %v %v", s, expected)
+ }
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/internal_test.go
new file mode 100644
index 000000000..20a9cfefc
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/internal_test.go
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+This test file is part of the spew package rather than the spew_test
+package because it needs access to internals to properly test certain cases
+which are not possible via the public interface since they should never happen.
+*/
+
+package spew
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+)
+
+// dummyFmtState implements a fake fmt.State to use for testing invalid
+// reflect.Value handling. This is necessary because the fmt package catches
+// invalid values before invoking the formatter on them.
+type dummyFmtState struct {
+ bytes.Buffer
+}
+
+func (dfs *dummyFmtState) Flag(f int) bool {
+	return f == int('+')
+}
+
+func (dfs *dummyFmtState) Precision() (int, bool) {
+ return 0, false
+}
+
+func (dfs *dummyFmtState) Width() (int, bool) {
+ return 0, false
+}
+
+// TestInvalidReflectValue ensures the dump and formatter code handles an
+// invalid reflect value properly. This needs access to internal state since it
+// should never happen in real code and therefore can't be tested via the public
+// API.
+func TestInvalidReflectValue(t *testing.T) {
+ i := 1
+
+ // Dump invalid reflect value.
+ v := new(reflect.Value)
+ buf := new(bytes.Buffer)
+ d := dumpState{w: buf, cs: &Config}
+ d.dump(*v)
+ s := buf.String()
+ want := "<invalid>"
+ if s != want {
+ t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
+ }
+ i++
+
+ // Formatter invalid reflect value.
+ buf2 := new(dummyFmtState)
+ f := formatState{value: *v, cs: &Config, fs: buf2}
+ f.format(*v)
+ s = buf2.String()
+ want = "<invalid>"
+ if s != want {
+ t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
+ }
+}
+
+// SortValues makes the internal sortValues function available to the test
+// package.
+func SortValues(values []reflect.Value, cs *ConfigState) {
+ sortValues(values, cs)
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
new file mode 100644
index 000000000..a0c612ec3
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
@@ -0,0 +1,102 @@
+// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, is not being compiled by
+// GopherJS, and "-tags safe" is not added to the go build command line. The
+// "disableunsafe" tag is deprecated and thus should not be used.
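+// For example, running the tests with "go test -tags safe" excludes this file
+// entirely, so only the code paths that avoid package unsafe are exercised.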
+// +build !js,!appengine,!safe,!disableunsafe
+
+/*
+This test file is part of the spew package rather than the spew_test
+package because it needs access to internals to properly test certain cases
+which are not possible via the public interface since they should never happen.
+*/
+
+package spew
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
+// the maximum kind value which does not exist. This is needed to test the
+// fallback code which punts to the standard fmt library for new types that
+// might get added to the language.
+func changeKind(v *reflect.Value, readOnly bool) {
+ rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
+ *rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
+ if readOnly {
+ *rvf |= flagRO
+ } else {
+ *rvf &= ^uintptr(flagRO)
+ }
+}
+
+// TestAddedReflectValue tests functionality of the dump and formatter code which
+// falls back to the standard fmt library for new types that might get added to
+// the language.
+func TestAddedReflectValue(t *testing.T) {
+ i := 1
+
+ // Dump using a reflect.Value that is exported.
+ v := reflect.ValueOf(int8(5))
+ changeKind(&v, false)
+ buf := new(bytes.Buffer)
+ d := dumpState{w: buf, cs: &Config}
+ d.dump(v)
+ s := buf.String()
+ want := "(int8) 5"
+ if s != want {
+ t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
+ }
+ i++
+
+ // Dump using a reflect.Value that is not exported.
+ changeKind(&v, true)
+ buf.Reset()
+ d.dump(v)
+ s = buf.String()
+ want = "(int8) <int8 Value>"
+ if s != want {
+ t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
+ }
+ i++
+
+ // Formatter using a reflect.Value that is exported.
+ changeKind(&v, false)
+ buf2 := new(dummyFmtState)
+ f := formatState{value: v, cs: &Config, fs: buf2}
+ f.format(v)
+ s = buf2.String()
+ want = "5"
+ if s != want {
+ t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
+ }
+ i++
+
+ // Formatter using a reflect.Value that is not exported.
+ changeKind(&v, true)
+ buf2.Reset()
+ f = formatState{value: v, cs: &Config, fs: buf2}
+ f.format(v)
+ s = buf2.String()
+ want = "<int8 Value>"
+ if s != want {
+ t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
+ }
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go
index d8233f542..32c0e3388 100644
--- a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew_test.go
new file mode 100644
index 000000000..b70466c69
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew_test.go
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew_test
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+// spewFunc is used to identify which public function of the spew package or
+// ConfigState a test applies to.
+type spewFunc int
+
+const (
+ fCSFdump spewFunc = iota
+ fCSFprint
+ fCSFprintf
+ fCSFprintln
+ fCSPrint
+ fCSPrintln
+ fCSSdump
+ fCSSprint
+ fCSSprintf
+ fCSSprintln
+ fCSErrorf
+ fCSNewFormatter
+ fErrorf
+ fFprint
+ fFprintln
+ fPrint
+ fPrintln
+ fSdump
+ fSprint
+ fSprintf
+ fSprintln
+)
+
+// Map of spewFunc values to names for pretty printing.
+var spewFuncStrings = map[spewFunc]string{
+ fCSFdump: "ConfigState.Fdump",
+ fCSFprint: "ConfigState.Fprint",
+ fCSFprintf: "ConfigState.Fprintf",
+ fCSFprintln: "ConfigState.Fprintln",
+ fCSSdump: "ConfigState.Sdump",
+ fCSPrint: "ConfigState.Print",
+ fCSPrintln: "ConfigState.Println",
+ fCSSprint: "ConfigState.Sprint",
+ fCSSprintf: "ConfigState.Sprintf",
+ fCSSprintln: "ConfigState.Sprintln",
+ fCSErrorf: "ConfigState.Errorf",
+ fCSNewFormatter: "ConfigState.NewFormatter",
+ fErrorf: "spew.Errorf",
+ fFprint: "spew.Fprint",
+ fFprintln: "spew.Fprintln",
+ fPrint: "spew.Print",
+ fPrintln: "spew.Println",
+ fSdump: "spew.Sdump",
+ fSprint: "spew.Sprint",
+ fSprintf: "spew.Sprintf",
+ fSprintln: "spew.Sprintln",
+}
+
+func (f spewFunc) String() string {
+ if s, ok := spewFuncStrings[f]; ok {
+ return s
+ }
+ return fmt.Sprintf("Unknown spewFunc (%d)", int(f))
+}
+
+// spewTest is used to describe a test to be performed against the public
+// functions of the spew package or ConfigState.
+type spewTest struct {
+ cs *spew.ConfigState
+ f spewFunc
+ format string
+ in interface{}
+ want string
+}
+
+// spewTests houses the tests to be performed against the public functions of
+// the spew package and ConfigState.
+//
+// These tests are only intended to ensure the public functions are exercised
+// and are intentionally not exhaustive of types. The exhaustive type
+// tests are handled in the dump and format tests.
+var spewTests []spewTest
+
+// redirStdout is a helper function to return the standard output from f as a
+// byte slice.
+func redirStdout(f func()) ([]byte, error) {
+ tempFile, err := ioutil.TempFile("", "ss-test")
+ if err != nil {
+ return nil, err
+ }
+ fileName := tempFile.Name()
+ defer os.Remove(fileName) // Ignore error
+
+ origStdout := os.Stdout
+ os.Stdout = tempFile
+ f()
+ os.Stdout = origStdout
+ tempFile.Close()
+
+ return ioutil.ReadFile(fileName)
+}
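+
+// Note that redirStdout swaps the package-level os.Stdout for the duration of
+// f, so it is not safe for concurrent use; the tests below invoke it
+// sequentially.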
+
+func initSpewTests() {
+ // Config states with various settings.
+ scsDefault := spew.NewDefaultConfig()
+ scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true}
+ scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
+ scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
+ scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
+ scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}
+ scsNoCap := &spew.ConfigState{DisableCapacities: true}
+
+ // Variables for tests on types which implement Stringer interface with and
+ // without a pointer receiver.
+ ts := stringer("test")
+ tps := pstringer("test")
+
+ type ptrTester struct {
+ s *struct{}
+ }
+ tptr := &ptrTester{s: &struct{}{}}
+
+	// depthTester is used to test max depth handling for structs, arrays,
+	// slices, and maps.
+ type depthTester struct {
+ ic indirCir1
+ arr [1]string
+ slice []string
+ m map[string]int
+ }
+ dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"},
+ map[string]int{"one": 1}}
+
+ // Variable for tests on types which implement error interface.
+ te := customError(10)
+
+ spewTests = []spewTest{
+ {scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"},
+ {scsDefault, fCSFprint, "", int16(32767), "32767"},
+ {scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"},
+ {scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"},
+ {scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"},
+ {scsDefault, fCSPrintln, "", uint8(255), "255\n"},
+ {scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"},
+ {scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"},
+ {scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"},
+ {scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"},
+ {scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"},
+ {scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"},
+ {scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"},
+ {scsDefault, fFprint, "", float32(3.14), "3.14"},
+ {scsDefault, fFprintln, "", float64(6.28), "6.28\n"},
+ {scsDefault, fPrint, "", true, "true"},
+ {scsDefault, fPrintln, "", false, "false\n"},
+ {scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"},
+ {scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"},
+ {scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"},
+ {scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"},
+ {scsNoMethods, fCSFprint, "", ts, "test"},
+ {scsNoMethods, fCSFprint, "", &ts, "<*>test"},
+ {scsNoMethods, fCSFprint, "", tps, "test"},
+ {scsNoMethods, fCSFprint, "", &tps, "<*>test"},
+ {scsNoPmethods, fCSFprint, "", ts, "stringer test"},
+ {scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"},
+ {scsNoPmethods, fCSFprint, "", tps, "test"},
+ {scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"},
+ {scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"},
+ {scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" +
+ " ic: (spew_test.indirCir1) {\n <max depth reached>\n },\n" +
+ " arr: ([1]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
+ " slice: ([]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
+ " m: (map[string]int) (len=1) {\n <max depth reached>\n }\n}\n"},
+ {scsContinue, fCSFprint, "", ts, "(stringer test) test"},
+ {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " +
+ "(len=4) (stringer test) \"test\"\n"},
+ {scsContinue, fCSFprint, "", te, "(error: 10) 10"},
+ {scsContinue, fCSFdump, "", te, "(spew_test.customError) " +
+ "(error: 10) 10\n"},
+ {scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"},
+ {scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"},
+ {scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"},
+ {scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"},
+ }
+}
+
+// TestSpew executes all of the tests described by spewTests.
+func TestSpew(t *testing.T) {
+ initSpewTests()
+
+ t.Logf("Running %d tests", len(spewTests))
+ for i, test := range spewTests {
+ buf := new(bytes.Buffer)
+ switch test.f {
+ case fCSFdump:
+ test.cs.Fdump(buf, test.in)
+
+ case fCSFprint:
+ test.cs.Fprint(buf, test.in)
+
+ case fCSFprintf:
+ test.cs.Fprintf(buf, test.format, test.in)
+
+ case fCSFprintln:
+ test.cs.Fprintln(buf, test.in)
+
+ case fCSPrint:
+ b, err := redirStdout(func() { test.cs.Print(test.in) })
+ if err != nil {
+ t.Errorf("%v #%d %v", test.f, i, err)
+ continue
+ }
+ buf.Write(b)
+
+ case fCSPrintln:
+ b, err := redirStdout(func() { test.cs.Println(test.in) })
+ if err != nil {
+ t.Errorf("%v #%d %v", test.f, i, err)
+ continue
+ }
+ buf.Write(b)
+
+ case fCSSdump:
+ str := test.cs.Sdump(test.in)
+ buf.WriteString(str)
+
+ case fCSSprint:
+ str := test.cs.Sprint(test.in)
+ buf.WriteString(str)
+
+ case fCSSprintf:
+ str := test.cs.Sprintf(test.format, test.in)
+ buf.WriteString(str)
+
+ case fCSSprintln:
+ str := test.cs.Sprintln(test.in)
+ buf.WriteString(str)
+
+ case fCSErrorf:
+ err := test.cs.Errorf(test.format, test.in)
+ buf.WriteString(err.Error())
+
+ case fCSNewFormatter:
+ fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))
+
+ case fErrorf:
+ err := spew.Errorf(test.format, test.in)
+ buf.WriteString(err.Error())
+
+ case fFprint:
+ spew.Fprint(buf, test.in)
+
+ case fFprintln:
+ spew.Fprintln(buf, test.in)
+
+ case fPrint:
+ b, err := redirStdout(func() { spew.Print(test.in) })
+ if err != nil {
+ t.Errorf("%v #%d %v", test.f, i, err)
+ continue
+ }
+ buf.Write(b)
+
+ case fPrintln:
+ b, err := redirStdout(func() { spew.Println(test.in) })
+ if err != nil {
+ t.Errorf("%v #%d %v", test.f, i, err)
+ continue
+ }
+ buf.Write(b)
+
+ case fSdump:
+ str := spew.Sdump(test.in)
+ buf.WriteString(str)
+
+ case fSprint:
+ str := spew.Sprint(test.in)
+ buf.WriteString(str)
+
+ case fSprintf:
+ str := spew.Sprintf(test.format, test.in)
+ buf.WriteString(str)
+
+ case fSprintln:
+ str := spew.Sprintln(test.in)
+ buf.WriteString(str)
+
+ default:
+ t.Errorf("%v #%d unrecognized function", test.f, i)
+ continue
+ }
+ s := buf.String()
+ if test.want != s {
+			t.Errorf("%v #%d\n got: %s want: %s", test.f, i, s, test.want)
+ continue
+ }
+ }
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go
new file mode 100644
index 000000000..5c87dd456
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go
@@ -0,0 +1,82 @@
+// Copyright (c) 2013 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when both cgo is supported and "-tags testcgo" is added to the go test
+// command line. This code should really only be in the dumpcgo_test.go file,
+// but unfortunately Go will not allow cgo in test files, so this is a
+// workaround to allow cgo types to be tested. This configuration is used
+// because spew itself does not require cgo to run even though it does handle
+// certain cgo types specially. Rather than forcing all clients to require cgo
+// and an external C compiler just to run the tests, this scheme makes them
+// optional.
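+// For example, these fixtures are typically exercised by running something
+// like "go test -tags testcgo" against the spew package (assuming cgo and a
+// working C compiler are available).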
+// +build cgo,testcgo
+
+package testdata
+
+/*
+#include <stdint.h>
+typedef unsigned char custom_uchar_t;
+
+char *ncp = 0;
+char *cp = "test";
+char ca[6] = {'t', 'e', 's', 't', '2', '\0'};
+unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'};
+signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'};
+uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'};
+custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'};
+*/
+import "C"
+
+// GetCgoNullCharPointer returns a null char pointer via cgo. This is only
+// used for tests.
+func GetCgoNullCharPointer() interface{} {
+ return C.ncp
+}
+
+// GetCgoCharPointer returns a char pointer via cgo. This is only used for
+// tests.
+func GetCgoCharPointer() interface{} {
+ return C.cp
+}
+
+// GetCgoCharArray returns a char array via cgo and the array's len and cap.
+// This is only used for tests.
+func GetCgoCharArray() (interface{}, int, int) {
+ return C.ca, len(C.ca), cap(C.ca)
+}
+
+// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the
+// array's len and cap. This is only used for tests.
+func GetCgoUnsignedCharArray() (interface{}, int, int) {
+ return C.uca, len(C.uca), cap(C.uca)
+}
+
+// GetCgoSignedCharArray returns a signed char array via cgo and the array's len
+// and cap. This is only used for tests.
+func GetCgoSignedCharArray() (interface{}, int, int) {
+ return C.sca, len(C.sca), cap(C.sca)
+}
+
+// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and
+// cap. This is only used for tests.
+func GetCgoUint8tArray() (interface{}, int, int) {
+ return C.ui8ta, len(C.ui8ta), cap(C.ui8ta)
+}
+
+// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via
+// cgo and the array's len and cap. This is only used for tests.
+func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {
+ return C.tuca, len(C.tuca), cap(C.tuca)
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/test_coverage.txt
new file mode 100644
index 000000000..2cd087a2a
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/test_coverage.txt
@@ -0,0 +1,61 @@
+
+github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88)
+github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82)
+github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52)
+github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44)
+github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39)
+github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30)
+github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18)
+github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13)
+github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12)
+github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11)
+github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11)
+github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10)
+github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9)
+github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8)
+github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7)
+github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5)
+github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4)
+github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4)
+github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4)
+github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4)
+github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3)
+github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3)
+github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3)
+github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3)
+github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3)
+github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1)
+github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1)
+github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1)
+github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1)
+github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505)
+
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/.travis.yml b/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/.travis.yml
new file mode 100644
index 000000000..90c9c6f91
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/.travis.yml
@@ -0,0 +1,5 @@
+language: go
+go:
+ - 1.5
+ - tip
+
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/README.md b/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/README.md
new file mode 100644
index 000000000..e87f307ed
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/README.md
@@ -0,0 +1,50 @@
+go-difflib
+==========
+
+[![Build Status](https://travis-ci.org/pmezard/go-difflib.png?branch=master)](https://travis-ci.org/pmezard/go-difflib)
+[![GoDoc](https://godoc.org/github.com/pmezard/go-difflib/difflib?status.svg)](https://godoc.org/github.com/pmezard/go-difflib/difflib)
+
+Go-difflib is a partial port of the Python 3 difflib package. Its main
+goal was to make unified and context diffs available in pure Go, mostly
+for testing purposes.
+
+The following class and functions (and related tests) have been ported:
+
+* `SequenceMatcher`
+* `unified_diff()`
+* `context_diff()`
+
+## Installation
+
+```bash
+$ go get github.com/pmezard/go-difflib/difflib
+```
+
+### Quick Start
+
+Diffs are configured with UnifiedDiff (or ContextDiff) structures, and can
+be output to an io.Writer or returned as a string.
+
+```Go
+diff := UnifiedDiff{
+ A: difflib.SplitLines("foo\nbar\n"),
+ B: difflib.SplitLines("foo\nbaz\n"),
+ FromFile: "Original",
+ ToFile: "Current",
+ Context: 3,
+}
+text, _ := GetUnifiedDiffString(diff)
+fmt.Print(text)
+```
+
+would output:
+
+```
+--- Original
++++ Current
+@@ -1,3 +1,3 @@
+ foo
+-bar
++baz
+```
+
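+Context diffs work the same way; below is a minimal sketch using the same
+inputs (ContextDiff mirrors the UnifiedDiff structure, including the optional
+Eol line-terminator field):
+
+```Go
+diff := ContextDiff{
+    A:        difflib.SplitLines("foo\nbar\n"),
+    B:        difflib.SplitLines("foo\nbaz\n"),
+    FromFile: "Original",
+    ToFile:   "Current",
+    Context:  3,
+    Eol:      "\n",
+}
+text, _ := GetContextDiffString(diff)
+fmt.Print(text)
+```
+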
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go
new file mode 100644
index 000000000..94670bea3
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go
@@ -0,0 +1,352 @@
+package difflib
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func assertAlmostEqual(t *testing.T, a, b float64, places int) {
+ if math.Abs(a-b) > math.Pow10(-places) {
+ t.Errorf("%.7f != %.7f", a, b)
+ }
+}
+
+func assertEqual(t *testing.T, a, b interface{}) {
+ if !reflect.DeepEqual(a, b) {
+ t.Errorf("%v != %v", a, b)
+ }
+}
+
+func splitChars(s string) []string {
+ chars := make([]string, 0, len(s))
+ // Assume ASCII inputs
+ for i := 0; i != len(s); i++ {
+ chars = append(chars, string(s[i]))
+ }
+ return chars
+}
+
+func TestSequenceMatcherRatio(t *testing.T) {
+ s := NewMatcher(splitChars("abcd"), splitChars("bcde"))
+ assertEqual(t, s.Ratio(), 0.75)
+ assertEqual(t, s.QuickRatio(), 0.75)
+ assertEqual(t, s.RealQuickRatio(), 1.0)
+}
+
+func TestGetOptCodes(t *testing.T) {
+ a := "qabxcd"
+ b := "abycdf"
+ s := NewMatcher(splitChars(a), splitChars(b))
+ w := &bytes.Buffer{}
+ for _, op := range s.GetOpCodes() {
+ fmt.Fprintf(w, "%s a[%d:%d], (%s) b[%d:%d] (%s)\n", string(op.Tag),
+ op.I1, op.I2, a[op.I1:op.I2], op.J1, op.J2, b[op.J1:op.J2])
+ }
+ result := string(w.Bytes())
+ expected := `d a[0:1], (q) b[0:0] ()
+e a[1:3], (ab) b[0:2] (ab)
+r a[3:4], (x) b[2:3] (y)
+e a[4:6], (cd) b[3:5] (cd)
+i a[6:6], () b[5:6] (f)
+`
+ if expected != result {
+ t.Errorf("unexpected op codes: \n%s", result)
+ }
+}
+
+func TestGroupedOpCodes(t *testing.T) {
+ a := []string{}
+ for i := 0; i != 39; i++ {
+ a = append(a, fmt.Sprintf("%02d", i))
+ }
+ b := []string{}
+ b = append(b, a[:8]...)
+ b = append(b, " i")
+ b = append(b, a[8:19]...)
+ b = append(b, " x")
+ b = append(b, a[20:22]...)
+ b = append(b, a[27:34]...)
+ b = append(b, " y")
+ b = append(b, a[35:]...)
+ s := NewMatcher(a, b)
+ w := &bytes.Buffer{}
+ for _, g := range s.GetGroupedOpCodes(-1) {
+ fmt.Fprintf(w, "group\n")
+ for _, op := range g {
+ fmt.Fprintf(w, " %s, %d, %d, %d, %d\n", string(op.Tag),
+ op.I1, op.I2, op.J1, op.J2)
+ }
+ }
+ result := string(w.Bytes())
+ expected := `group
+ e, 5, 8, 5, 8
+ i, 8, 8, 8, 9
+ e, 8, 11, 9, 12
+group
+ e, 16, 19, 17, 20
+ r, 19, 20, 20, 21
+ e, 20, 22, 21, 23
+ d, 22, 27, 23, 23
+ e, 27, 30, 23, 26
+group
+ e, 31, 34, 27, 30
+ r, 34, 35, 30, 31
+ e, 35, 38, 31, 34
+`
+ if expected != result {
+ t.Errorf("unexpected op codes: \n%s", result)
+ }
+}
+
+func ExampleGetUnifiedDiffString() {
+ a := `one
+two
+three
+four`
+ b := `zero
+one
+three
+four`
+ diff := UnifiedDiff{
+ A: SplitLines(a),
+ B: SplitLines(b),
+ FromFile: "Original",
+ FromDate: "2005-01-26 23:30:50",
+ ToFile: "Current",
+ ToDate: "2010-04-02 10:20:52",
+ Context: 3,
+ }
+ result, _ := GetUnifiedDiffString(diff)
+	fmt.Print(strings.Replace(result, "\t", " ", -1))
+ // Output:
+ // --- Original 2005-01-26 23:30:50
+ // +++ Current 2010-04-02 10:20:52
+ // @@ -1,4 +1,4 @@
+ // +zero
+ // one
+ // -two
+ // three
+ // four
+}
+
+func ExampleGetContextDiffString() {
+ a := `one
+two
+three
+four`
+ b := `zero
+one
+tree
+four`
+ diff := ContextDiff{
+ A: SplitLines(a),
+ B: SplitLines(b),
+ FromFile: "Original",
+ ToFile: "Current",
+ Context: 3,
+ Eol: "\n",
+ }
+ result, _ := GetContextDiffString(diff)
+	fmt.Print(strings.Replace(result, "\t", " ", -1))
+ // Output:
+ // *** Original
+ // --- Current
+ // ***************
+ // *** 1,4 ****
+ // one
+ // ! two
+ // ! three
+ // four
+ // --- 1,4 ----
+ // + zero
+ // one
+ // ! tree
+ // four
+}
+
+func rep(s string, count int) string {
+ return strings.Repeat(s, count)
+}
+
+func TestWithAsciiOneInsert(t *testing.T) {
+ sm := NewMatcher(splitChars(rep("b", 100)),
+ splitChars("a"+rep("b", 100)))
+ assertAlmostEqual(t, sm.Ratio(), 0.995, 3)
+ assertEqual(t, sm.GetOpCodes(),
+ []OpCode{{'i', 0, 0, 0, 1}, {'e', 0, 100, 1, 101}})
+ assertEqual(t, len(sm.bPopular), 0)
+
+ sm = NewMatcher(splitChars(rep("b", 100)),
+ splitChars(rep("b", 50)+"a"+rep("b", 50)))
+ assertAlmostEqual(t, sm.Ratio(), 0.995, 3)
+ assertEqual(t, sm.GetOpCodes(),
+ []OpCode{{'e', 0, 50, 0, 50}, {'i', 50, 50, 50, 51}, {'e', 50, 100, 51, 101}})
+ assertEqual(t, len(sm.bPopular), 0)
+}
+
+func TestWithAsciiOnDelete(t *testing.T) {
+ sm := NewMatcher(splitChars(rep("a", 40)+"c"+rep("b", 40)),
+ splitChars(rep("a", 40)+rep("b", 40)))
+ assertAlmostEqual(t, sm.Ratio(), 0.994, 3)
+ assertEqual(t, sm.GetOpCodes(),
+ []OpCode{{'e', 0, 40, 0, 40}, {'d', 40, 41, 40, 40}, {'e', 41, 81, 40, 80}})
+}
+
+func TestWithAsciiBJunk(t *testing.T) {
+ isJunk := func(s string) bool {
+ return s == " "
+ }
+ sm := NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
+ splitChars(rep("a", 44)+rep("b", 40)), true, isJunk)
+ assertEqual(t, sm.bJunk, map[string]struct{}{})
+
+ sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
+ splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk)
+ assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}})
+
+ isJunk = func(s string) bool {
+ return s == " " || s == "b"
+ }
+ sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
+ splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk)
+ assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}, "b": struct{}{}})
+}
+
+func TestSFBugsRatioForNullSeqn(t *testing.T) {
+ sm := NewMatcher(nil, nil)
+ assertEqual(t, sm.Ratio(), 1.0)
+ assertEqual(t, sm.QuickRatio(), 1.0)
+ assertEqual(t, sm.RealQuickRatio(), 1.0)
+}
+
+func TestSFBugsComparingEmptyLists(t *testing.T) {
+ groups := NewMatcher(nil, nil).GetGroupedOpCodes(-1)
+ assertEqual(t, len(groups), 0)
+ diff := UnifiedDiff{
+ FromFile: "Original",
+ ToFile: "Current",
+ Context: 3,
+ }
+ result, err := GetUnifiedDiffString(diff)
+ assertEqual(t, err, nil)
+ assertEqual(t, result, "")
+}
+
+func TestOutputFormatRangeFormatUnified(t *testing.T) {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ //
+ // Each <range> field shall be of the form:
+	// "%1d", <beginning line number> if the range contains exactly one line,
+ // and:
+ // "%1d,%1d", <beginning line number>, <number of lines> otherwise.
+ // If a range is empty, its beginning line number shall be the number of
+ // the line just before the range, or 0 if the empty range starts the file.
+ fm := formatRangeUnified
+ assertEqual(t, fm(3, 3), "3,0")
+ assertEqual(t, fm(3, 4), "4")
+ assertEqual(t, fm(3, 5), "4,2")
+ assertEqual(t, fm(3, 6), "4,3")
+ assertEqual(t, fm(0, 0), "0,0")
+}
+
+func TestOutputFormatRangeFormatContext(t *testing.T) {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ //
+ // The range of lines in file1 shall be written in the following format
+ // if the range contains two or more lines:
+ // "*** %d,%d ****\n", <beginning line number>, <ending line number>
+ // and the following format otherwise:
+ // "*** %d ****\n", <ending line number>
+ // The ending line number of an empty range shall be the number of the preceding line,
+ // or 0 if the range is at the start of the file.
+ //
+ // Next, the range of lines in file2 shall be written in the following format
+ // if the range contains two or more lines:
+ // "--- %d,%d ----\n", <beginning line number>, <ending line number>
+ // and the following format otherwise:
+ // "--- %d ----\n", <ending line number>
+ fm := formatRangeContext
+ assertEqual(t, fm(3, 3), "3")
+ assertEqual(t, fm(3, 4), "4")
+ assertEqual(t, fm(3, 5), "4,5")
+ assertEqual(t, fm(3, 6), "4,6")
+ assertEqual(t, fm(0, 0), "0")
+}
+
+func TestOutputFormatTabDelimiter(t *testing.T) {
+ diff := UnifiedDiff{
+ A: splitChars("one"),
+ B: splitChars("two"),
+ FromFile: "Original",
+ FromDate: "2005-01-26 23:30:50",
+ ToFile: "Current",
+ ToDate: "2010-04-12 10:20:52",
+ Eol: "\n",
+ }
+ ud, err := GetUnifiedDiffString(diff)
+ assertEqual(t, err, nil)
+ assertEqual(t, SplitLines(ud)[:2], []string{
+ "--- Original\t2005-01-26 23:30:50\n",
+ "+++ Current\t2010-04-12 10:20:52\n",
+ })
+ cd, err := GetContextDiffString(ContextDiff(diff))
+ assertEqual(t, err, nil)
+ assertEqual(t, SplitLines(cd)[:2], []string{
+ "*** Original\t2005-01-26 23:30:50\n",
+ "--- Current\t2010-04-12 10:20:52\n",
+ })
+}
+
+func TestOutputFormatNoTrailingTabOnEmptyFiledate(t *testing.T) {
+ diff := UnifiedDiff{
+ A: splitChars("one"),
+ B: splitChars("two"),
+ FromFile: "Original",
+ ToFile: "Current",
+ Eol: "\n",
+ }
+ ud, err := GetUnifiedDiffString(diff)
+ assertEqual(t, err, nil)
+ assertEqual(t, SplitLines(ud)[:2], []string{"--- Original\n", "+++ Current\n"})
+
+ cd, err := GetContextDiffString(ContextDiff(diff))
+ assertEqual(t, err, nil)
+ assertEqual(t, SplitLines(cd)[:2], []string{"*** Original\n", "--- Current\n"})
+}
+
+func TestSplitLines(t *testing.T) {
+ allTests := []struct {
+ input string
+ want []string
+ }{
+ {"foo", []string{"foo\n"}},
+ {"foo\nbar", []string{"foo\n", "bar\n"}},
+ {"foo\nbar\n", []string{"foo\n", "bar\n", "\n"}},
+ }
+ for _, test := range allTests {
+ assertEqual(t, SplitLines(test.input), test.want)
+ }
+}
+
+// benchmarkSplitLines times SplitLines on an input of count repeated lines.
+func benchmarkSplitLines(b *testing.B, count int) {
+ str := strings.Repeat("foo\n", count)
+
+ b.ResetTimer()
+
+ n := 0
+ for i := 0; i < b.N; i++ {
+ n += len(SplitLines(str))
+ }
+}
+
+func BenchmarkSplitLines100(b *testing.B) {
+ benchmarkSplitLines(b, 100)
+}
+
+func BenchmarkSplitLines10000(b *testing.B) {
+ benchmarkSplitLines(b, 10000)
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors_test.go
new file mode 100644
index 000000000..ce5d8e4aa
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors_test.go
@@ -0,0 +1,145 @@
+package objx
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestAccessorsAccessGetSingleField(t *testing.T) {
+
+ current := map[string]interface{}{"name": "Tyler"}
+ assert.Equal(t, "Tyler", access(current, "name", nil, false, true))
+
+}
+func TestAccessorsAccessGetDeep(t *testing.T) {
+
+ current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}}
+ assert.Equal(t, "Tyler", access(current, "name.first", nil, false, true))
+ assert.Equal(t, "Bunnell", access(current, "name.last", nil, false, true))
+
+}
+func TestAccessorsAccessGetDeepDeep(t *testing.T) {
+
+ current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}}
+ assert.Equal(t, 4, access(current, "one.two.three.four", nil, false, true))
+
+}
+func TestAccessorsAccessGetInsideArray(t *testing.T) {
+
+ current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}}
+ assert.Equal(t, "Tyler", access(current, "names[0].first", nil, false, true))
+ assert.Equal(t, "Bunnell", access(current, "names[0].last", nil, false, true))
+ assert.Equal(t, "Capitol", access(current, "names[1].first", nil, false, true))
+ assert.Equal(t, "Bollocks", access(current, "names[1].last", nil, false, true))
+
+ assert.Panics(t, func() {
+ access(current, "names[2]", nil, false, true)
+ })
+ assert.Nil(t, access(current, "names[2]", nil, false, false))
+
+}
+
+func TestAccessorsAccessGetFromArrayWithInt(t *testing.T) {
+
+ current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}
+ one := access(current, 0, nil, false, false)
+ two := access(current, 1, nil, false, false)
+ three := access(current, 2, nil, false, false)
+
+ assert.Equal(t, "Tyler", one.(map[string]interface{})["first"])
+ assert.Equal(t, "Capitol", two.(map[string]interface{})["first"])
+ assert.Nil(t, three)
+
+}
+
+func TestAccessorsGet(t *testing.T) {
+
+ current := New(map[string]interface{}{"name": "Tyler"})
+ assert.Equal(t, "Tyler", current.Get("name").data)
+
+}
+
+func TestAccessorsAccessSetSingleField(t *testing.T) {
+
+ current := map[string]interface{}{"name": "Tyler"}
+ access(current, "name", "Mat", true, false)
+ assert.Equal(t, current["name"], "Mat")
+
+ access(current, "age", 29, true, true)
+ assert.Equal(t, current["age"], 29)
+
+}
+
+func TestAccessorsAccessSetSingleFieldNotExisting(t *testing.T) {
+
+ current := map[string]interface{}{}
+ access(current, "name", "Mat", true, false)
+ assert.Equal(t, current["name"], "Mat")
+
+}
+
+func TestAccessorsAccessSetDeep(t *testing.T) {
+
+ current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}}
+
+ access(current, "name.first", "Mat", true, true)
+ access(current, "name.last", "Ryer", true, true)
+
+ assert.Equal(t, "Mat", access(current, "name.first", nil, false, true))
+ assert.Equal(t, "Ryer", access(current, "name.last", nil, false, true))
+
+}
+func TestAccessorsAccessSetDeepDeep(t *testing.T) {
+
+ current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}}
+
+ access(current, "one.two.three.four", 5, true, true)
+
+ assert.Equal(t, 5, access(current, "one.two.three.four", nil, false, true))
+
+}
+func TestAccessorsAccessSetArray(t *testing.T) {
+
+ current := map[string]interface{}{"names": []interface{}{"Tyler"}}
+
+ access(current, "names[0]", "Mat", true, true)
+
+ assert.Equal(t, "Mat", access(current, "names[0]", nil, false, true))
+
+}
+func TestAccessorsAccessSetInsideArray(t *testing.T) {
+
+ current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}}
+
+ access(current, "names[0].first", "Mat", true, true)
+ access(current, "names[0].last", "Ryer", true, true)
+ access(current, "names[1].first", "Captain", true, true)
+ access(current, "names[1].last", "Underpants", true, true)
+
+ assert.Equal(t, "Mat", access(current, "names[0].first", nil, false, true))
+ assert.Equal(t, "Ryer", access(current, "names[0].last", nil, false, true))
+ assert.Equal(t, "Captain", access(current, "names[1].first", nil, false, true))
+ assert.Equal(t, "Underpants", access(current, "names[1].last", nil, false, true))
+
+}
+
+func TestAccessorsAccessSetFromArrayWithInt(t *testing.T) {
+
+ current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}
+ one := access(current, 0, nil, false, false)
+ two := access(current, 1, nil, false, false)
+ three := access(current, 2, nil, false, false)
+
+ assert.Equal(t, "Tyler", one.(map[string]interface{})["first"])
+ assert.Equal(t, "Capitol", two.(map[string]interface{})["first"])
+ assert.Nil(t, three)
+
+}
+
+func TestAccessorsSet(t *testing.T) {
+
+ current := New(map[string]interface{}{"name": "Tyler"})
+ current.Set("name", "Mat")
+ assert.Equal(t, "Mat", current.Get("name").data)
+
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions_test.go
new file mode 100644
index 000000000..e9ccd2987
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions_test.go
@@ -0,0 +1,94 @@
+package objx
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestConversionJSON(t *testing.T) {
+
+ jsonString := `{"name":"Mat"}`
+ o := MustFromJSON(jsonString)
+
+ result, err := o.JSON()
+
+ if assert.NoError(t, err) {
+ assert.Equal(t, jsonString, result)
+ }
+
+ assert.Equal(t, jsonString, o.MustJSON())
+
+}
+
+func TestConversionJSONWithError(t *testing.T) {
+
+ o := MSI()
+ o["test"] = func() {}
+
+ assert.Panics(t, func() {
+ o.MustJSON()
+ })
+
+ _, err := o.JSON()
+
+ assert.Error(t, err)
+
+}
+
+func TestConversionBase64(t *testing.T) {
+
+ o := New(map[string]interface{}{"name": "Mat"})
+
+ result, err := o.Base64()
+
+ if assert.NoError(t, err) {
+ assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result)
+ }
+
+ assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", o.MustBase64())
+
+}
+
+func TestConversionBase64WithError(t *testing.T) {
+
+ o := MSI()
+ o["test"] = func() {}
+
+ assert.Panics(t, func() {
+ o.MustBase64()
+ })
+
+ _, err := o.Base64()
+
+ assert.Error(t, err)
+
+}
+
+func TestConversionSignedBase64(t *testing.T) {
+
+ o := New(map[string]interface{}{"name": "Mat"})
+
+ result, err := o.SignedBase64("key")
+
+ if assert.NoError(t, err) {
+ assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result)
+ }
+
+ assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", o.MustSignedBase64("key"))
+
+}
+
+func TestConversionSignedBase64WithError(t *testing.T) {
+
+ o := MSI()
+ o["test"] = func() {}
+
+ assert.Panics(t, func() {
+ o.MustSignedBase64("key")
+ })
+
+ _, err := o.SignedBase64("key")
+
+ assert.Error(t, err)
+
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/fixture_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/fixture_test.go
new file mode 100644
index 000000000..27f7d9049
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/fixture_test.go
@@ -0,0 +1,98 @@
+package objx
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+var fixtures = []struct {
+ // name is the name of the fixture (used for reporting
+ // failures)
+ name string
+ // data is the JSON data to be worked on
+ data string
+ // get is the argument(s) to pass to Get
+ get interface{}
+ // output is the expected output
+ output interface{}
+}{
+ {
+ name: "Simple get",
+ data: `{"name": "Mat"}`,
+ get: "name",
+ output: "Mat",
+ },
+ {
+ name: "Get with dot notation",
+ data: `{"address": {"city": "Boulder"}}`,
+ get: "address.city",
+ output: "Boulder",
+ },
+ {
+ name: "Deep get with dot notation",
+ data: `{"one": {"two": {"three": {"four": "hello"}}}}`,
+ get: "one.two.three.four",
+ output: "hello",
+ },
+ {
+ name: "Get missing with dot notation",
+ data: `{"one": {"two": {"three": {"four": "hello"}}}}`,
+ get: "one.ten",
+ output: nil,
+ },
+ {
+ name: "Get with array notation",
+ data: `{"tags": ["one", "two", "three"]}`,
+ get: "tags[1]",
+ output: "two",
+ },
+ {
+ name: "Get with array and dot notation",
+ data: `{"types": { "tags": ["one", "two", "three"]}}`,
+ get: "types.tags[1]",
+ output: "two",
+ },
+ {
+ name: "Get with array and dot notation - field after array",
+ data: `{"tags": [{"name":"one"}, {"name":"two"}, {"name":"three"}]}`,
+ get: "tags[1].name",
+ output: "two",
+ },
+ {
+ name: "Complex get with array and dot notation",
+ data: `{"tags": [{"list": [{"one":"pizza"}]}]}`,
+ get: "tags[0].list[0].one",
+ output: "pizza",
+ },
+ {
+ name: "Get field from within string should be nil",
+ data: `{"name":"Tyler"}`,
+ get: "name.something",
+ output: nil,
+ },
+ {
+ name: "Get field from within string (using array accessor) should be nil",
+ data: `{"numbers":["one", "two", "three"]}`,
+ get: "numbers[0].nope",
+ output: nil,
+ },
+}
+
+func TestFixtures(t *testing.T) {
+
+ for _, fixture := range fixtures {
+
+ m := MustFromJSON(fixture.data)
+
+ // get the value
+ t.Logf("Running get fixture: \"%s\" (%v)", fixture.name, fixture)
+ value := m.Get(fixture.get.(string))
+
+ // make sure it matches
+ assert.Equal(t, fixture.output, value.data,
+ "Get fixture \"%s\" failed: %v", fixture.name, fixture,
+ )
+
+ }
+
+}
diff --git a/vendor/github.com/stretchr/objx/map_for_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_for_test.go
index 6beb50675..6beb50675 100644
--- a/vendor/github.com/stretchr/objx/map_for_test.go
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_for_test.go
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_test.go
new file mode 100644
index 000000000..1f8b45c61
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map_test.go
@@ -0,0 +1,147 @@
+package objx
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+type Convertable struct {
+ name string
+}
+
+func (c *Convertable) MSI() map[string]interface{} {
+ return map[string]interface{}{"name": c.name}
+}
+
+type Unconvertable struct {
+ name string
+}
+
+func TestMapCreation(t *testing.T) {
+
+ o := New(nil)
+ assert.Nil(t, o)
+
+ o = New("Tyler")
+ assert.Nil(t, o)
+
+ unconvertable := &Unconvertable{name: "Tyler"}
+ o = New(unconvertable)
+ assert.Nil(t, o)
+
+	convertable := &Convertable{name: "Tyler"}
+	o = New(convertable)
+	if assert.NotNil(t, o) {
+		assert.Equal(t, "Tyler", o["name"])
+	}
+
+	o = MSI()
+	assert.NotNil(t, o)
+
+	o = MSI("name", "Tyler")
+	if assert.NotNil(t, o) {
+		assert.Equal(t, o["name"], "Tyler")
+	}
+
+}
+
+func TestMapFromJSONWithError(t *testing.T) {
+
+ _, err := FromJSON(`"name":"Mat"}`)
+ assert.Error(t, err)
+
+}
+
+func TestMapFromJSON(t *testing.T) {
+
+ o := MustFromJSON(`{"name":"Mat"}`)
+
+	if assert.NotNil(t, o) {
+		assert.Equal(t, "Mat", o["name"])
+	}
+
+}
+
+func TestMapMustFromJSONWithError(t *testing.T) {
+
+ var m Map
+
+ assert.Panics(t, func() {
+ m = MustFromJSON(`"name":"Mat"}`)
+ })
+
+ assert.Nil(t, m)
+
+}
+
+func TestMapFromBase64String(t *testing.T) {
+
+ base64String := "eyJuYW1lIjoiTWF0In0="
+
+ o, err := FromBase64(base64String)
+
+ if assert.NoError(t, err) {
+ assert.Equal(t, o.Get("name").Str(), "Mat")
+ }
+
+ assert.Equal(t, MustFromBase64(base64String).Get("name").Str(), "Mat")
+
+}
+
+func TestMapFromBase64StringWithError(t *testing.T) {
+
+ base64String := "eyJuYW1lIjoiTWFasd0In0="
+
+ _, err := FromBase64(base64String)
+
+ assert.Error(t, err)
+
+ assert.Panics(t, func() {
+ MustFromBase64(base64String)
+ })
+
+}
+
+func TestMapFromSignedBase64String(t *testing.T) {
+
+ base64String := "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6"
+
+ o, err := FromSignedBase64(base64String, "key")
+
+ if assert.NoError(t, err) {
+ assert.Equal(t, o.Get("name").Str(), "Mat")
+ }
+
+ assert.Equal(t, MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat")
+
+}
+
+func TestMapFromSignedBase64StringWithError(t *testing.T) {
+
+ base64String := "eyJuYW1lasdIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6"
+
+ _, err := FromSignedBase64(base64String, "key")
+
+ assert.Error(t, err)
+
+ assert.Panics(t, func() {
+ MustFromSignedBase64(base64String, "key")
+ })
+
+}
+
+func TestMapFromURLQuery(t *testing.T) {
+
+ m, err := FromURLQuery("name=tyler&state=UT")
+ if assert.NoError(t, err) && assert.NotNil(t, m) {
+ assert.Equal(t, "tyler", m.Get("name").Str())
+ assert.Equal(t, "UT", m.Get("state").Str())
+ }
+
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations_test.go
new file mode 100644
index 000000000..e20ee23bc
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations_test.go
@@ -0,0 +1,77 @@
+package objx
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestExclude(t *testing.T) {
+
+ d := make(Map)
+ d["name"] = "Mat"
+ d["age"] = 29
+ d["secret"] = "ABC"
+
+ excluded := d.Exclude([]string{"secret"})
+
+ assert.Equal(t, d["name"], excluded["name"])
+ assert.Equal(t, d["age"], excluded["age"])
+ assert.False(t, excluded.Has("secret"), "secret should be excluded")
+
+}
+
+func TestCopy(t *testing.T) {
+
+ d1 := make(map[string]interface{})
+ d1["name"] = "Tyler"
+ d1["location"] = "UT"
+
+ d1Obj := New(d1)
+ d2Obj := d1Obj.Copy()
+
+ d2Obj["name"] = "Mat"
+
+ assert.Equal(t, d1Obj.Get("name").Str(), "Tyler")
+ assert.Equal(t, d2Obj.Get("name").Str(), "Mat")
+
+}
+
+func TestMerge(t *testing.T) {
+
+ d := make(map[string]interface{})
+ d["name"] = "Mat"
+
+ d1 := make(map[string]interface{})
+ d1["name"] = "Tyler"
+ d1["location"] = "UT"
+
+ dObj := New(d)
+ d1Obj := New(d1)
+
+ merged := dObj.Merge(d1Obj)
+
+ assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str())
+ assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str())
+ assert.Empty(t, dObj.Get("location").Str())
+
+}
+
+func TestMergeHere(t *testing.T) {
+
+ d := make(map[string]interface{})
+ d["name"] = "Mat"
+
+ d1 := make(map[string]interface{})
+ d1["name"] = "Tyler"
+ d1["location"] = "UT"
+
+ dObj := New(d)
+ d1Obj := New(d1)
+
+ merged := dObj.MergeHere(d1Obj)
+
+ assert.Equal(t, dObj, merged, "With MergeHere, it should return the first modified map")
+ assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str())
+ assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str())
+ assert.Equal(t, merged.Get("location").Str(), dObj.Get("location").Str())
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security_test.go
new file mode 100644
index 000000000..8f0898f62
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security_test.go
@@ -0,0 +1,12 @@
+package objx
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestHashWithKey(t *testing.T) {
+
+ assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", HashWithKey("abc", "def"))
+
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/simple_example_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/simple_example_test.go
new file mode 100644
index 000000000..5408c7fd3
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/simple_example_test.go
@@ -0,0 +1,41 @@
+package objx
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestSimpleExample(t *testing.T) {
+
+ // build a map from a JSON object
+ o := MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], "location":{"county":"hobbiton","city":"the shire"}}`)
+
+ // Map can be used as a straight map[string]interface{}
+ assert.Equal(t, o["name"], "Mat")
+
+ // Get an Value object
+ v := o.Get("name")
+ assert.Equal(t, v, &Value{data: "Mat"})
+
+ // Test the contained value
+ assert.False(t, v.IsInt())
+ assert.False(t, v.IsBool())
+ assert.True(t, v.IsStr())
+
+ // Get the contained value
+ assert.Equal(t, v.Str(), "Mat")
+
+ // Get a default value if the contained value is not of the expected type or does not exist
+ assert.Equal(t, 1, v.Int(1))
+
+ // Get a value by using array notation
+ assert.Equal(t, "indian", o.Get("foods[0]").Data())
+
+ // Set a value by using array notation
+ o.Set("foods[0]", "italian")
+ assert.Equal(t, "italian", o.Get("foods[0]").Str())
+
+ // Get a value by using dot notation
+ assert.Equal(t, "hobbiton", o.Get("location.county").Str())
+
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests_test.go
new file mode 100644
index 000000000..bcc1eb03d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests_test.go
@@ -0,0 +1,24 @@
+package objx
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestHas(t *testing.T) {
+
+ m := New(TestMap)
+
+ assert.True(t, m.Has("name"))
+ assert.True(t, m.Has("address.state"))
+ assert.True(t, m.Has("numbers[4]"))
+
+ assert.False(t, m.Has("address.state.nope"))
+ assert.False(t, m.Has("address.nope"))
+ assert.False(t, m.Has("nope"))
+ assert.False(t, m.Has("numbers[5]"))
+
+ m = nil
+ assert.False(t, m.Has("nothing"))
+
+}
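+
+// NOTE: editorial sketch, not from upstream objx. Unlike TestHas above, it
+// avoids the shared TestMap fixture and shows that Has accepts the same dot
+// and array notation as Get on any freshly built Map.
+func TestHasSketch(t *testing.T) {
+
+ m := New(map[string]interface{}{
+ "address": map[string]interface{}{"state": "UT"},
+ "numbers": []interface{}{1, 2, 3},
+ })
+
+ assert.True(t, m.Has("address.state"))
+ assert.True(t, m.Has("numbers[2]"))
+ assert.False(t, m.Has("numbers[3]"))
+}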
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen_test.go
new file mode 100644
index 000000000..f7a4fceea
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen_test.go
@@ -0,0 +1,2867 @@
+package objx
+
+import (
+ "fmt"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestInter(t *testing.T) {
+
+ val := interface{}("something")
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Inter())
+ assert.Equal(t, val, New(m).Get("value").MustInter())
+ assert.Equal(t, interface{}(nil), New(m).Get("nothing").Inter())
+ assert.Equal(t, val, New(m).Get("nothing").Inter("something"))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustInter()
+ })
+
+}
+
+func TestInterSlice(t *testing.T) {
+
+ val := interface{}("something")
+ m := map[string]interface{}{"value": []interface{}{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").InterSlice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustInterSlice()[0])
+ assert.Equal(t, []interface{}(nil), New(m).Get("nothing").InterSlice())
+ assert.Equal(t, val, New(m).Get("nothing").InterSlice([]interface{}{interface{}("something")})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustInterSlice()
+ })
+
+}
+
+func TestIsInter(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: interface{}("something")}
+ assert.True(t, v.IsInter())
+
+ v = &Value{data: []interface{}{interface{}("something")}}
+ assert.True(t, v.IsInterSlice())
+
+}
+
+func TestEachInter(t *testing.T) {
+
+ v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
+ count := 0
+ replacedVals := make([]interface{}, 0)
+ assert.Equal(t, v, v.EachInter(func(i int, val interface{}) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustInterSlice()[0])
+ assert.Equal(t, replacedVals[1], v.MustInterSlice()[1])
+ assert.Equal(t, replacedVals[2], v.MustInterSlice()[2])
+
+}
+
+func TestWhereInter(t *testing.T) {
+
+ v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
+
+ selected := v.WhereInter(func(i int, val interface{}) bool {
+ return i%2 == 0
+ }).MustInterSlice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupInter(t *testing.T) {
+
+ v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
+
+ grouped := v.GroupInter(func(i int, val interface{}) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]interface{})
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceInter(t *testing.T) {
+
+ v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
+
+ rawArr := v.MustInterSlice()
+
+ replaced := v.ReplaceInter(func(index int, val interface{}) interface{} {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustInterSlice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectInter(t *testing.T) {
+
+ v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
+
+ collected := v.CollectInter(func(index int, val interface{}) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestMSI(t *testing.T) {
+
+ val := map[string]interface{}{"name": "Tyler"}
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").MSI())
+ assert.Equal(t, val, New(m).Get("value").MustMSI())
+ assert.Equal(t, map[string]interface{}(nil), New(m).Get("nothing").MSI())
+ assert.Equal(t, val, New(m).Get("nothing").MSI(map[string]interface{}{"name": "Tyler"}))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustMSI()
+ })
+
+}
+
+func TestMSISlice(t *testing.T) {
+
+ val := map[string]interface{}{"name": "Tyler"}
+ m := map[string]interface{}{"value": []map[string]interface{}{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").MSISlice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustMSISlice()[0])
+ assert.Equal(t, []map[string]interface{}(nil), New(m).Get("nothing").MSISlice())
+ assert.Equal(t, val, New(m).Get("nothing").MSISlice([]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustMSISlice()
+ })
+
+}
+
+func TestIsMSI(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: map[string]interface{}(map[string]interface{}{"name": "Tyler"})}
+ assert.True(t, v.IsMSI())
+
+ v = &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
+ assert.True(t, v.IsMSISlice())
+
+}
+
+func TestEachMSI(t *testing.T) {
+
+ v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
+ count := 0
+ replacedVals := make([]map[string]interface{}, 0)
+ assert.Equal(t, v, v.EachMSI(func(i int, val map[string]interface{}) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustMSISlice()[0])
+ assert.Equal(t, replacedVals[1], v.MustMSISlice()[1])
+ assert.Equal(t, replacedVals[2], v.MustMSISlice()[2])
+
+}
+
+func TestWhereMSI(t *testing.T) {
+
+ v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
+
+ selected := v.WhereMSI(func(i int, val map[string]interface{}) bool {
+ return i%2 == 0
+ }).MustMSISlice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupMSI(t *testing.T) {
+
+ v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
+
+ grouped := v.GroupMSI(func(i int, val map[string]interface{}) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]map[string]interface{})
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceMSI(t *testing.T) {
+
+ v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
+
+ rawArr := v.MustMSISlice()
+
+ replaced := v.ReplaceMSI(func(index int, val map[string]interface{}) map[string]interface{} {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustMSISlice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectMSI(t *testing.T) {
+
+ v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
+
+ collected := v.CollectMSI(func(index int, val map[string]interface{}) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestObjxMap(t *testing.T) {
+
+ val := New(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").ObjxMap())
+ assert.Equal(t, val, New(m).Get("value").MustObjxMap())
+ assert.Equal(t, New(nil), New(m).Get("nothing").ObjxMap())
+ assert.Equal(t, val, New(m).Get("nothing").ObjxMap(New(1)))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustObjxMap()
+ })
+
+}
+
+func TestObjxMapSlice(t *testing.T) {
+
+ val := New(1)
+ m := map[string]interface{}{"value": [](Map){val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").ObjxMapSlice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustObjxMapSlice()[0])
+ assert.Equal(t, []Map(nil), New(m).Get("nothing").ObjxMapSlice())
+ assert.Equal(t, val, New(m).Get("nothing").ObjxMapSlice([](Map){(Map)(New(1))})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustObjxMapSlice()
+ })
+
+}
+
+func TestIsObjxMap(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: (Map)(New(1))}
+ assert.True(t, v.IsObjxMap())
+
+ v = &Value{data: [](Map){(Map)(New(1))}}
+ assert.True(t, v.IsObjxMapSlice())
+
+}
+
+func TestEachObjxMap(t *testing.T) {
+
+ v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
+ count := 0
+ replacedVals := make([]Map, 0)
+ assert.Equal(t, v, v.EachObjxMap(func(i int, val Map) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustObjxMapSlice()[0])
+ assert.Equal(t, replacedVals[1], v.MustObjxMapSlice()[1])
+ assert.Equal(t, replacedVals[2], v.MustObjxMapSlice()[2])
+
+}
+
+func TestWhereObjxMap(t *testing.T) {
+
+ v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
+
+ selected := v.WhereObjxMap(func(i int, val Map) bool {
+ return i%2 == 0
+ }).MustObjxMapSlice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupObjxMap(t *testing.T) {
+
+ v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
+
+ grouped := v.GroupObjxMap(func(i int, val Map) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]Map)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceObjxMap(t *testing.T) {
+
+ v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
+
+ rawArr := v.MustObjxMapSlice()
+
+ replaced := v.ReplaceObjxMap(func(index int, val Map) Map {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustObjxMapSlice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectObjxMap(t *testing.T) {
+
+ v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
+
+ collected := v.CollectObjxMap(func(index int, val Map) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestBool(t *testing.T) {
+
+ val := bool(true)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Bool())
+ assert.Equal(t, val, New(m).Get("value").MustBool())
+ assert.Equal(t, bool(false), New(m).Get("nothing").Bool())
+ assert.Equal(t, val, New(m).Get("nothing").Bool(true))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustBool()
+ })
+
+}
+
+func TestBoolSlice(t *testing.T) {
+
+ val := bool(true)
+ m := map[string]interface{}{"value": []bool{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").BoolSlice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustBoolSlice()[0])
+ assert.Equal(t, []bool(nil), New(m).Get("nothing").BoolSlice())
+ assert.Equal(t, val, New(m).Get("nothing").BoolSlice([]bool{bool(true)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustBoolSlice()
+ })
+
+}
+
+func TestIsBool(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: bool(true)}
+ assert.True(t, v.IsBool())
+
+ v = &Value{data: []bool{bool(true)}}
+ assert.True(t, v.IsBoolSlice())
+
+}
+
+func TestEachBool(t *testing.T) {
+
+ v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true)}}
+ count := 0
+ replacedVals := make([]bool, 0)
+ assert.Equal(t, v, v.EachBool(func(i int, val bool) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustBoolSlice()[0])
+ assert.Equal(t, replacedVals[1], v.MustBoolSlice()[1])
+ assert.Equal(t, replacedVals[2], v.MustBoolSlice()[2])
+
+}
+
+func TestWhereBool(t *testing.T) {
+
+ v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
+
+ selected := v.WhereBool(func(i int, val bool) bool {
+ return i%2 == 0
+ }).MustBoolSlice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupBool(t *testing.T) {
+
+ v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
+
+ grouped := v.GroupBool(func(i int, val bool) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]bool)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceBool(t *testing.T) {
+
+ v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
+
+ rawArr := v.MustBoolSlice()
+
+ replaced := v.ReplaceBool(func(index int, val bool) bool {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustBoolSlice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectBool(t *testing.T) {
+
+ v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
+
+ collected := v.CollectBool(func(index int, val bool) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestStr(t *testing.T) {
+
+ val := string("hello")
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Str())
+ assert.Equal(t, val, New(m).Get("value").MustStr())
+ assert.Equal(t, string(""), New(m).Get("nothing").Str())
+ assert.Equal(t, val, New(m).Get("nothing").Str("hello"))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustStr()
+ })
+
+}
+
+func TestStrSlice(t *testing.T) {
+
+ val := string("hello")
+ m := map[string]interface{}{"value": []string{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").StrSlice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustStrSlice()[0])
+ assert.Equal(t, []string(nil), New(m).Get("nothing").StrSlice())
+ assert.Equal(t, val, New(m).Get("nothing").StrSlice([]string{string("hello")})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustStrSlice()
+ })
+
+}
+
+func TestIsStr(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: string("hello")}
+ assert.True(t, v.IsStr())
+
+ v = &Value{data: []string{string("hello")}}
+ assert.True(t, v.IsStrSlice())
+
+}
+
+func TestEachStr(t *testing.T) {
+
+ v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
+ count := 0
+ replacedVals := make([]string, 0)
+ assert.Equal(t, v, v.EachStr(func(i int, val string) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustStrSlice()[0])
+ assert.Equal(t, replacedVals[1], v.MustStrSlice()[1])
+ assert.Equal(t, replacedVals[2], v.MustStrSlice()[2])
+
+}
+
+func TestWhereStr(t *testing.T) {
+
+ v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
+
+ selected := v.WhereStr(func(i int, val string) bool {
+ return i%2 == 0
+ }).MustStrSlice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupStr(t *testing.T) {
+
+ v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
+
+ grouped := v.GroupStr(func(i int, val string) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]string)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceStr(t *testing.T) {
+
+ v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
+
+ rawArr := v.MustStrSlice()
+
+ replaced := v.ReplaceStr(func(index int, val string) string {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustStrSlice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectStr(t *testing.T) {
+
+ v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
+
+ collected := v.CollectStr(func(index int, val string) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestInt(t *testing.T) {
+
+ val := int(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Int())
+ assert.Equal(t, val, New(m).Get("value").MustInt())
+ assert.Equal(t, int(0), New(m).Get("nothing").Int())
+ assert.Equal(t, val, New(m).Get("nothing").Int(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustInt()
+ })
+
+}
+
+func TestIntSlice(t *testing.T) {
+
+ val := int(1)
+ m := map[string]interface{}{"value": []int{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").IntSlice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustIntSlice()[0])
+ assert.Equal(t, []int(nil), New(m).Get("nothing").IntSlice())
+ assert.Equal(t, val, New(m).Get("nothing").IntSlice([]int{int(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustIntSlice()
+ })
+
+}
+
+func TestIsInt(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: int(1)}
+ assert.True(t, v.IsInt())
+
+ v = &Value{data: []int{int(1)}}
+ assert.True(t, v.IsIntSlice())
+
+}
+
+func TestEachInt(t *testing.T) {
+
+ v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1)}}
+ count := 0
+ replacedVals := make([]int, 0)
+ assert.Equal(t, v, v.EachInt(func(i int, val int) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustIntSlice()[0])
+ assert.Equal(t, replacedVals[1], v.MustIntSlice()[1])
+ assert.Equal(t, replacedVals[2], v.MustIntSlice()[2])
+
+}
+
+func TestWhereInt(t *testing.T) {
+
+ v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
+
+ selected := v.WhereInt(func(i int, val int) bool {
+ return i%2 == 0
+ }).MustIntSlice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupInt(t *testing.T) {
+
+ v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
+
+ grouped := v.GroupInt(func(i int, val int) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]int)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceInt(t *testing.T) {
+
+ v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
+
+ rawArr := v.MustIntSlice()
+
+ replaced := v.ReplaceInt(func(index int, val int) int {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustIntSlice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectInt(t *testing.T) {
+
+ v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
+
+ collected := v.CollectInt(func(index int, val int) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestInt8(t *testing.T) {
+
+ val := int8(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Int8())
+ assert.Equal(t, val, New(m).Get("value").MustInt8())
+ assert.Equal(t, int8(0), New(m).Get("nothing").Int8())
+ assert.Equal(t, val, New(m).Get("nothing").Int8(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustInt8()
+ })
+
+}
+
+func TestInt8Slice(t *testing.T) {
+
+ val := int8(1)
+ m := map[string]interface{}{"value": []int8{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Int8Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustInt8Slice()[0])
+ assert.Equal(t, []int8(nil), New(m).Get("nothing").Int8Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Int8Slice([]int8{int8(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustInt8Slice()
+ })
+
+}
+
+func TestIsInt8(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: int8(1)}
+ assert.True(t, v.IsInt8())
+
+ v = &Value{data: []int8{int8(1)}}
+ assert.True(t, v.IsInt8Slice())
+
+}
+
+func TestEachInt8(t *testing.T) {
+
+ v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1)}}
+ count := 0
+ replacedVals := make([]int8, 0)
+ assert.Equal(t, v, v.EachInt8(func(i int, val int8) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustInt8Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustInt8Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustInt8Slice()[2])
+
+}
+
+func TestWhereInt8(t *testing.T) {
+
+ v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
+
+ selected := v.WhereInt8(func(i int, val int8) bool {
+ return i%2 == 0
+ }).MustInt8Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupInt8(t *testing.T) {
+
+ v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
+
+ grouped := v.GroupInt8(func(i int, val int8) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]int8)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceInt8(t *testing.T) {
+
+ v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
+
+ rawArr := v.MustInt8Slice()
+
+ replaced := v.ReplaceInt8(func(index int, val int8) int8 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustInt8Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectInt8(t *testing.T) {
+
+ v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
+
+ collected := v.CollectInt8(func(index int, val int8) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestInt16(t *testing.T) {
+
+ val := int16(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Int16())
+ assert.Equal(t, val, New(m).Get("value").MustInt16())
+ assert.Equal(t, int16(0), New(m).Get("nothing").Int16())
+ assert.Equal(t, val, New(m).Get("nothing").Int16(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustInt16()
+ })
+
+}
+
+func TestInt16Slice(t *testing.T) {
+
+ val := int16(1)
+ m := map[string]interface{}{"value": []int16{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Int16Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustInt16Slice()[0])
+ assert.Equal(t, []int16(nil), New(m).Get("nothing").Int16Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Int16Slice([]int16{int16(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustInt16Slice()
+ })
+
+}
+
+func TestIsInt16(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: int16(1)}
+ assert.True(t, v.IsInt16())
+
+ v = &Value{data: []int16{int16(1)}}
+ assert.True(t, v.IsInt16Slice())
+
+}
+
+func TestEachInt16(t *testing.T) {
+
+ v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1)}}
+ count := 0
+ replacedVals := make([]int16, 0)
+ assert.Equal(t, v, v.EachInt16(func(i int, val int16) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustInt16Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustInt16Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustInt16Slice()[2])
+
+}
+
+func TestWhereInt16(t *testing.T) {
+
+ v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
+
+ selected := v.WhereInt16(func(i int, val int16) bool {
+ return i%2 == 0
+ }).MustInt16Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupInt16(t *testing.T) {
+
+ v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
+
+ grouped := v.GroupInt16(func(i int, val int16) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]int16)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceInt16(t *testing.T) {
+
+ v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
+
+ rawArr := v.MustInt16Slice()
+
+ replaced := v.ReplaceInt16(func(index int, val int16) int16 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustInt16Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectInt16(t *testing.T) {
+
+ v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
+
+ collected := v.CollectInt16(func(index int, val int16) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestInt32(t *testing.T) {
+
+ val := int32(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Int32())
+ assert.Equal(t, val, New(m).Get("value").MustInt32())
+ assert.Equal(t, int32(0), New(m).Get("nothing").Int32())
+ assert.Equal(t, val, New(m).Get("nothing").Int32(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustInt32()
+ })
+
+}
+
+func TestInt32Slice(t *testing.T) {
+
+ val := int32(1)
+ m := map[string]interface{}{"value": []int32{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Int32Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustInt32Slice()[0])
+ assert.Equal(t, []int32(nil), New(m).Get("nothing").Int32Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Int32Slice([]int32{int32(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustInt32Slice()
+ })
+
+}
+
+func TestIsInt32(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: int32(1)}
+ assert.True(t, v.IsInt32())
+
+ v = &Value{data: []int32{int32(1)}}
+ assert.True(t, v.IsInt32Slice())
+
+}
+
+func TestEachInt32(t *testing.T) {
+
+ v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1)}}
+ count := 0
+ replacedVals := make([]int32, 0)
+ assert.Equal(t, v, v.EachInt32(func(i int, val int32) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustInt32Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustInt32Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustInt32Slice()[2])
+
+}
+
+func TestWhereInt32(t *testing.T) {
+
+ v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
+
+ selected := v.WhereInt32(func(i int, val int32) bool {
+ return i%2 == 0
+ }).MustInt32Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupInt32(t *testing.T) {
+
+ v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
+
+ grouped := v.GroupInt32(func(i int, val int32) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]int32)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceInt32(t *testing.T) {
+
+ v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
+
+ rawArr := v.MustInt32Slice()
+
+ replaced := v.ReplaceInt32(func(index int, val int32) int32 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustInt32Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectInt32(t *testing.T) {
+
+ v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
+
+ collected := v.CollectInt32(func(index int, val int32) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestInt64(t *testing.T) {
+
+ val := int64(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Int64())
+ assert.Equal(t, val, New(m).Get("value").MustInt64())
+ assert.Equal(t, int64(0), New(m).Get("nothing").Int64())
+ assert.Equal(t, val, New(m).Get("nothing").Int64(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustInt64()
+ })
+
+}
+
+func TestInt64Slice(t *testing.T) {
+
+ val := int64(1)
+ m := map[string]interface{}{"value": []int64{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Int64Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustInt64Slice()[0])
+ assert.Equal(t, []int64(nil), New(m).Get("nothing").Int64Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Int64Slice([]int64{int64(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustInt64Slice()
+ })
+
+}
+
+func TestIsInt64(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: int64(1)}
+ assert.True(t, v.IsInt64())
+
+ v = &Value{data: []int64{int64(1)}}
+ assert.True(t, v.IsInt64Slice())
+
+}
+
+func TestEachInt64(t *testing.T) {
+
+ v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1)}}
+ count := 0
+ replacedVals := make([]int64, 0)
+ assert.Equal(t, v, v.EachInt64(func(i int, val int64) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustInt64Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustInt64Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustInt64Slice()[2])
+
+}
+
+func TestWhereInt64(t *testing.T) {
+
+ v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
+
+ selected := v.WhereInt64(func(i int, val int64) bool {
+ return i%2 == 0
+ }).MustInt64Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupInt64(t *testing.T) {
+
+ v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
+
+ grouped := v.GroupInt64(func(i int, val int64) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]int64)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceInt64(t *testing.T) {
+
+ v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
+
+ rawArr := v.MustInt64Slice()
+
+ replaced := v.ReplaceInt64(func(index int, val int64) int64 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustInt64Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectInt64(t *testing.T) {
+
+ v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
+
+ collected := v.CollectInt64(func(index int, val int64) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestUint(t *testing.T) {
+
+ val := uint(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Uint())
+ assert.Equal(t, val, New(m).Get("value").MustUint())
+ assert.Equal(t, uint(0), New(m).Get("nothing").Uint())
+ assert.Equal(t, val, New(m).Get("nothing").Uint(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustUint()
+ })
+
+}
+
+func TestUintSlice(t *testing.T) {
+
+ val := uint(1)
+ m := map[string]interface{}{"value": []uint{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").UintSlice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustUintSlice()[0])
+ assert.Equal(t, []uint(nil), New(m).Get("nothing").UintSlice())
+ assert.Equal(t, val, New(m).Get("nothing").UintSlice([]uint{uint(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustUintSlice()
+ })
+
+}
+
+func TestIsUint(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: uint(1)}
+ assert.True(t, v.IsUint())
+
+ v = &Value{data: []uint{uint(1)}}
+ assert.True(t, v.IsUintSlice())
+
+}
+
+func TestEachUint(t *testing.T) {
+
+ v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1)}}
+ count := 0
+ replacedVals := make([]uint, 0)
+ assert.Equal(t, v, v.EachUint(func(i int, val uint) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustUintSlice()[0])
+ assert.Equal(t, replacedVals[1], v.MustUintSlice()[1])
+ assert.Equal(t, replacedVals[2], v.MustUintSlice()[2])
+
+}
+
+func TestWhereUint(t *testing.T) {
+
+ v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
+
+ selected := v.WhereUint(func(i int, val uint) bool {
+ return i%2 == 0
+ }).MustUintSlice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUint(t *testing.T) {
+
+ v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
+
+ grouped := v.GroupUint(func(i int, val uint) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]uint)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUint(t *testing.T) {
+
+ v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
+
+ rawArr := v.MustUintSlice()
+
+ replaced := v.ReplaceUint(func(index int, val uint) uint {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustUintSlice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectUint(t *testing.T) {
+
+ v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
+
+ collected := v.CollectUint(func(index int, val uint) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestUint8(t *testing.T) {
+
+ val := uint8(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Uint8())
+ assert.Equal(t, val, New(m).Get("value").MustUint8())
+ assert.Equal(t, uint8(0), New(m).Get("nothing").Uint8())
+ assert.Equal(t, val, New(m).Get("nothing").Uint8(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustUint8()
+ })
+
+}
+
+func TestUint8Slice(t *testing.T) {
+
+ val := uint8(1)
+ m := map[string]interface{}{"value": []uint8{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Uint8Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustUint8Slice()[0])
+ assert.Equal(t, []uint8(nil), New(m).Get("nothing").Uint8Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Uint8Slice([]uint8{uint8(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustUint8Slice()
+ })
+
+}
+
+func TestIsUint8(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: uint8(1)}
+ assert.True(t, v.IsUint8())
+
+ v = &Value{data: []uint8{uint8(1)}}
+ assert.True(t, v.IsUint8Slice())
+
+}
+
+func TestEachUint8(t *testing.T) {
+
+ v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
+ count := 0
+ replacedVals := make([]uint8, 0)
+ assert.Equal(t, v, v.EachUint8(func(i int, val uint8) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustUint8Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustUint8Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustUint8Slice()[2])
+
+}
+
+func TestWhereUint8(t *testing.T) {
+
+ v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
+
+ selected := v.WhereUint8(func(i int, val uint8) bool {
+ return i%2 == 0
+ }).MustUint8Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUint8(t *testing.T) {
+
+ v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
+
+ grouped := v.GroupUint8(func(i int, val uint8) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]uint8)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUint8(t *testing.T) {
+
+ v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
+
+ rawArr := v.MustUint8Slice()
+
+ replaced := v.ReplaceUint8(func(index int, val uint8) uint8 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustUint8Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectUint8(t *testing.T) {
+
+ v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
+
+ collected := v.CollectUint8(func(index int, val uint8) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestUint16(t *testing.T) {
+
+ val := uint16(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Uint16())
+ assert.Equal(t, val, New(m).Get("value").MustUint16())
+ assert.Equal(t, uint16(0), New(m).Get("nothing").Uint16())
+ assert.Equal(t, val, New(m).Get("nothing").Uint16(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustUint16()
+ })
+
+}
+
+func TestUint16Slice(t *testing.T) {
+
+ val := uint16(1)
+ m := map[string]interface{}{"value": []uint16{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Uint16Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustUint16Slice()[0])
+ assert.Equal(t, []uint16(nil), New(m).Get("nothing").Uint16Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Uint16Slice([]uint16{uint16(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustUint16Slice()
+ })
+
+}
+
+func TestIsUint16(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: uint16(1)}
+ assert.True(t, v.IsUint16())
+
+ v = &Value{data: []uint16{uint16(1)}}
+ assert.True(t, v.IsUint16Slice())
+
+}
+
+func TestEachUint16(t *testing.T) {
+
+ v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
+ count := 0
+ replacedVals := make([]uint16, 0)
+ assert.Equal(t, v, v.EachUint16(func(i int, val uint16) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustUint16Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustUint16Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustUint16Slice()[2])
+
+}
+
+func TestWhereUint16(t *testing.T) {
+
+ v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
+
+ selected := v.WhereUint16(func(i int, val uint16) bool {
+ return i%2 == 0
+ }).MustUint16Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUint16(t *testing.T) {
+
+ v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
+
+ grouped := v.GroupUint16(func(i int, val uint16) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]uint16)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUint16(t *testing.T) {
+
+ v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
+
+ rawArr := v.MustUint16Slice()
+
+ replaced := v.ReplaceUint16(func(index int, val uint16) uint16 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustUint16Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectUint16(t *testing.T) {
+
+ v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
+
+ collected := v.CollectUint16(func(index int, val uint16) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestUint32(t *testing.T) {
+
+ val := uint32(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Uint32())
+ assert.Equal(t, val, New(m).Get("value").MustUint32())
+ assert.Equal(t, uint32(0), New(m).Get("nothing").Uint32())
+ assert.Equal(t, val, New(m).Get("nothing").Uint32(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustUint32()
+ })
+
+}
+
+func TestUint32Slice(t *testing.T) {
+
+ val := uint32(1)
+ m := map[string]interface{}{"value": []uint32{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Uint32Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustUint32Slice()[0])
+ assert.Equal(t, []uint32(nil), New(m).Get("nothing").Uint32Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Uint32Slice([]uint32{uint32(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustUint32Slice()
+ })
+
+}
+
+func TestIsUint32(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: uint32(1)}
+ assert.True(t, v.IsUint32())
+
+ v = &Value{data: []uint32{uint32(1)}}
+ assert.True(t, v.IsUint32Slice())
+
+}
+
+func TestEachUint32(t *testing.T) {
+
+ v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
+ count := 0
+ replacedVals := make([]uint32, 0)
+ assert.Equal(t, v, v.EachUint32(func(i int, val uint32) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustUint32Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustUint32Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustUint32Slice()[2])
+
+}
+
+func TestWhereUint32(t *testing.T) {
+
+ v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
+
+ selected := v.WhereUint32(func(i int, val uint32) bool {
+ return i%2 == 0
+ }).MustUint32Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUint32(t *testing.T) {
+
+ v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
+
+ grouped := v.GroupUint32(func(i int, val uint32) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]uint32)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUint32(t *testing.T) {
+
+ v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
+
+ rawArr := v.MustUint32Slice()
+
+ replaced := v.ReplaceUint32(func(index int, val uint32) uint32 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustUint32Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectUint32(t *testing.T) {
+
+ v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
+
+ collected := v.CollectUint32(func(index int, val uint32) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestUint64(t *testing.T) {
+
+ val := uint64(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Uint64())
+ assert.Equal(t, val, New(m).Get("value").MustUint64())
+ assert.Equal(t, uint64(0), New(m).Get("nothing").Uint64())
+ assert.Equal(t, val, New(m).Get("nothing").Uint64(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustUint64()
+ })
+
+}
+
+func TestUint64Slice(t *testing.T) {
+
+ val := uint64(1)
+ m := map[string]interface{}{"value": []uint64{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Uint64Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustUint64Slice()[0])
+ assert.Equal(t, []uint64(nil), New(m).Get("nothing").Uint64Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Uint64Slice([]uint64{uint64(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustUint64Slice()
+ })
+
+}
+
+func TestIsUint64(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: uint64(1)}
+ assert.True(t, v.IsUint64())
+
+ v = &Value{data: []uint64{uint64(1)}}
+ assert.True(t, v.IsUint64Slice())
+
+}
+
+func TestEachUint64(t *testing.T) {
+
+ v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
+ count := 0
+ replacedVals := make([]uint64, 0)
+ assert.Equal(t, v, v.EachUint64(func(i int, val uint64) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustUint64Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustUint64Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustUint64Slice()[2])
+
+}
+
+func TestWhereUint64(t *testing.T) {
+
+ v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
+
+ selected := v.WhereUint64(func(i int, val uint64) bool {
+ return i%2 == 0
+ }).MustUint64Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUint64(t *testing.T) {
+
+ v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
+
+ grouped := v.GroupUint64(func(i int, val uint64) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]uint64)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUint64(t *testing.T) {
+
+ v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
+
+ rawArr := v.MustUint64Slice()
+
+ replaced := v.ReplaceUint64(func(index int, val uint64) uint64 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustUint64Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectUint64(t *testing.T) {
+
+ v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
+
+ collected := v.CollectUint64(func(index int, val uint64) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestUintptr(t *testing.T) {
+
+ val := uintptr(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Uintptr())
+ assert.Equal(t, val, New(m).Get("value").MustUintptr())
+ assert.Equal(t, uintptr(0), New(m).Get("nothing").Uintptr())
+ assert.Equal(t, val, New(m).Get("nothing").Uintptr(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustUintptr()
+ })
+
+}
+
+func TestUintptrSlice(t *testing.T) {
+
+ val := uintptr(1)
+ m := map[string]interface{}{"value": []uintptr{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").UintptrSlice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustUintptrSlice()[0])
+ assert.Equal(t, []uintptr(nil), New(m).Get("nothing").UintptrSlice())
+ assert.Equal(t, val, New(m).Get("nothing").UintptrSlice([]uintptr{uintptr(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustUintptrSlice()
+ })
+
+}
+
+func TestIsUintptr(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: uintptr(1)}
+ assert.True(t, v.IsUintptr())
+
+ v = &Value{data: []uintptr{uintptr(1)}}
+ assert.True(t, v.IsUintptrSlice())
+
+}
+
+func TestEachUintptr(t *testing.T) {
+
+ v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
+ count := 0
+ replacedVals := make([]uintptr, 0)
+ assert.Equal(t, v, v.EachUintptr(func(i int, val uintptr) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustUintptrSlice()[0])
+ assert.Equal(t, replacedVals[1], v.MustUintptrSlice()[1])
+ assert.Equal(t, replacedVals[2], v.MustUintptrSlice()[2])
+
+}
+
+func TestWhereUintptr(t *testing.T) {
+
+ v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
+
+ selected := v.WhereUintptr(func(i int, val uintptr) bool {
+ return i%2 == 0
+ }).MustUintptrSlice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupUintptr(t *testing.T) {
+
+ v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
+
+ grouped := v.GroupUintptr(func(i int, val uintptr) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]uintptr)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceUintptr(t *testing.T) {
+
+ v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
+
+ rawArr := v.MustUintptrSlice()
+
+ replaced := v.ReplaceUintptr(func(index int, val uintptr) uintptr {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustUintptrSlice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectUintptr(t *testing.T) {
+
+ v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
+
+ collected := v.CollectUintptr(func(index int, val uintptr) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestFloat32(t *testing.T) {
+
+ val := float32(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Float32())
+ assert.Equal(t, val, New(m).Get("value").MustFloat32())
+ assert.Equal(t, float32(0), New(m).Get("nothing").Float32())
+ assert.Equal(t, val, New(m).Get("nothing").Float32(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustFloat32()
+ })
+
+}
+
+func TestFloat32Slice(t *testing.T) {
+
+ val := float32(1)
+ m := map[string]interface{}{"value": []float32{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Float32Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustFloat32Slice()[0])
+ assert.Equal(t, []float32(nil), New(m).Get("nothing").Float32Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Float32Slice([]float32{float32(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustFloat32Slice()
+ })
+
+}
+
+func TestIsFloat32(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: float32(1)}
+ assert.True(t, v.IsFloat32())
+
+ v = &Value{data: []float32{float32(1)}}
+ assert.True(t, v.IsFloat32Slice())
+
+}
+
+func TestEachFloat32(t *testing.T) {
+
+ v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1)}}
+ count := 0
+ replacedVals := make([]float32, 0)
+ assert.Equal(t, v, v.EachFloat32(func(i int, val float32) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustFloat32Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustFloat32Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustFloat32Slice()[2])
+
+}
+
+func TestWhereFloat32(t *testing.T) {
+
+ v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
+
+ selected := v.WhereFloat32(func(i int, val float32) bool {
+ return i%2 == 0
+ }).MustFloat32Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupFloat32(t *testing.T) {
+
+ v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
+
+ grouped := v.GroupFloat32(func(i int, val float32) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]float32)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceFloat32(t *testing.T) {
+
+ v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
+
+ rawArr := v.MustFloat32Slice()
+
+ replaced := v.ReplaceFloat32(func(index int, val float32) float32 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustFloat32Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectFloat32(t *testing.T) {
+
+ v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
+
+ collected := v.CollectFloat32(func(index int, val float32) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestFloat64(t *testing.T) {
+
+ val := float64(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Float64())
+ assert.Equal(t, val, New(m).Get("value").MustFloat64())
+ assert.Equal(t, float64(0), New(m).Get("nothing").Float64())
+ assert.Equal(t, val, New(m).Get("nothing").Float64(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustFloat64()
+ })
+
+}
+
+func TestFloat64Slice(t *testing.T) {
+
+ val := float64(1)
+ m := map[string]interface{}{"value": []float64{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Float64Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustFloat64Slice()[0])
+ assert.Equal(t, []float64(nil), New(m).Get("nothing").Float64Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Float64Slice([]float64{float64(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustFloat64Slice()
+ })
+
+}
+
+func TestIsFloat64(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: float64(1)}
+ assert.True(t, v.IsFloat64())
+
+ v = &Value{data: []float64{float64(1)}}
+ assert.True(t, v.IsFloat64Slice())
+
+}
+
+func TestEachFloat64(t *testing.T) {
+
+ v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1)}}
+ count := 0
+ replacedVals := make([]float64, 0)
+ assert.Equal(t, v, v.EachFloat64(func(i int, val float64) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustFloat64Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustFloat64Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustFloat64Slice()[2])
+
+}
+
+func TestWhereFloat64(t *testing.T) {
+
+ v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
+
+ selected := v.WhereFloat64(func(i int, val float64) bool {
+ return i%2 == 0
+ }).MustFloat64Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupFloat64(t *testing.T) {
+
+ v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
+
+ grouped := v.GroupFloat64(func(i int, val float64) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]float64)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceFloat64(t *testing.T) {
+
+ v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
+
+ rawArr := v.MustFloat64Slice()
+
+ replaced := v.ReplaceFloat64(func(index int, val float64) float64 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustFloat64Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectFloat64(t *testing.T) {
+
+ v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
+
+ collected := v.CollectFloat64(func(index int, val float64) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestComplex64(t *testing.T) {
+
+ val := complex64(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Complex64())
+ assert.Equal(t, val, New(m).Get("value").MustComplex64())
+ assert.Equal(t, complex64(0), New(m).Get("nothing").Complex64())
+ assert.Equal(t, val, New(m).Get("nothing").Complex64(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustComplex64()
+ })
+
+}
+
+func TestComplex64Slice(t *testing.T) {
+
+ val := complex64(1)
+ m := map[string]interface{}{"value": []complex64{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Complex64Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustComplex64Slice()[0])
+ assert.Equal(t, []complex64(nil), New(m).Get("nothing").Complex64Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Complex64Slice([]complex64{complex64(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustComplex64Slice()
+ })
+
+}
+
+func TestIsComplex64(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: complex64(1)}
+ assert.True(t, v.IsComplex64())
+
+ v = &Value{data: []complex64{complex64(1)}}
+ assert.True(t, v.IsComplex64Slice())
+
+}
+
+func TestEachComplex64(t *testing.T) {
+
+ v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
+ count := 0
+ replacedVals := make([]complex64, 0)
+ assert.Equal(t, v, v.EachComplex64(func(i int, val complex64) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustComplex64Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustComplex64Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustComplex64Slice()[2])
+
+}
+
+func TestWhereComplex64(t *testing.T) {
+
+ v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
+
+ selected := v.WhereComplex64(func(i int, val complex64) bool {
+ return i%2 == 0
+ }).MustComplex64Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupComplex64(t *testing.T) {
+
+ v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
+
+ grouped := v.GroupComplex64(func(i int, val complex64) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]complex64)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceComplex64(t *testing.T) {
+
+ v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
+
+ rawArr := v.MustComplex64Slice()
+
+ replaced := v.ReplaceComplex64(func(index int, val complex64) complex64 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustComplex64Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectComplex64(t *testing.T) {
+
+ v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
+
+ collected := v.CollectComplex64(func(index int, val complex64) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func TestComplex128(t *testing.T) {
+
+ val := complex128(1)
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Complex128())
+ assert.Equal(t, val, New(m).Get("value").MustComplex128())
+ assert.Equal(t, complex128(0), New(m).Get("nothing").Complex128())
+ assert.Equal(t, val, New(m).Get("nothing").Complex128(1))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").MustComplex128()
+ })
+
+}
+
+func TestComplex128Slice(t *testing.T) {
+
+ val := complex128(1)
+ m := map[string]interface{}{"value": []complex128{val}, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").Complex128Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").MustComplex128Slice()[0])
+ assert.Equal(t, []complex128(nil), New(m).Get("nothing").Complex128Slice())
+ assert.Equal(t, val, New(m).Get("nothing").Complex128Slice([]complex128{complex128(1)})[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").MustComplex128Slice()
+ })
+
+}
+
+func TestIsComplex128(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: complex128(1)}
+ assert.True(t, v.IsComplex128())
+
+ v = &Value{data: []complex128{complex128(1)}}
+ assert.True(t, v.IsComplex128Slice())
+
+}
+
+func TestEachComplex128(t *testing.T) {
+
+ v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
+ count := 0
+ replacedVals := make([]complex128, 0)
+ assert.Equal(t, v, v.EachComplex128(func(i int, val complex128) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.MustComplex128Slice()[0])
+ assert.Equal(t, replacedVals[1], v.MustComplex128Slice()[1])
+ assert.Equal(t, replacedVals[2], v.MustComplex128Slice()[2])
+
+}
+
+func TestWhereComplex128(t *testing.T) {
+
+ v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
+
+ selected := v.WhereComplex128(func(i int, val complex128) bool {
+ return i%2 == 0
+ }).MustComplex128Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroupComplex128(t *testing.T) {
+
+ v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
+
+ grouped := v.GroupComplex128(func(i int, val complex128) string {
+ return fmt.Sprintf("%v", i%2 == 0)
+ }).data.(map[string][]complex128)
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplaceComplex128(t *testing.T) {
+
+ v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
+
+ rawArr := v.MustComplex128Slice()
+
+ replaced := v.ReplaceComplex128(func(index int, val complex128) complex128 {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.MustComplex128Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollectComplex128(t *testing.T) {
+
+ v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
+
+ collected := v.CollectComplex128(func(index int, val complex128) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
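
To ground these generated accessor tests, here is a small sketch of the objx API they exercise — New, Get, the typed accessors with optional defaults, and the Where*/Must* helpers — assuming the canonical github.com/stretchr/objx import path for this vendored package:

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{
		"scores": []float64{1, 2, 3, 4},
	})

	// Typed accessors accept an optional default for missing keys...
	fmt.Println(m.Get("missing").Float64(42)) // 42

	// ...while the Must* variants panic on a type mismatch, as the
	// assert.Panics cases above verify.
	evens := m.Get("scores").WhereFloat64(func(i int, v float64) bool {
		return i%2 == 0 // keep even indices, like the Where* tests above
	}).MustFloat64Slice()
	fmt.Println(evens) // [1 3]
}
```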
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value_test.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value_test.go
new file mode 100644
index 000000000..0bc65d92c
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value_test.go
@@ -0,0 +1 @@
+package objx
diff --git a/vendor/github.com/tylerb/graceful/README.md b/vendor/github.com/tylerb/graceful/README.md
index 328c3acf8..c641b6e78 100644
--- a/vendor/github.com/tylerb/graceful/README.md
+++ b/vendor/github.com/tylerb/graceful/README.md
@@ -3,6 +3,11 @@ graceful [![GoDoc](https://godoc.org/github.com/tylerb/graceful?status.png)](htt
Graceful is a Go 1.3+ package enabling graceful shutdown of http.Handler servers.
+## Using Go 1.8?
+
+If you are using Go 1.8, you may not need to use this library! Consider using `http.Server`'s built-in [Shutdown()](https://golang.org/pkg/net/http/#Server.Shutdown)
+method for graceful shutdowns.
+
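
A minimal sketch of that built-in alternative, assuming Go 1.8+; the address, the interrupt handling, and the 5-second grace period are illustrative choices, not part of graceful:

```go
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8080"}

	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	// Wait for an interrupt, then drain in-flight requests for up to 5s.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	<-stop

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("graceful shutdown failed: %v", err)
	}
}
```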
## Installation
To install, simply execute:
diff --git a/vendor/github.com/xenolf/lego/README.md b/vendor/github.com/xenolf/lego/README.md
index b9c374432..9ec7e1f38 100644
--- a/vendor/github.com/xenolf/lego/README.md
+++ b/vendor/github.com/xenolf/lego/README.md
@@ -229,7 +229,7 @@ myUser := MyUser{
// A client facilitates communication with the CA server. This CA URL is
// configured for a local dev instance of Boulder running in Docker in a VM.
-client, err := acme.NewClient("http://192.168.99.100:4000", &myUser, acme.RSA2048)
+client, err := acme.NewClient("http://192.168.99.100:4000/directory", &myUser, acme.RSA2048)
if err != nil {
log.Fatal(err)
}
diff --git a/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/xenolf/lego/acme/http.go
index fd6018a10..e469e0de2 100644
--- a/vendor/github.com/xenolf/lego/acme/http.go
+++ b/vendor/github.com/xenolf/lego/acme/http.go
@@ -18,6 +18,7 @@ var UserAgent string
// HTTPClient is an HTTP client with a reasonable timeout value.
var HTTPClient = http.Client{
Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
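
The added Proxy field makes lego's client honor the standard proxy environment variables. A small sketch of what http.ProxyFromEnvironment resolves for a given request (the URL is illustrative):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// ProxyFromEnvironment consults HTTP_PROXY, HTTPS_PROXY and NO_PROXY
	// (or their lowercase forms) once and caches the result.
	req, err := http.NewRequest("GET", "https://example.org/directory", nil)
	if err != nil {
		panic(err)
	}
	proxyURL, err := http.ProxyFromEnvironment(req)
	if err != nil {
		fmt.Println("proxy lookup failed:", err)
		return
	}
	if proxyURL == nil {
		fmt.Println("no proxy configured for this request")
		return
	}
	fmt.Println("requests would be sent via", proxyURL)
}
```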
diff --git a/vendor/github.com/xenolf/lego/cli.go b/vendor/github.com/xenolf/lego/cli.go
index 3aac9e253..58567be9f 100644
--- a/vendor/github.com/xenolf/lego/cli.go
+++ b/vendor/github.com/xenolf/lego/cli.go
@@ -172,7 +172,7 @@ func main() {
},
cli.StringSliceFlag{
Name: "dns-resolvers",
- Usage: "Set the resolvers to use for performing recursive DNS queries. Supported: host:port. The default is to use Google's DNS resolvers.",
+ Usage: "Set the resolvers to use for performing recursive DNS queries. Supported: host:port. The default is to use the system resolvers, or Google's DNS resolvers if the system's cannot be determined.",
},
cli.BoolFlag{
Name: "pem",
diff --git a/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go b/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go
index 7b2fccc98..4b125e8df 100644
--- a/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go
+++ b/vendor/github.com/xenolf/lego/providers/dns/exoscale/exoscale.go
@@ -48,25 +48,25 @@ func (c *DNSProvider) Present(domain, token, keyAuth string) error {
return err
}
- recordId, err := c.FindExistingRecordId(zone, recordName)
+ recordID, err := c.FindExistingRecordId(zone, recordName)
if err != nil {
return err
}
record := egoscale.DNSRecord{
Name: recordName,
- Ttl: ttl,
+ TTL: ttl,
Content: value,
RecordType: "TXT",
}
- if recordId == 0 {
+ if recordID == 0 {
_, err := c.client.CreateRecord(zone, record)
if err != nil {
return errors.New("Error while creating DNS record: " + err.Error())
}
} else {
- record.Id = recordId
+ record.ID = recordID
_, err := c.client.UpdateRecord(zone, record)
if err != nil {
return errors.New("Error while updating DNS record: " + err.Error())
@@ -84,17 +84,13 @@ func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error {
return err
}
- recordId, err := c.FindExistingRecordId(zone, recordName)
+ recordID, err := c.FindExistingRecordId(zone, recordName)
if err != nil {
return err
}
- if recordId != 0 {
- record := egoscale.DNSRecord{
- Id: recordId,
- }
-
- err = c.client.DeleteRecord(zone, record)
+ if recordID != 0 {
+ err = c.client.DeleteRecord(zone, recordID)
if err != nil {
return errors.New("Error while deleting DNS record: " + err.Error())
}
@@ -106,13 +102,13 @@ func (c *DNSProvider) CleanUp(domain, token, keyAuth string) error {
// Query Exoscale to find an existing record for this name.
// Returns 0 (and a nil error) if no record could be found
func (c *DNSProvider) FindExistingRecordId(zone, recordName string) (int64, error) {
- responses, err := c.client.GetRecords(zone)
+ records, err := c.client.GetRecords(zone)
if err != nil {
	return -1, errors.New("Error while retrieving DNS records: " + err.Error())
}
- for _, response := range responses {
- if response.Record.Name == recordName {
- return response.Record.Id, nil
+ for _, record := range records {
+ if record.Name == recordName {
+ return record.ID, nil
}
}
return 0, nil
diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go
index 94edba986..453e72291 100644
--- a/vendor/golang.org/x/crypto/acme/autocert/autocert.go
+++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go
@@ -24,7 +24,9 @@ import (
"fmt"
"io"
mathrand "math/rand"
+ "net"
"net/http"
+ "path"
"strconv"
"strings"
"sync"
@@ -80,8 +82,9 @@ func defaultHostPolicy(context.Context, string) error {
}
// Manager is a stateful certificate manager built on top of acme.Client.
-// It obtains and refreshes certificates automatically,
-// as well as providing them to a TLS server via tls.Config.
+// It obtains and refreshes certificates automatically using "tls-sni-01",
+// "tls-sni-02" and "http-01" challenge types, as well as providing them
+// to a TLS server via tls.Config.
//
// You must specify a cache implementation, such as DirCache,
// to reuse obtained certificates across program restarts.
@@ -150,15 +153,26 @@ type Manager struct {
stateMu sync.Mutex
state map[string]*certState // keyed by domain name
- // tokenCert is keyed by token domain name, which matches server name
- // of ClientHello. Keys always have ".acme.invalid" suffix.
- tokenCertMu sync.RWMutex
- tokenCert map[string]*tls.Certificate
-
// renewal tracks the set of domains currently running renewal timers.
// It is keyed by domain name.
renewalMu sync.Mutex
renewal map[string]*domainRenewal
+
+ // tokensMu guards the rest of the fields: tryHTTP01, certTokens and httpTokens.
+ tokensMu sync.RWMutex
+ // tryHTTP01 indicates whether the Manager should try "http-01" challenge type
+ // during the authorization flow.
+ tryHTTP01 bool
+ // httpTokens contains response body values for http-01 challenges
+ // and is keyed by the URL path at which a challenge response is expected
+ // to be provisioned.
+ // The entries are stored for the duration of the authorization flow.
+ httpTokens map[string][]byte
+ // certTokens contains temporary certificates for tls-sni challenges
+ // and is keyed by token domain name, which matches server name of ClientHello.
+ // Keys always have ".acme.invalid" suffix.
+ // The entries are stored for the duration of the authorization flow.
+ certTokens map[string]*tls.Certificate
}
// GetCertificate implements the tls.Config.GetCertificate hook.
@@ -185,14 +199,16 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate,
return nil, errors.New("acme/autocert: server name contains invalid character")
}
+ // In the worst-case scenario, the timeout needs to account for caching, host policy,
+ // domain ownership verification and certificate issuance.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
// check whether this is a token cert requested for TLS-SNI challenge
if strings.HasSuffix(name, ".acme.invalid") {
- m.tokenCertMu.RLock()
- defer m.tokenCertMu.RUnlock()
- if cert := m.tokenCert[name]; cert != nil {
+ m.tokensMu.RLock()
+ defer m.tokensMu.RUnlock()
+ if cert := m.certTokens[name]; cert != nil {
return cert, nil
}
if cert, err := m.cacheGet(ctx, name); err == nil {
@@ -224,6 +240,68 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate,
return cert, nil
}
+// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses.
+// It returns an http.Handler that responds to the challenges and must be
+// running on port 80. If it receives a request that is not an ACME challenge,
+// it delegates the request to the optional fallback handler.
+//
+// If fallback is nil, the returned handler redirects all GET and HEAD requests
+// to the default TLS port 443 with 302 Found status code, preserving the original
+// request path and query. It responds with 400 Bad Request to all other HTTP methods.
+// The fallback is not protected by the optional HostPolicy.
+//
+// Because the fallback handler is run with unencrypted port 80 requests,
+// the fallback should not serve TLS-only requests.
+//
+// If HTTPHandler is never called, the Manager will only use TLS SNI
+// challenges for domain verification.
+func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler {
+ m.tokensMu.Lock()
+ defer m.tokensMu.Unlock()
+ m.tryHTTP01 = true
+
+ if fallback == nil {
+ fallback = http.HandlerFunc(handleHTTPRedirect)
+ }
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") {
+ fallback.ServeHTTP(w, r)
+ return
+ }
+ // A reasonable context timeout for cache and host policy only,
+ // because we don't wait for a new certificate issuance here.
+ ctx, cancel := context.WithTimeout(r.Context(), time.Minute)
+ defer cancel()
+ if err := m.hostPolicy()(ctx, r.Host); err != nil {
+ http.Error(w, err.Error(), http.StatusForbidden)
+ return
+ }
+ data, err := m.httpToken(ctx, r.URL.Path)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ w.Write(data)
+ })
+}
+
+func handleHTTPRedirect(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "GET" && r.Method != "HEAD" {
+ http.Error(w, "Use HTTPS", http.StatusBadRequest)
+ return
+ }
+ target := "https://" + stripPort(r.Host) + r.URL.RequestURI()
+ http.Redirect(w, r, target, http.StatusFound)
+}
+
+func stripPort(hostport string) string {
+ host, _, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return hostport
+ }
+ return net.JoinHostPort(host, "443")
+}
+
// cert returns an existing certificate either from m.state or cache.
// If a certificate is found in cache but not in m.state, the latter will be filled
// with the cached value.
@@ -442,13 +520,14 @@ func (m *Manager) certState(domain string) (*certState, error) {
// authorizedCert starts the domain ownership verification process and requests a new cert upon success.
// The key argument is the certificate private key.
func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain string) (der [][]byte, leaf *x509.Certificate, err error) {
- if err := m.verify(ctx, domain); err != nil {
- return nil, nil, err
- }
client, err := m.acmeClient(ctx)
if err != nil {
return nil, nil, err
}
+
+ if err := m.verify(ctx, client, domain); err != nil {
+ return nil, nil, err
+ }
csr, err := certRequest(key, domain)
if err != nil {
return nil, nil, err
@@ -464,98 +543,171 @@ func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain
return der, leaf, nil
}
-// verify starts a new identifier (domain) authorization flow.
-// It prepares a challenge response and then blocks until the authorization
-// is marked as "completed" by the CA (either succeeded or failed).
-//
-// verify returns nil iff the verification was successful.
-func (m *Manager) verify(ctx context.Context, domain string) error {
- client, err := m.acmeClient(ctx)
- if err != nil {
- return err
- }
-
- // start domain authorization and get the challenge
- authz, err := client.Authorize(ctx, domain)
- if err != nil {
- return err
- }
- // maybe don't need to at all
- if authz.Status == acme.StatusValid {
- return nil
- }
+// verify runs the identifier (domain) authorization flow
+// using each applicable ACME challenge type.
+func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string) error {
+ // The list of challenge types we'll try to fulfill
+ // in this specific order.
+ challengeTypes := []string{"tls-sni-02", "tls-sni-01"}
+ m.tokensMu.RLock()
+ if m.tryHTTP01 {
+ challengeTypes = append(challengeTypes, "http-01")
+ }
+ m.tokensMu.RUnlock()
+
+ var nextTyp int // challengeType index of the next challenge type to try
+ for {
+ // Start domain authorization and get the challenge.
+ authz, err := client.Authorize(ctx, domain)
+ if err != nil {
+ return err
+ }
+ // No point in accepting challenges if the authorization status
+ // is in a final state.
+ switch authz.Status {
+ case acme.StatusValid:
+ return nil // already authorized
+ case acme.StatusInvalid:
+ return fmt.Errorf("acme/autocert: invalid authorization %q", authz.URI)
+ }
- // pick a challenge: prefer tls-sni-02 over tls-sni-01
- // TODO: consider authz.Combinations
- var chal *acme.Challenge
- for _, c := range authz.Challenges {
- if c.Type == "tls-sni-02" {
- chal = c
- break
+ // Pick the next preferred challenge.
+ var chal *acme.Challenge
+ for chal == nil && nextTyp < len(challengeTypes) {
+ chal = pickChallenge(challengeTypes[nextTyp], authz.Challenges)
+ nextTyp++
}
- if c.Type == "tls-sni-01" {
- chal = c
+ if chal == nil {
+ return fmt.Errorf("acme/autocert: unable to authorize %q; tried %q", domain, challengeTypes)
+ }
+ cleanup, err := m.fulfill(ctx, client, chal)
+ if err != nil {
+ continue
+ }
+ defer cleanup()
+ if _, err := client.Accept(ctx, chal); err != nil {
+ continue
+ }
+
+ // A challenge is fulfilled and accepted: wait for the CA to validate.
+ if _, err := client.WaitAuthorization(ctx, authz.URI); err == nil {
+ return nil
}
}
- if chal == nil {
- return errors.New("acme/autocert: no supported challenge type found")
- }
+}
- // create a token cert for the challenge response
- var (
- cert tls.Certificate
- name string
- )
+// fulfill provisions a response to the challenge chal.
+// The cleanup is non-nil only if provisioning succeeded.
+func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge) (cleanup func(), err error) {
switch chal.Type {
case "tls-sni-01":
- cert, name, err = client.TLSSNI01ChallengeCert(chal.Token)
+ cert, name, err := client.TLSSNI01ChallengeCert(chal.Token)
+ if err != nil {
+ return nil, err
+ }
+ m.putCertToken(ctx, name, &cert)
+ return func() { go m.deleteCertToken(name) }, nil
case "tls-sni-02":
- cert, name, err = client.TLSSNI02ChallengeCert(chal.Token)
- default:
- err = fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type)
- }
- if err != nil {
- return err
+ cert, name, err := client.TLSSNI02ChallengeCert(chal.Token)
+ if err != nil {
+ return nil, err
+ }
+ m.putCertToken(ctx, name, &cert)
+ return func() { go m.deleteCertToken(name) }, nil
+ case "http-01":
+ resp, err := client.HTTP01ChallengeResponse(chal.Token)
+ if err != nil {
+ return nil, err
+ }
+ p := client.HTTP01ChallengePath(chal.Token)
+ m.putHTTPToken(ctx, p, resp)
+ return func() { go m.deleteHTTPToken(p) }, nil
}
- m.putTokenCert(ctx, name, &cert)
- defer func() {
- // verification has ended at this point
- // don't need token cert anymore
- go m.deleteTokenCert(name)
- }()
+ return nil, fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type)
+}
- // ready to fulfill the challenge
- if _, err := client.Accept(ctx, chal); err != nil {
- return err
+func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge {
+ for _, c := range chal {
+ if c.Type == typ {
+ return c
+ }
}
- // wait for the CA to validate
- _, err = client.WaitAuthorization(ctx, authz.URI)
- return err
+ return nil
}
-// putTokenCert stores the cert under the named key in both m.tokenCert map
+// putCertToken stores the cert under the named key in both m.certTokens map
// and m.Cache.
-func (m *Manager) putTokenCert(ctx context.Context, name string, cert *tls.Certificate) {
- m.tokenCertMu.Lock()
- defer m.tokenCertMu.Unlock()
- if m.tokenCert == nil {
- m.tokenCert = make(map[string]*tls.Certificate)
+func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) {
+ m.tokensMu.Lock()
+ defer m.tokensMu.Unlock()
+ if m.certTokens == nil {
+ m.certTokens = make(map[string]*tls.Certificate)
}
- m.tokenCert[name] = cert
+ m.certTokens[name] = cert
m.cachePut(ctx, name, cert)
}
-// deleteTokenCert removes the token certificate for the specified domain name
-// from both m.tokenCert map and m.Cache.
-func (m *Manager) deleteTokenCert(name string) {
- m.tokenCertMu.Lock()
- defer m.tokenCertMu.Unlock()
- delete(m.tokenCert, name)
+// deleteCertToken removes the token certificate for the specified domain name
+// from both m.certTokens map and m.Cache.
+func (m *Manager) deleteCertToken(name string) {
+ m.tokensMu.Lock()
+ defer m.tokensMu.Unlock()
+ delete(m.certTokens, name)
if m.Cache != nil {
m.Cache.Delete(context.Background(), name)
}
}
+// httpToken retrieves an existing http-01 token value from an in-memory map
+// or the optional cache.
+func (m *Manager) httpToken(ctx context.Context, tokenPath string) ([]byte, error) {
+ m.tokensMu.RLock()
+ defer m.tokensMu.RUnlock()
+ if v, ok := m.httpTokens[tokenPath]; ok {
+ return v, nil
+ }
+ if m.Cache == nil {
+ return nil, fmt.Errorf("acme/autocert: no token at %q", tokenPath)
+ }
+ return m.Cache.Get(ctx, httpTokenCacheKey(tokenPath))
+}
+
+// putHTTPToken stores an http-01 token value using tokenPath as key
+// in both in-memory map and the optional Cache.
+//
+// It ignores any error returned from Cache.Put.
+func (m *Manager) putHTTPToken(ctx context.Context, tokenPath, val string) {
+ m.tokensMu.Lock()
+ defer m.tokensMu.Unlock()
+ if m.httpTokens == nil {
+ m.httpTokens = make(map[string][]byte)
+ }
+ b := []byte(val)
+ m.httpTokens[tokenPath] = b
+ if m.Cache != nil {
+ m.Cache.Put(ctx, httpTokenCacheKey(tokenPath), b)
+ }
+}
+
+// deleteHTTPToken removes an http-01 token value from both in-memory map
+// and the optional Cache, ignoring any error returned from the latter.
+//
+// If m.Cache is non-nil, it blocks until Cache.Delete returns without a timeout.
+func (m *Manager) deleteHTTPToken(tokenPath string) {
+ m.tokensMu.Lock()
+ defer m.tokensMu.Unlock()
+ delete(m.httpTokens, tokenPath)
+ if m.Cache != nil {
+ m.Cache.Delete(context.Background(), httpTokenCacheKey(tokenPath))
+ }
+}
+
+// httpTokenCacheKey returns a key at which an http-01 token value may be stored
+// in the Manager's optional Cache.
+func httpTokenCacheKey(tokenPath string) string {
+ return "http-01-" + path.Base(tokenPath)
+}
+
// renew starts a cert renewal timer loop, one per domain.
//
// The loop is scheduled in two cases:
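
Taken together, the new http-01 support is wired up by serving HTTPHandler on port 80 alongside GetCertificate on 443. A minimal sketch mirroring the updated package example later in this diff (the domain and cache directory are placeholders):

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	m := &autocert.Manager{
		Cache:      autocert.DirCache("secret-dir"),
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist("example.org"),
	}

	// Port 80 answers http-01 challenges; the nil fallback redirects
	// all other GET/HEAD requests to HTTPS.
	go http.ListenAndServe(":http", m.HTTPHandler(nil))

	s := &http.Server{
		Addr:      ":https",
		TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
	}
	log.Fatal(s.ListenAndServeTLS("", ""))
}
```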
diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go b/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go
index 43a62011a..2da1912e9 100644
--- a/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go
+++ b/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go
@@ -23,6 +23,7 @@ import (
"net/http"
"net/http/httptest"
"reflect"
+ "strings"
"sync"
"testing"
"time"
@@ -48,6 +49,16 @@ var authzTmpl = template.Must(template.New("authz").Parse(`{
"uri": "{{.}}/challenge/2",
"type": "tls-sni-02",
"token": "token-02"
+ },
+ {
+ "uri": "{{.}}/challenge/dns-01",
+ "type": "dns-01",
+ "token": "token-dns-01"
+ },
+ {
+ "uri": "{{.}}/challenge/http-01",
+ "type": "http-01",
+ "token": "token-http-01"
}
]
}`))
@@ -419,6 +430,146 @@ func testGetCertificate(t *testing.T, man *Manager, domain string, hello *tls.Cl
}
+func TestVerifyHTTP01(t *testing.T) {
+ var (
+ http01 http.Handler
+
+ authzCount int // num. of created authorizations
+ didAcceptHTTP01 bool
+ )
+
+ verifyHTTPToken := func() {
+ r := httptest.NewRequest("GET", "/.well-known/acme-challenge/token-http-01", nil)
+ w := httptest.NewRecorder()
+ http01.ServeHTTP(w, r)
+ if w.Code != http.StatusOK {
+ t.Errorf("http token: w.Code = %d; want %d", w.Code, http.StatusOK)
+ }
+ if v := string(w.Body.Bytes()); !strings.HasPrefix(v, "token-http-01.") {
+ t.Errorf("http token value = %q; want 'token-http-01.' prefix", v)
+ }
+ }
+
+ // ACME CA server stub, only the needed bits.
+ // TODO: Merge this with startACMEServerStub, making it a configurable CA for testing.
+ var ca *httptest.Server
+ ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Replay-Nonce", "nonce")
+ if r.Method == "HEAD" {
+ // a nonce request
+ return
+ }
+
+ switch r.URL.Path {
+ // Discovery.
+ case "/":
+ if err := discoTmpl.Execute(w, ca.URL); err != nil {
+ t.Errorf("discoTmpl: %v", err)
+ }
+ // Client key registration.
+ case "/new-reg":
+ w.Write([]byte("{}"))
+ // New domain authorization.
+ case "/new-authz":
+ authzCount++
+ w.Header().Set("Location", fmt.Sprintf("%s/authz/%d", ca.URL, authzCount))
+ w.WriteHeader(http.StatusCreated)
+ if err := authzTmpl.Execute(w, ca.URL); err != nil {
+ t.Errorf("authzTmpl: %v", err)
+ }
+ // Accept tls-sni-02.
+ case "/challenge/2":
+ w.Write([]byte("{}"))
+ // Reject tls-sni-01.
+ case "/challenge/1":
+ http.Error(w, "won't accept tls-sni-01", http.StatusBadRequest)
+ // Should not accept dns-01.
+ case "/challenge/dns-01":
+ t.Errorf("dns-01 challenge was accepted")
+ http.Error(w, "won't accept dns-01", http.StatusBadRequest)
+ // Accept http-01.
+ case "/challenge/http-01":
+ didAcceptHTTP01 = true
+ verifyHTTPToken()
+ w.Write([]byte("{}"))
+ // Authorization statuses.
+ // Make tls-sni-xxx invalid.
+ case "/authz/1", "/authz/2":
+ w.Write([]byte(`{"status": "invalid"}`))
+ case "/authz/3", "/authz/4":
+ w.Write([]byte(`{"status": "valid"}`))
+ default:
+ http.NotFound(w, r)
+ t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path)
+ }
+ }))
+ defer ca.Close()
+
+ key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ m := &Manager{
+ Client: &acme.Client{
+ Key: key,
+ DirectoryURL: ca.URL,
+ },
+ }
+ http01 = m.HTTPHandler(nil)
+ if err := m.verify(context.Background(), m.Client, "example.org"); err != nil {
+ t.Errorf("m.verify: %v", err)
+ }
+	// Only tls-sni-01, tls-sni-02 and http-01 must be accepted.
+	// The dns-01 challenge is unsupported.
+ if authzCount != 3 {
+ t.Errorf("authzCount = %d; want 3", authzCount)
+ }
+ if !didAcceptHTTP01 {
+ t.Error("did not accept http-01 challenge")
+ }
+}
+
+func TestHTTPHandlerDefaultFallback(t *testing.T) {
+ tt := []struct {
+ method, url string
+ wantCode int
+ wantLocation string
+ }{
+ {"GET", "http://example.org", 302, "https://example.org/"},
+ {"GET", "http://example.org/foo", 302, "https://example.org/foo"},
+ {"GET", "http://example.org/foo/bar/", 302, "https://example.org/foo/bar/"},
+ {"GET", "http://example.org/?a=b", 302, "https://example.org/?a=b"},
+ {"GET", "http://example.org/foo?a=b", 302, "https://example.org/foo?a=b"},
+ {"GET", "http://example.org:80/foo?a=b", 302, "https://example.org:443/foo?a=b"},
+ {"GET", "http://example.org:80/foo%20bar", 302, "https://example.org:443/foo%20bar"},
+ {"GET", "http://[2602:d1:xxxx::c60a]:1234", 302, "https://[2602:d1:xxxx::c60a]:443/"},
+ {"GET", "http://[2602:d1:xxxx::c60a]", 302, "https://[2602:d1:xxxx::c60a]/"},
+ {"GET", "http://[2602:d1:xxxx::c60a]/foo?a=b", 302, "https://[2602:d1:xxxx::c60a]/foo?a=b"},
+ {"HEAD", "http://example.org", 302, "https://example.org/"},
+ {"HEAD", "http://example.org/foo", 302, "https://example.org/foo"},
+ {"HEAD", "http://example.org/foo/bar/", 302, "https://example.org/foo/bar/"},
+ {"HEAD", "http://example.org/?a=b", 302, "https://example.org/?a=b"},
+ {"HEAD", "http://example.org/foo?a=b", 302, "https://example.org/foo?a=b"},
+ {"POST", "http://example.org", 400, ""},
+ {"PUT", "http://example.org", 400, ""},
+ {"GET", "http://example.org/.well-known/acme-challenge/x", 404, ""},
+ }
+ var m Manager
+ h := m.HTTPHandler(nil)
+ for i, test := range tt {
+ r := httptest.NewRequest(test.method, test.url, nil)
+ w := httptest.NewRecorder()
+ h.ServeHTTP(w, r)
+ if w.Code != test.wantCode {
+ t.Errorf("%d: w.Code = %d; want %d", i, w.Code, test.wantCode)
+ t.Errorf("%d: body: %s", i, w.Body.Bytes())
+ }
+ if v := w.Header().Get("Location"); v != test.wantLocation {
+ t.Errorf("%d: Location = %q; want %q", i, v, test.wantLocation)
+ }
+ }
+}
+
func TestAccountKeyCache(t *testing.T) {
m := Manager{Cache: newMemCache()}
ctx := context.Background()
diff --git a/vendor/golang.org/x/crypto/acme/autocert/example_test.go b/vendor/golang.org/x/crypto/acme/autocert/example_test.go
index 71d61eb1c..552a62549 100644
--- a/vendor/golang.org/x/crypto/acme/autocert/example_test.go
+++ b/vendor/golang.org/x/crypto/acme/autocert/example_test.go
@@ -22,11 +22,12 @@ func ExampleNewListener() {
}
func ExampleManager() {
- m := autocert.Manager{
+ m := &autocert.Manager{
Cache: autocert.DirCache("secret-dir"),
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist("example.org"),
}
+ go http.ListenAndServe(":http", m.HTTPHandler(nil))
s := &http.Server{
Addr: ":https",
TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go
index 583ac4be2..bb2b0d8b4 100644
--- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go
+++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build amd64,!gccgo,!appengine
+
package argon2
func init() {
diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
index e67c5e0aa..30a49fdf2 100644
--- a/vendor/golang.org/x/crypto/ssh/cipher.go
+++ b/vendor/golang.org/x/crypto/ssh/cipher.go
@@ -16,6 +16,9 @@ import (
"hash"
"io"
"io/ioutil"
+
+ "golang.org/x/crypto/internal/chacha20"
+ "golang.org/x/crypto/poly1305"
)
const (
@@ -53,78 +56,78 @@ func newRC4(key, iv []byte) (cipher.Stream, error) {
return rc4.NewCipher(key)
}
-type streamCipherMode struct {
- keySize int
- ivSize int
- skip int
- createFunc func(key, iv []byte) (cipher.Stream, error)
+type cipherMode struct {
+ keySize int
+ ivSize int
+ create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error)
}
-func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
- if len(key) < c.keySize {
- panic("ssh: key length too small for cipher")
- }
- if len(iv) < c.ivSize {
- panic("ssh: iv too small for cipher")
- }
-
- stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
- if err != nil {
- return nil, err
- }
+func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+ return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+ stream, err := createFunc(key, iv)
+ if err != nil {
+ return nil, err
+ }
- var streamDump []byte
- if c.skip > 0 {
- streamDump = make([]byte, 512)
- }
+ var streamDump []byte
+ if skip > 0 {
+ streamDump = make([]byte, 512)
+ }
- for remainingToDump := c.skip; remainingToDump > 0; {
- dumpThisTime := remainingToDump
- if dumpThisTime > len(streamDump) {
- dumpThisTime = len(streamDump)
+ for remainingToDump := skip; remainingToDump > 0; {
+ dumpThisTime := remainingToDump
+ if dumpThisTime > len(streamDump) {
+ dumpThisTime = len(streamDump)
+ }
+ stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
+ remainingToDump -= dumpThisTime
}
- stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
- remainingToDump -= dumpThisTime
- }
- return stream, nil
+ mac := macModes[algs.MAC].new(macKey)
+ return &streamPacketCipher{
+ mac: mac,
+ etm: macModes[algs.MAC].etm,
+ macResult: make([]byte, mac.Size()),
+ cipher: stream,
+ }, nil
+ }
}
// cipherModes documents properties of supported ciphers. Ciphers not included
// are not supported and will not be negotiated, even if explicitly requested in
// ClientConfig.Crypto.Ciphers.
-var cipherModes = map[string]*streamCipherMode{
+var cipherModes = map[string]*cipherMode{
// Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
// are defined in the order specified in the RFC.
- "aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
- "aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
- "aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
+ "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)},
+ "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)},
+ "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)},
// Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
// They are defined in the order specified in the RFC.
- "arcfour128": {16, 0, 1536, newRC4},
- "arcfour256": {32, 0, 1536, newRC4},
+ "arcfour128": {16, 0, streamCipherMode(1536, newRC4)},
+ "arcfour256": {32, 0, streamCipherMode(1536, newRC4)},
// Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
// Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
// RC4) has problems with weak keys, and should be used with caution."
// RFC4345 introduces improved versions of Arcfour.
- "arcfour": {16, 0, 0, newRC4},
+ "arcfour": {16, 0, streamCipherMode(0, newRC4)},
- // AES-GCM is not a stream cipher, so it is constructed with a
- // special case. If we add any more non-stream ciphers, we
- // should invest a cleaner way to do this.
- gcmCipherID: {16, 12, 0, nil},
+ // AEAD ciphers
+ gcmCipherID: {16, 12, newGCMCipher},
+ chacha20Poly1305ID: {64, 0, newChaCha20Cipher},
// CBC mode is insecure and so is not included in the default config.
// (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
// needed, it's possible to specify a custom Config to enable it.
// You should expect that an active attacker can recover plaintext if
// you do.
- aes128cbcID: {16, aes.BlockSize, 0, nil},
+ aes128cbcID: {16, aes.BlockSize, newAESCBCCipher},
- // 3des-cbc is insecure and is disabled by default.
- tripledescbcID: {24, des.BlockSize, 0, nil},
+ // 3des-cbc is insecure and is not included in the default
+ // config.
+ tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher},
}
// prefixLen is the length of the packet prefix that contains the packet length
@@ -304,7 +307,7 @@ type gcmCipher struct {
buf []byte
}
-func newGCMCipher(iv, key []byte) (packetCipher, error) {
+func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) {
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
@@ -422,7 +425,7 @@ type cbcCipher struct {
oracleCamouflage uint32
}
-func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
cbc := &cbcCipher{
mac: macModes[algs.MAC].new(macKey),
decrypter: cipher.NewCBCDecrypter(c, iv),
@@ -436,13 +439,13 @@ func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorith
return cbc, nil
}
-func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
- cbc, err := newCBCCipher(c, iv, key, macKey, algs)
+ cbc, err := newCBCCipher(c, key, iv, macKey, algs)
if err != nil {
return nil, err
}
@@ -450,13 +453,13 @@ func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCi
return cbc, nil
}
-func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
c, err := des.NewTripleDESCipher(key)
if err != nil {
return nil, err
}
- cbc, err := newCBCCipher(c, iv, key, macKey, algs)
+ cbc, err := newCBCCipher(c, key, iv, macKey, algs)
if err != nil {
return nil, err
}
@@ -627,3 +630,142 @@ func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, pack
return nil
}
+
+const chacha20Poly1305ID = "chacha20-poly1305@openssh.com"
+
+// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com
+// AEAD, which is described here:
+//
+// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00
+//
+// The methods here also implement padding, which RFC 4253 Section 6
+// requires of stream ciphers as well.
+type chacha20Poly1305Cipher struct {
+ lengthKey [32]byte
+ contentKey [32]byte
+ buf []byte
+}
+
+func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) {
+ if len(key) != 64 {
+ panic(len(key))
+ }
+
+ c := &chacha20Poly1305Cipher{
+ buf: make([]byte, 256),
+ }
+
+ copy(c.contentKey[:], key[:32])
+ copy(c.lengthKey[:], key[32:])
+ return c, nil
+}
+
+// The Poly1305 key is obtained by encrypting 32 zero bytes.
+var chacha20PolyKeyInput [32]byte
+
+func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+ var counter [16]byte
+ binary.BigEndian.PutUint64(counter[8:], uint64(seqNum))
+
+ var polyKey [32]byte
+ chacha20.XORKeyStream(polyKey[:], chacha20PolyKeyInput[:], &counter, &c.contentKey)
+
+ encryptedLength := c.buf[:4]
+ if _, err := io.ReadFull(r, encryptedLength); err != nil {
+ return nil, err
+ }
+
+ var lenBytes [4]byte
+ chacha20.XORKeyStream(lenBytes[:], encryptedLength, &counter, &c.lengthKey)
+
+ length := binary.BigEndian.Uint32(lenBytes[:])
+ if length > maxPacket {
+ return nil, errors.New("ssh: invalid packet length, packet too large")
+ }
+
+ contentEnd := 4 + length
+ packetEnd := contentEnd + poly1305.TagSize
+ if uint32(cap(c.buf)) < packetEnd {
+ c.buf = make([]byte, packetEnd)
+ copy(c.buf[:], encryptedLength)
+ } else {
+ c.buf = c.buf[:packetEnd]
+ }
+
+ if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil {
+ return nil, err
+ }
+
+ var mac [poly1305.TagSize]byte
+ copy(mac[:], c.buf[contentEnd:packetEnd])
+ if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) {
+ return nil, errors.New("ssh: MAC failure")
+ }
+
+ counter[0] = 1
+
+ plain := c.buf[4:contentEnd]
+ chacha20.XORKeyStream(plain, plain, &counter, &c.contentKey)
+
+ padding := plain[0]
+ if padding < 4 {
+ // padding is a byte, so it automatically satisfies
+ // the maximum size, which is 255.
+ return nil, fmt.Errorf("ssh: illegal padding %d", padding)
+ }
+
+ if int(padding)+1 >= len(plain) {
+ return nil, fmt.Errorf("ssh: padding %d too large", padding)
+ }
+
+ plain = plain[1 : len(plain)-int(padding)]
+
+ return plain, nil
+}
+
+func (c *chacha20Poly1305Cipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error {
+ var counter [16]byte
+ binary.BigEndian.PutUint64(counter[8:], uint64(seqNum))
+
+ var polyKey [32]byte
+ chacha20.XORKeyStream(polyKey[:], chacha20PolyKeyInput[:], &counter, &c.contentKey)
+
+ // There is no blocksize, so fall back to multiple of 8 byte
+ // padding, as described in RFC 4253, Sec 6.
+ const packetSizeMultiple = 8
+
+ padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple
+ if padding < 4 {
+ padding += packetSizeMultiple
+ }
+
+ // size (4 bytes), padding (1), payload, padding, tag.
+ totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize
+ if cap(c.buf) < totalLength {
+ c.buf = make([]byte, totalLength)
+ } else {
+ c.buf = c.buf[:totalLength]
+ }
+
+ binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding))
+ chacha20.XORKeyStream(c.buf, c.buf[:4], &counter, &c.lengthKey)
+ c.buf[4] = byte(padding)
+ copy(c.buf[5:], payload)
+ packetEnd := 5 + len(payload) + padding
+ if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil {
+ return err
+ }
+
+ counter[0] = 1
+ chacha20.XORKeyStream(c.buf[4:], c.buf[4:packetEnd], &counter, &c.contentKey)
+
+ var mac [poly1305.TagSize]byte
+ poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey)
+
+ copy(c.buf[packetEnd:], mac[:])
+
+ if _, err := w.Write(c.buf); err != nil {
+ return err
+ }
+ return nil
+}
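
The framing arithmetic in writePacket is easy to misread, so here is a minimal standalone sketch (plain Go, no crypto imports) of the same length/padding layout; the constant names mirror the ones used in the cipher above.

package main

import "fmt"

// tagSize mirrors poly1305.TagSize.
const tagSize = 16

// frame computes the on-the-wire layout produced by writePacket: a
// 4-byte encrypted length, one padding-length byte, the payload, at
// least 4 bytes of random padding rounding 1+len(payload)+padding up to
// a multiple of 8, and the Poly1305 tag over everything before it.
func frame(payloadLen int) (padding, total int) {
	const packetSizeMultiple = 8
	padding = packetSizeMultiple - (1+payloadLen)%packetSizeMultiple
	if padding < 4 {
		padding += packetSizeMultiple
	}
	total = 4 + 1 + payloadLen + padding + tagSize
	return padding, total
}

func main() {
	for _, n := range []int{0, 1, 12, 256} {
		pad, total := frame(n)
		fmt.Printf("payload=%3d padding=%2d total=%3d\n", n, pad, total)
	}
}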
diff --git a/vendor/golang.org/x/crypto/ssh/cipher_test.go b/vendor/golang.org/x/crypto/ssh/cipher_test.go
index 6a35d8708..a52d6e486 100644
--- a/vendor/golang.org/x/crypto/ssh/cipher_test.go
+++ b/vendor/golang.org/x/crypto/ssh/cipher_test.go
@@ -7,7 +7,6 @@ package ssh
import (
"bytes"
"crypto"
- "crypto/aes"
"crypto/rand"
"testing"
)
@@ -15,7 +14,12 @@ import (
func TestDefaultCiphersExist(t *testing.T) {
for _, cipherAlgo := range supportedCiphers {
if _, ok := cipherModes[cipherAlgo]; !ok {
- t.Errorf("default cipher %q is unknown", cipherAlgo)
+ t.Errorf("supported cipher %q is unknown", cipherAlgo)
+ }
+ }
+ for _, cipherAlgo := range preferredCiphers {
+ if _, ok := cipherModes[cipherAlgo]; !ok {
+ t.Errorf("preferred cipher %q is unknown", cipherAlgo)
}
}
}
@@ -67,9 +71,6 @@ func testPacketCipher(t *testing.T, cipher, mac string) {
}
func TestCBCOracleCounterMeasure(t *testing.T) {
- cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil}
- defer delete(cipherModes, aes128cbcID)
-
kr := &kexResult{Hash: crypto.SHA1}
algs := directionAlgorithms{
Cipher: aes128cbcID,
diff --git a/vendor/golang.org/x/crypto/ssh/client_test.go b/vendor/golang.org/x/crypto/ssh/client_test.go
index ef95069ef..81f9599e1 100644
--- a/vendor/golang.org/x/crypto/ssh/client_test.go
+++ b/vendor/golang.org/x/crypto/ssh/client_test.go
@@ -5,41 +5,77 @@
package ssh
import (
- "net"
"strings"
"testing"
)
-func testClientVersion(t *testing.T, config *ClientConfig, expected string) {
- clientConn, serverConn := net.Pipe()
- defer clientConn.Close()
- receivedVersion := make(chan string, 1)
- config.HostKeyCallback = InsecureIgnoreHostKey()
- go func() {
- version, err := readVersion(serverConn)
- if err != nil {
- receivedVersion <- ""
- } else {
- receivedVersion <- string(version)
- }
- serverConn.Close()
- }()
- NewClientConn(clientConn, "", config)
- actual := <-receivedVersion
- if actual != expected {
- t.Fatalf("got %s; want %s", actual, expected)
+func TestClientVersion(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ version string
+ multiLine string
+ wantErr bool
+ }{
+ {
+ name: "default version",
+ version: packageVersion,
+ },
+ {
+ name: "custom version",
+ version: "SSH-2.0-CustomClientVersionString",
+ },
+ {
+ name: "good multi line version",
+ version: packageVersion,
+ multiLine: strings.Repeat("ignored\r\n", 20),
+ },
+ {
+ name: "bad multi line version",
+ version: packageVersion,
+ multiLine: "bad multi line version",
+ wantErr: true,
+ },
+ {
+ name: "long multi line version",
+ version: packageVersion,
+ multiLine: strings.Repeat("long multi line version\r\n", 50)[:256],
+ wantErr: true,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+ go func() {
+ if tt.multiLine != "" {
+ c1.Write([]byte(tt.multiLine))
+ }
+ NewClientConn(c1, "", &ClientConfig{
+ ClientVersion: tt.version,
+ HostKeyCallback: InsecureIgnoreHostKey(),
+ })
+ c1.Close()
+ }()
+ conf := &ServerConfig{NoClientAuth: true}
+ conf.AddHostKey(testSigners["rsa"])
+ conn, _, _, err := NewServerConn(c2, conf)
+ if err == nil == tt.wantErr {
+ t.Fatalf("got err %v; wantErr %t", err, tt.wantErr)
+ }
+ if tt.wantErr {
+ // Don't verify the version on an expected error.
+ return
+ }
+ if got := string(conn.ClientVersion()); got != tt.version {
+ t.Fatalf("got %q; want %q", got, tt.version)
+ }
+ })
}
}
-func TestCustomClientVersion(t *testing.T) {
- version := "Test-Client-Version-0.0"
- testClientVersion(t, &ClientConfig{ClientVersion: version}, version)
-}
-
-func TestDefaultClientVersion(t *testing.T) {
- testClientVersion(t, &ClientConfig{}, packageVersion)
-}
-
func TestHostKeyCheck(t *testing.T) {
for _, tt := range []struct {
name string
diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
index 135b4edd7..04f3620b3 100644
--- a/vendor/golang.org/x/crypto/ssh/common.go
+++ b/vendor/golang.org/x/crypto/ssh/common.go
@@ -24,11 +24,21 @@ const (
serviceSSH = "ssh-connection"
)
-// supportedCiphers specifies the supported ciphers in preference order.
+// supportedCiphers lists ciphers we support but might not recommend.
var supportedCiphers = []string{
"aes128-ctr", "aes192-ctr", "aes256-ctr",
"aes128-gcm@openssh.com",
- "arcfour256", "arcfour128",
+ chacha20Poly1305ID,
+ "arcfour256", "arcfour128", "arcfour",
+ aes128cbcID,
+ tripledescbcID,
+}
+
+// preferredCiphers specifies the default preference for ciphers.
+var preferredCiphers = []string{
+ "aes128-gcm@openssh.com",
+ chacha20Poly1305ID,
+ "aes128-ctr", "aes192-ctr", "aes256-ctr",
}
// supportedKexAlgos specifies the supported key-exchange algorithms in
@@ -211,7 +221,7 @@ func (c *Config) SetDefaults() {
c.Rand = rand.Reader
}
if c.Ciphers == nil {
- c.Ciphers = supportedCiphers
+ c.Ciphers = preferredCiphers
}
var ciphers []string
for _, c := range c.Ciphers {
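
Because SetDefaults now selects preferredCiphers rather than the full supportedCiphers list, a client that must interoperate with a legacy server has to opt in to the weaker algorithms itself. A minimal sketch; the host, credentials, and the InsecureIgnoreHostKey callback are placeholders for illustration only.

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User:            "demo",                              // placeholder
		Auth:            []ssh.AuthMethod{ssh.Password("x")}, // placeholder
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),         // demo only
	}
	// Start from the preferred set, then opt in to a supported but
	// no-longer-preferred cipher for the legacy host.
	config.SetDefaults()
	config.Ciphers = append(config.Ciphers, "aes128-cbc")

	conn, err := ssh.Dial("tcp", "legacy.example.com:22", config) // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}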
diff --git a/vendor/golang.org/x/crypto/ssh/test/session_test.go b/vendor/golang.org/x/crypto/ssh/test/session_test.go
index 9e702effa..4eb7afde8 100644
--- a/vendor/golang.org/x/crypto/ssh/test/session_test.go
+++ b/vendor/golang.org/x/crypto/ssh/test/session_test.go
@@ -11,6 +11,7 @@ package test
import (
"bytes"
"errors"
+ "fmt"
"io"
"strings"
"testing"
@@ -324,31 +325,59 @@ func TestWindowChange(t *testing.T) {
}
}
+func testOneCipher(t *testing.T, cipher string, cipherOrder []string) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conf := clientConfig()
+ conf.Ciphers = []string{cipher}
+ // Don't fail if sshd doesn't have the cipher.
+ conf.Ciphers = append(conf.Ciphers, cipherOrder...)
+ conn, err := server.TryDial(conf)
+ if err != nil {
+ t.Fatalf("TryDial: %v", err)
+ }
+ defer conn.Close()
+
+ numBytes := 4096
+
+ // Exercise sending data to the server
+ if _, _, err := conn.Conn.SendRequest("drop-me", false, make([]byte, numBytes)); err != nil {
+ t.Fatalf("SendRequest: %v", err)
+ }
+
+ // Exercise receiving data from the server
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("NewSession: %v", err)
+ }
+
+ out, err := session.Output(fmt.Sprintf("dd if=/dev/zero of=/dev/stdout bs=%d count=1", numBytes))
+ if err != nil {
+ t.Fatalf("Output: %v", err)
+ }
+
+ if len(out) != numBytes {
+ t.Fatalf("got %d bytes, want %d bytes", len(out), numBytes)
+ }
+}
+
+var deprecatedCiphers = []string{
+ "aes128-cbc", "3des-cbc",
+ "arcfour128", "arcfour256",
+}
+
func TestCiphers(t *testing.T) {
var config ssh.Config
config.SetDefaults()
- cipherOrder := config.Ciphers
- // These ciphers will not be tested when commented out in cipher.go it will
- // fallback to the next available as per line 292.
- cipherOrder = append(cipherOrder, "aes128-cbc", "3des-cbc")
+ cipherOrder := append(config.Ciphers, deprecatedCiphers...)
for _, ciph := range cipherOrder {
t.Run(ciph, func(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conf := clientConfig()
- conf.Ciphers = []string{ciph}
- // Don't fail if sshd doesn't have the cipher.
- conf.Ciphers = append(conf.Ciphers, cipherOrder...)
- conn, err := server.TryDial(conf)
- if err == nil {
- conn.Close()
- } else {
- t.Fatalf("failed for cipher %q", ciph)
- }
+ testOneCipher(t, ciph, cipherOrder)
})
}
}
+
func TestMACs(t *testing.T) {
var config ssh.Config
config.SetDefaults()
diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go
index 01150eb89..f6fae1db4 100644
--- a/vendor/golang.org/x/crypto/ssh/transport.go
+++ b/vendor/golang.org/x/crypto/ssh/transport.go
@@ -6,6 +6,7 @@ package ssh
import (
"bufio"
+ "bytes"
"errors"
"io"
"log"
@@ -232,52 +233,22 @@ var (
clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
)
-// generateKeys generates key material for IV, MAC and encryption.
-func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) {
+// newPacketCipher sets up the cipher and MAC keys from kex.K, kex.H and sessionId, as
+// described in RFC 4253, section 6.4. direction should either be serverKeys
+// (to set up server->client keys) or clientKeys (for client->server keys).
+func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
cipherMode := cipherModes[algs.Cipher]
macMode := macModes[algs.MAC]
- iv = make([]byte, cipherMode.ivSize)
- key = make([]byte, cipherMode.keySize)
- macKey = make([]byte, macMode.keySize)
+ iv := make([]byte, cipherMode.ivSize)
+ key := make([]byte, cipherMode.keySize)
+ macKey := make([]byte, macMode.keySize)
generateKeyMaterial(iv, d.ivTag, kex)
generateKeyMaterial(key, d.keyTag, kex)
generateKeyMaterial(macKey, d.macKeyTag, kex)
- return
-}
-
-// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as
-// described in RFC 4253, section 6.4. direction should either be serverKeys
-// (to setup server->client keys) or clientKeys (for client->server keys).
-func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
- iv, key, macKey := generateKeys(d, algs, kex)
-
- if algs.Cipher == gcmCipherID {
- return newGCMCipher(iv, key)
- }
-
- if algs.Cipher == aes128cbcID {
- return newAESCBCCipher(iv, key, macKey, algs)
- }
- if algs.Cipher == tripledescbcID {
- return newTripleDESCBCCipher(iv, key, macKey, algs)
- }
-
- c := &streamPacketCipher{
- mac: macModes[algs.MAC].new(macKey),
- etm: macModes[algs.MAC].etm,
- }
- c.macResult = make([]byte, c.mac.Size())
-
- var err error
- c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv)
- if err != nil {
- return nil, err
- }
-
- return c, nil
+ return cipherModes[algs.Cipher].create(key, iv, macKey, algs)
}
// generateKeyMaterial fills out with key material generated from tag, K, H
@@ -342,7 +313,7 @@ func readVersion(r io.Reader) ([]byte, error) {
var ok bool
var buf [1]byte
- for len(versionString) < maxVersionStringBytes {
+ for length := 0; length < maxVersionStringBytes; length++ {
_, err := io.ReadFull(r, buf[:])
if err != nil {
return nil, err
@@ -350,6 +321,13 @@ func readVersion(r io.Reader) ([]byte, error) {
// The RFC says that the version should be terminated with \r\n
// but several SSH servers actually only send a \n.
if buf[0] == '\n' {
+ if !bytes.HasPrefix(versionString, []byte("SSH-")) {
+ // RFC 4253 says we need to ignore all version string lines
+ // except the one containing the SSH version (provided that
+ // all the lines do not exceed 255 bytes in total).
+ versionString = versionString[:0]
+ continue
+ }
ok = true
break
}
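
RFC 4253, section 4.2 allows a server to send banner lines ahead of its identification string, which is what the new prefix check implements: any line that does not start with "SSH-" is discarded, subject to the 255-byte total limit. The accepted input shape, as a standalone sketch of the same line handling:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

// skipBanner mirrors readVersion's line handling: lines are discarded
// until one starts with "SSH-". The real readVersion additionally
// enforces a 255-byte limit across all lines.
func skipBanner(r *bufio.Reader) (string, error) {
	for {
		line, err := r.ReadString('\n')
		if err != nil {
			return "", err
		}
		line = strings.TrimRight(line, "\r\n")
		if strings.HasPrefix(line, "SSH-") {
			return line, nil
		}
	}
}

func main() {
	in := "welcome to example.com\r\nignored\r\nSSH-2.0-bla\r\n"
	v, _ := skipBanner(bufio.NewReader(bytes.NewBufferString(in)))
	fmt.Println(v) // SSH-2.0-bla
}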
diff --git a/vendor/golang.org/x/crypto/ssh/transport_test.go b/vendor/golang.org/x/crypto/ssh/transport_test.go
index 92d83abf9..8445e1e56 100644
--- a/vendor/golang.org/x/crypto/ssh/transport_test.go
+++ b/vendor/golang.org/x/crypto/ssh/transport_test.go
@@ -13,11 +13,13 @@ import (
)
func TestReadVersion(t *testing.T) {
- longversion := strings.Repeat("SSH-2.0-bla", 50)[:253]
+ longVersion := strings.Repeat("SSH-2.0-bla", 50)[:253]
+ multiLineVersion := strings.Repeat("ignored\r\n", 20) + "SSH-2.0-bla\r\n"
cases := map[string]string{
"SSH-2.0-bla\r\n": "SSH-2.0-bla",
"SSH-2.0-bla\n": "SSH-2.0-bla",
- longversion + "\r\n": longversion,
+ multiLineVersion: "SSH-2.0-bla",
+ longVersion + "\r\n": longVersion,
}
for in, want := range cases {
@@ -33,9 +35,11 @@ func TestReadVersion(t *testing.T) {
}
func TestReadVersionError(t *testing.T) {
- longversion := strings.Repeat("SSH-2.0-bla", 50)[:253]
+ longVersion := strings.Repeat("SSH-2.0-bla", 50)[:253]
+ multiLineVersion := strings.Repeat("ignored\r\n", 50) + "SSH-2.0-bla\r\n"
cases := []string{
- longversion + "too-long\r\n",
+ longVersion + "too-long\r\n",
+ multiLineVersion,
}
for _, in := range cases {
if _, err := readVersion(bytes.NewBufferString(in)); err == nil {
@@ -60,7 +64,7 @@ func TestExchangeVersionsBasic(t *testing.T) {
func TestExchangeVersions(t *testing.T) {
cases := []string{
"not\x000allowed",
- "not allowed\n",
+ "not allowed\x01\r\n",
}
for _, c := range cases {
buf := bytes.NewBufferString("SSH-2.0-bla\r\n")
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
index 893e272a9..e3c01d7c9 100644
--- a/vendor/golang.org/x/net/html/token.go
+++ b/vendor/golang.org/x/net/html/token.go
@@ -1161,8 +1161,8 @@ func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
return nil, nil, false
}
-// Token returns the next Token. The result's Data and Attr values remain valid
-// after subsequent Next calls.
+// Token returns the current Token. The result's Data and Attr values remain
+// valid after subsequent Next calls.
func (z *Tokenizer) Token() Token {
t := Token{Type: z.tt}
switch z.tt {
diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go
index 698860b77..c9a0cf3b4 100644
--- a/vendor/golang.org/x/net/http2/ciphers.go
+++ b/vendor/golang.org/x/net/http2/ciphers.go
@@ -5,7 +5,7 @@
package http2
// A list of the possible cipher suite ids. Taken from
-// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
const (
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go
index b65fc6d42..088d6e2bd 100644
--- a/vendor/golang.org/x/net/http2/configure_transport.go
+++ b/vendor/golang.org/x/net/http2/configure_transport.go
@@ -73,7 +73,7 @@ type noDialH2RoundTripper struct{ t *Transport }
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
res, err := rt.t.RoundTrip(req)
- if err == ErrNoCachedConn {
+ if isNoCachedConnError(err) {
return nil, http.ErrSkipAltProtocol
}
return res, err
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index c65f1a397..e6b321f4b 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -306,7 +306,26 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
return
}
-var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+// noCachedConnError is the concrete type of ErrNoCachedConn, which
+// needs to be detected by net/http regardless of whether it's its
+// bundled version (in h2_bundle.go with a rewritten type name) or
+// from a user's x/net/http2. As such, it has a unique method name
+// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
+// isNoCachedConnError.
+type noCachedConnError struct{}
+
+func (noCachedConnError) IsHTTP2NoCachedConnError() {}
+func (noCachedConnError) Error() string { return "http2: no cached connection was available" }
+
+// isNoCachedConnError reports whether err is of type noCachedConnError
+// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
+// may coexist in the same running program.
+func isNoCachedConnError(err error) bool {
+ _, ok := err.(interface{ IsHTTP2NoCachedConnError() })
+ return ok
+}
+
+var ErrNoCachedConn error = noCachedConnError{}
// RoundTripOpt are options for the Transport.RoundTripOpt method.
type RoundTripOpt struct {
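
isNoCachedConnError matches on a marker method instead of a concrete type, so the check still works when the error comes from net/http's bundled copy of this package, whose type names are rewritten. The pattern in isolation:

package main

import (
	"errors"
	"fmt"
)

type noCachedConnError struct{}

func (noCachedConnError) IsHTTP2NoCachedConnError() {}
func (noCachedConnError) Error() string             { return "no cached connection" }

// isNoCachedConnError matches any error carrying the marker method,
// regardless of which package defines the concrete type.
func isNoCachedConnError(err error) bool {
	_, ok := err.(interface{ IsHTTP2NoCachedConnError() })
	return ok
}

func main() {
	fmt.Println(isNoCachedConnError(noCachedConnError{})) // true
	fmt.Println(isNoCachedConnError(errors.New("other"))) // false
}

Comparing against ErrNoCachedConn directly would fail across the package boundary, because the bundled copy's sentinel is a distinct value of a distinct type.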
diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go
index 86c78b3bb..2a5c310c2 100644
--- a/vendor/golang.org/x/net/internal/iana/gen.go
+++ b/vendor/golang.org/x/net/internal/iana/gen.go
@@ -28,15 +28,15 @@ var registries = []struct {
parse func(io.Writer, io.Reader) error
}{
{
- "http://www.iana.org/assignments/dscp-registry/dscp-registry.xml",
+ "https://www.iana.org/assignments/dscp-registry/dscp-registry.xml",
parseDSCPRegistry,
},
{
- "http://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml",
+ "https://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml",
parseTOSTCByte,
},
{
- "http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml",
+ "https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml",
parseProtocolNumbers,
},
}
diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go
index ffb44fe68..9d490fac9 100644
--- a/vendor/golang.org/x/net/ipv4/gen.go
+++ b/vendor/golang.org/x/net/ipv4/gen.go
@@ -72,7 +72,7 @@ var registries = []struct {
parse func(io.Writer, io.Reader) error
}{
{
- "http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml",
+ "https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml",
parseICMPv4Parameters,
},
}
diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go
index 41886ec72..47b7e9f0a 100644
--- a/vendor/golang.org/x/net/ipv6/gen.go
+++ b/vendor/golang.org/x/net/ipv6/gen.go
@@ -72,7 +72,7 @@ var registries = []struct {
parse func(io.Writer, io.Reader) error
}{
{
- "http://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml",
+ "https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml",
parseICMPv6Parameters,
},
}
diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s b/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s
new file mode 100644
index 000000000..afb7c0a9b
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s
@@ -0,0 +1,25 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// System call support for plan9 on arm
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-32
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-44
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-36
+	JMP	syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go
new file mode 100644
index 000000000..8dd87239a
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go
@@ -0,0 +1,284 @@
+// mksyscall.pl -l32 -plan9 -tags plan9,arm syscall_plan9.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build plan9,arm
+
+package plan9
+
+import "unsafe"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fd2path(fd int, buf []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]int32) (err error) {
+ r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func await(s []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(s) > 0 {
+ _p0 = unsafe.Pointer(&s[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func open(path string, mode int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func create(path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func remove(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func stat(path string, edir []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(name string, old string, flag int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(fd int, afd int, old string, flag int, aname string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(aname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wstat(path string, edir []byte) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int, newfd int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, edir []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fwstat(fd int, edir []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go
index bd475812b..95fd35317 100644
--- a/vendor/golang.org/x/sys/unix/dirent.go
+++ b/vendor/golang.org/x/sys/unix/dirent.go
@@ -6,97 +6,12 @@
package unix
-import "unsafe"
-
-// readInt returns the size-bytes unsigned integer in native byte order at offset off.
-func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
- if len(b) < int(off+size) {
- return 0, false
- }
- if isBigEndian {
- return readIntBE(b[off:], size), true
- }
- return readIntLE(b[off:], size), true
-}
-
-func readIntBE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[1]) | uint64(b[0])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
-
-func readIntLE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
+import "syscall"
// ParseDirent parses up to max directory entries in buf,
// appending the names to names. It returns the number of
// bytes consumed from buf, the number of entries added
// to names, and the new names slice.
func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
- origlen := len(buf)
- count = 0
- for max != 0 && len(buf) > 0 {
- reclen, ok := direntReclen(buf)
- if !ok || reclen > uint64(len(buf)) {
- return origlen, count, names
- }
- rec := buf[:reclen]
- buf = buf[reclen:]
- ino, ok := direntIno(rec)
- if !ok {
- break
- }
- if ino == 0 { // File absent in directory.
- continue
- }
- const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
- namlen, ok := direntNamlen(rec)
- if !ok || namoff+namlen > uint64(len(rec)) {
- break
- }
- name := rec[namoff : namoff+namlen]
- for i, c := range name {
- if c == 0 {
- name = name[:i]
- break
- }
- }
- // Check for useless names before allocating a string.
- if string(name) == "." || string(name) == ".." {
- continue
- }
- max--
- count++
- names = append(names, string(name))
- }
- return origlen - len(buf), count, names
+ return syscall.ParseDirent(buf, max, names)
}
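
ParseDirent is now a thin wrapper over syscall.ParseDirent, and the per-OS dirent field helpers above are gone. A typical raw directory-listing loop looks like this (Linux-flavoured sketch with abbreviated error handling):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open(".", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 4096)
	var names []string
	for {
		n, err := unix.ReadDirent(fd, buf)
		if err != nil || n <= 0 {
			break
		}
		// ParseDirent consumes raw dirent records and appends the
		// entry names, skipping "." and "..".
		_, _, names = unix.ParseDirent(buf[:n], -1, names)
	}
	fmt.Println(names)
}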
diff --git a/vendor/golang.org/x/sys/unix/linux/types.go b/vendor/golang.org/x/sys/unix/linux/types.go
index 929bbbf50..f429c8f78 100644
--- a/vendor/golang.org/x/sys/unix/linux/types.go
+++ b/vendor/golang.org/x/sys/unix/linux/types.go
@@ -47,11 +47,12 @@ package unix
#include <sys/utsname.h>
#include <sys/wait.h>
#include <linux/filter.h>
+#include <linux/icmpv6.h>
#include <linux/keyctl.h>
#include <linux/netlink.h>
#include <linux/perf_event.h>
#include <linux/rtnetlink.h>
-#include <linux/icmpv6.h>
+#include <linux/stat.h>
#include <asm/termbits.h>
#include <asm/ptrace.h>
#include <time.h>
@@ -116,6 +117,21 @@ struct stat {
#endif
+// These are defined in linux/fcntl.h, but including it globally causes
+// conflicts with fcntl.h
+#ifndef AT_STATX_SYNC_TYPE
+# define AT_STATX_SYNC_TYPE 0x6000 // Type of synchronisation required from statx()
+#endif
+#ifndef AT_STATX_SYNC_AS_STAT
+# define AT_STATX_SYNC_AS_STAT 0x0000 // - Do whatever stat() does
+#endif
+#ifndef AT_STATX_FORCE_SYNC
+# define AT_STATX_FORCE_SYNC 0x2000 // - Force the attributes to be sync'd with the server
+#endif
+#ifndef AT_STATX_DONT_SYNC
+# define AT_STATX_DONT_SYNC 0x4000 // - Don't sync attributes with the server
+#endif
+
#ifdef TCSETS2
// On systems that have "struct termios2" use this as type Termios.
typedef struct termios2 termios_t;
@@ -146,7 +162,21 @@ struct sockaddr_hci {
sa_family_t hci_family;
unsigned short hci_dev;
unsigned short hci_channel;
-};;
+};
+
+// copied from /usr/include/bluetooth/bluetooth.h
+#define BDADDR_BREDR 0x00
+#define BDADDR_LE_PUBLIC 0x01
+#define BDADDR_LE_RANDOM 0x02
+
+// copied from /usr/include/bluetooth/l2cap.h
+struct sockaddr_l2 {
+ sa_family_t l2_family;
+ unsigned short l2_psm;
+ uint8_t l2_bdaddr[6];
+ unsigned short l2_cid;
+ uint8_t l2_bdaddr_type;
+};
// copied from /usr/include/linux/un.h
struct my_sockaddr_un {
@@ -249,6 +279,10 @@ type Stat_t C.struct_stat
type Statfs_t C.struct_statfs
+type StatxTimestamp C.struct_statx_timestamp
+
+type Statx_t C.struct_statx
+
type Dirent C.struct_dirent
type Fsid C.fsid_t
@@ -290,6 +324,8 @@ type RawSockaddrNetlink C.struct_sockaddr_nl
type RawSockaddrHCI C.struct_sockaddr_hci
+type RawSockaddrL2 C.struct_sockaddr_l2
+
type RawSockaddrCAN C.struct_sockaddr_can
type RawSockaddrALG C.struct_sockaddr_alg
@@ -338,6 +374,7 @@ const (
SizeofSockaddrLinklayer = C.sizeof_struct_sockaddr_ll
SizeofSockaddrNetlink = C.sizeof_struct_sockaddr_nl
SizeofSockaddrHCI = C.sizeof_struct_sockaddr_hci
+ SizeofSockaddrL2 = C.sizeof_struct_sockaddr_l2
SizeofSockaddrCAN = C.sizeof_struct_sockaddr_can
SizeofSockaddrALG = C.sizeof_struct_sockaddr_alg
SizeofSockaddrVM = C.sizeof_struct_sockaddr_vm
@@ -513,9 +550,15 @@ type Ustat_t C.struct_ustat
type EpollEvent C.struct_my_epoll_event
const (
- AT_FDCWD = C.AT_FDCWD
- AT_NO_AUTOMOUNT = C.AT_NO_AUTOMOUNT
- AT_REMOVEDIR = C.AT_REMOVEDIR
+ AT_EMPTY_PATH = C.AT_EMPTY_PATH
+ AT_FDCWD = C.AT_FDCWD
+ AT_NO_AUTOMOUNT = C.AT_NO_AUTOMOUNT
+ AT_REMOVEDIR = C.AT_REMOVEDIR
+
+ AT_STATX_SYNC_AS_STAT = C.AT_STATX_SYNC_AS_STAT
+ AT_STATX_FORCE_SYNC = C.AT_STATX_FORCE_SYNC
+ AT_STATX_DONT_SYNC = C.AT_STATX_DONT_SYNC
+
AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
)
@@ -617,3 +660,11 @@ const (
_CPU_SETSIZE = C.__CPU_SETSIZE
_NCPUBITS = C.__NCPUBITS
)
+
+// Bluetooth
+
+const (
+ BDADDR_BREDR = C.BDADDR_BREDR
+ BDADDR_LE_PUBLIC = C.BDADDR_LE_PUBLIC
+ BDADDR_LE_RANDOM = C.BDADDR_LE_RANDOM
+)
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index a452554bc..4dd40c172 100755
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -187,6 +187,7 @@ struct ltchars {
#include <linux/vm_sockets.h>
#include <linux/taskstats.h>
#include <linux/genetlink.h>
+#include <linux/stat.h>
#include <linux/watchdog.h>
#include <net/route.h>
#include <asm/termbits.h>
@@ -428,6 +429,7 @@ ccflags="$@"
$2 ~ /^(TASKSTATS|TS)_/ ||
$2 ~ /^CGROUPSTATS_/ ||
$2 ~ /^GENL_/ ||
+ $2 ~ /^STATX_/ ||
$2 ~ /^UTIME_/ ||
$2 ~ /^XATTR_(CREATE|REPLACE)/ ||
$2 ~ /^ATTR_(BIT_MAP_COUNT|(CMN|VOL|FILE)_)/ ||
diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go
index dbdfd0a3f..23590bda3 100644
--- a/vendor/golang.org/x/sys/unix/mkpost.go
+++ b/vendor/golang.org/x/sys/unix/mkpost.go
@@ -61,14 +61,18 @@ func main() {
convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
+ // Remove spare fields (e.g. in Statx_t)
+ spareFieldsRegex := regexp.MustCompile(`X__spare\S*`)
+ b = spareFieldsRegex.ReplaceAll(b, []byte("_"))
+
+ // Remove cgo padding fields
+ removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
+ b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_"))
+
// We refuse to export private fields on s390x
if goarch == "s390x" && goos == "linux" {
- // Remove cgo padding fields
- removeFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
- b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
-
// Remove padding, hidden, or unused fields
- removeFieldsRegex = regexp.MustCompile(`X_\S+`)
+ removeFieldsRegex = regexp.MustCompile(`\bX_\S+`)
b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index d6c472a75..b9598694c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -36,6 +36,7 @@ func Getwd() (string, error) {
return "", ENOTSUP
}
+// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct {
Len uint8
Family uint8
@@ -76,18 +77,6 @@ func nametomib(name string) (mib []_C_int, err error) {
return buf[0 : n/siz], nil
}
-func direntIno(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
-}
-
-func direntReclen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
-}
-
-func direntNamlen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
-}
-
//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) }
func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) }
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
index 6dfc89a7e..777860bf0 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -14,6 +14,7 @@ package unix
import "unsafe"
+// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct {
Len uint8
Family uint8
@@ -56,22 +57,6 @@ func nametomib(name string) (mib []_C_int, err error) {
return buf[0 : n/siz], nil
}
-func direntIno(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
-}
-
-func direntReclen(buf []byte) (uint64, bool) {
- namlen, ok := direntNamlen(buf)
- if !ok {
- return 0, false
- }
- return (16 + namlen + 1 + 7) &^ 7, true
-}
-
-func direntNamlen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
-}
-
//sysnb pipe() (r int, w int, err error)
func Pipe(p []int) (err error) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
index b8ecf6c78..89f2c3fc1 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
@@ -14,6 +14,7 @@ package unix
import "unsafe"
+// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct {
Len uint8
Family uint8
@@ -54,18 +55,6 @@ func nametomib(name string) (mib []_C_int, err error) {
return buf[0 : n/siz], nil
}
-func direntIno(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
-}
-
-func direntReclen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
-}
-
-func direntNamlen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
-}
-
//sysnb pipe() (r int, w int, err error)
func Pipe(p []int) (err error) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 44628566f..b48f77f92 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -420,6 +420,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), sl, nil
}
+// SockaddrLinklayer implements the Sockaddr interface for AF_PACKET type sockets.
type SockaddrLinklayer struct {
Protocol uint16
Ifindex int
@@ -446,6 +447,7 @@ func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil
}
+// SockaddrNetlink implements the Sockaddr interface for AF_NETLINK type sockets.
type SockaddrNetlink struct {
Family uint16
Pad uint16
@@ -462,6 +464,8 @@ func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil
}
+// SockaddrHCI implements the Sockaddr interface for AF_BLUETOOTH type sockets
+// using the HCI protocol.
type SockaddrHCI struct {
Dev uint16
Channel uint16
@@ -475,6 +479,31 @@ func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil
}
+// SockaddrL2 implements the Sockaddr interface for AF_BLUETOOTH type sockets
+// using the L2CAP protocol.
+type SockaddrL2 struct {
+ PSM uint16
+ CID uint16
+ Addr [6]uint8
+ AddrType uint8
+ raw RawSockaddrL2
+}
+
+func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ sa.raw.Family = AF_BLUETOOTH
+ psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm))
+ psm[0] = byte(sa.PSM)
+ psm[1] = byte(sa.PSM >> 8)
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i]
+ }
+ cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid))
+ cid[0] = byte(sa.CID)
+ cid[1] = byte(sa.CID >> 8)
+ sa.raw.Bdaddr_type = sa.AddrType
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil
+}
+
// SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets.
// The RxID and TxID fields are used for transport protocol addressing in
// (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with
@@ -1197,22 +1226,6 @@ func ReadDirent(fd int, buf []byte) (n int, err error) {
return Getdents(fd, buf)
}
-func direntIno(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
-}
-
-func direntReclen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
-}
-
-func direntNamlen(buf []byte) (uint64, bool) {
- reclen, ok := direntReclen(buf)
- if !ok {
- return 0, false
- }
- return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true
-}
-
//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)
func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
@@ -1318,6 +1331,7 @@ func Setgid(uid int) (err error) {
//sys Setpriority(which int, who int, prio int) (err error)
//sys Setxattr(path string, attr string, data []byte, flags int) (err error)
+//sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error)
//sys Sync()
//sys Syncfs(fd int) (err error)
//sysnb Sysinfo(info *Sysinfo_t) (err error)
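
The new SockaddrL2 makes AF_BLUETOOTH/L2CAP sockets usable through the generic Sockaddr machinery. A minimal connect sketch; the PSM and remote bdaddr are placeholders, and BTPROTO_L2CAP assumes a Linux kernel built with Bluetooth support:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_BLUETOOTH, unix.SOCK_SEQPACKET, unix.BTPROTO_L2CAP)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	sa := &unix.SockaddrL2{
		PSM:      0x1001,                                       // placeholder PSM
		Addr:     [6]uint8{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, // placeholder bdaddr
		AddrType: unix.BDADDR_BREDR,
	}
	// sockaddr() takes care of the little-endian PSM/CID encoding and
	// the reversed bdaddr byte order the kernel expects.
	if err := unix.Connect(fd, sa); err != nil {
		log.Fatal(err)
	}
}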
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go
index 31ae24c3e..78d28792d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_test.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_test.go
@@ -20,9 +20,12 @@ func TestFchmodat(t *testing.T) {
defer chtmpdir(t)()
touch(t, "file1")
- os.Symlink("file1", "symlink1")
+ err := os.Symlink("file1", "symlink1")
+ if err != nil {
+ t.Fatal(err)
+ }
- err := unix.Fchmodat(unix.AT_FDCWD, "symlink1", 0444, 0)
+ err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", 0444, 0)
if err != nil {
t.Fatalf("Fchmodat: unexpected error: %v", err)
}
@@ -239,7 +242,10 @@ func TestFstatat(t *testing.T) {
t.Errorf("Fstatat: returned stat does not match Stat")
}
- os.Symlink("file1", "symlink1")
+ err = os.Symlink("file1", "symlink1")
+ if err != nil {
+ t.Fatal(err)
+ }
err = unix.Lstat("symlink1", &st1)
if err != nil {
@@ -308,6 +314,96 @@ func TestSchedSetaffinity(t *testing.T) {
}
}
+func TestStatx(t *testing.T) {
+ var stx unix.Statx_t
+ err := unix.Statx(unix.AT_FDCWD, ".", 0, 0, &stx)
+ if err == unix.ENOSYS {
+ t.Skip("statx syscall is not available, skipping test")
+ } else if err != nil {
+ t.Fatalf("Statx: %v", err)
+ }
+
+ defer chtmpdir(t)()
+ touch(t, "file1")
+
+ var st unix.Stat_t
+ err = unix.Stat("file1", &st)
+ if err != nil {
+ t.Fatalf("Stat: %v", err)
+ }
+
+ flags := unix.AT_STATX_SYNC_AS_STAT
+ err = unix.Statx(unix.AT_FDCWD, "file1", flags, unix.STATX_ALL, &stx)
+ if err != nil {
+ t.Fatalf("Statx: %v", err)
+ }
+
+ if uint32(stx.Mode) != st.Mode {
+ t.Errorf("Statx: returned stat mode does not match Stat")
+ }
+
+ atime := unix.StatxTimestamp{Sec: int64(st.Atim.Sec), Nsec: uint32(st.Atim.Nsec)}
+ ctime := unix.StatxTimestamp{Sec: int64(st.Ctim.Sec), Nsec: uint32(st.Ctim.Nsec)}
+ mtime := unix.StatxTimestamp{Sec: int64(st.Mtim.Sec), Nsec: uint32(st.Mtim.Nsec)}
+
+ if stx.Atime != atime {
+ t.Errorf("Statx: returned stat atime does not match Stat")
+ }
+ if stx.Ctime != ctime {
+ t.Errorf("Statx: returned stat ctime does not match Stat")
+ }
+ if stx.Mtime != mtime {
+ t.Errorf("Statx: returned stat mtime does not match Stat")
+ }
+
+ err = os.Symlink("file1", "symlink1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = unix.Lstat("symlink1", &st)
+ if err != nil {
+ t.Fatalf("Lstat: %v", err)
+ }
+
+ err = unix.Statx(unix.AT_FDCWD, "symlink1", flags, unix.STATX_BASIC_STATS, &stx)
+ if err != nil {
+ t.Fatalf("Statx: %v", err)
+ }
+
+ // follow symlink, expect a regulat file
+ if stx.Mode&unix.S_IFREG == 0 {
+ t.Errorf("Statx: didn't follow symlink")
+ }
+
+ err = unix.Statx(unix.AT_FDCWD, "symlink1", flags|unix.AT_SYMLINK_NOFOLLOW, unix.STATX_ALL, &stx)
+ if err != nil {
+ t.Fatalf("Statx: %v", err)
+ }
+
+ // follow symlink, expect a symlink
+ if stx.Mode&unix.S_IFLNK == 0 {
+ t.Errorf("Statx: unexpectedly followed symlink")
+ }
+ if uint32(stx.Mode) != st.Mode {
+ t.Errorf("Statx: returned stat mode does not match Lstat")
+ }
+
+ atime = unix.StatxTimestamp{Sec: int64(st.Atim.Sec), Nsec: uint32(st.Atim.Nsec)}
+ ctime = unix.StatxTimestamp{Sec: int64(st.Ctim.Sec), Nsec: uint32(st.Ctim.Nsec)}
+ mtime = unix.StatxTimestamp{Sec: int64(st.Mtim.Sec), Nsec: uint32(st.Mtim.Nsec)}
+
+ if stx.Atime != atime {
+ t.Errorf("Statx: returned stat atime does not match Lstat")
+ }
+ if stx.Ctime != ctime {
+ t.Errorf("Statx: returned stat ctime does not match Lstat")
+ }
+ if stx.Mtime != mtime {
+ t.Errorf("Statx: returned stat mtime does not match Lstat")
+ }
+}
+
// utilities taken from os/os_test.go
func touch(t *testing.T, name string) {
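
statx(2) was only added in Linux 4.11, so callers generally mirror the ENOSYS fallback used by the test above. A standalone sketch:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	err := unix.Statx(unix.AT_FDCWD, ".", unix.AT_STATX_SYNC_AS_STAT, unix.STATX_BASIC_STATS, &stx)
	if err == unix.ENOSYS {
		// Kernel older than 4.11: fall back to plain Stat.
		var st unix.Stat_t
		if err := unix.Stat(".", &st); err != nil {
			panic(err)
		}
		fmt.Printf("fallback mode: %o\n", st.Mode)
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Printf("mode: %o size: %d\n", stx.Mode, stx.Size)
}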
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index d81106d10..71b707838 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -17,6 +17,7 @@ import (
"unsafe"
)
+// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct {
Len uint8
Family uint8
@@ -92,18 +93,6 @@ func nametomib(name string) (mib []_C_int, err error) {
return mib, nil
}
-func direntIno(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
-}
-
-func direntReclen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
-}
-
-func direntNamlen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
-}
-
//sysnb pipe() (fd1 int, fd2 int, err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index 553c2fb85..37556e775 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -18,6 +18,7 @@ import (
"unsafe"
)
+// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct {
Len uint8
Family uint8
@@ -42,18 +43,6 @@ func nametomib(name string) (mib []_C_int, err error) {
return nil, EINVAL
}
-func direntIno(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
-}
-
-func direntReclen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
-}
-
-func direntNamlen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
-}
-
//sysnb pipe(p *[2]_C_int) (err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 9dc01e742..eca8d1d09 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -23,6 +23,7 @@ type syscallFunc uintptr
func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct {
Family uint16
Index uint16
@@ -34,22 +35,6 @@ type SockaddrDatalink struct {
raw RawSockaddrDatalink
}
-func direntIno(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
-}
-
-func direntReclen(buf []byte) (uint64, bool) {
- return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
-}
-
-func direntNamlen(buf []byte) (uint64, bool) {
- reclen, ok := direntReclen(buf)
- if !ok {
- return 0, false
- }
- return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true
-}
-
//sysnb pipe(p *[2]_C_int) (n int, err error)
func Pipe(p []int) (err error) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index 35c1cd5ca..950cfa81f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -149,16 +149,19 @@ func Write(fd int, p []byte) (n int, err error) {
// creation of IPv6 sockets to return EAFNOSUPPORT.
var SocketDisableIPv6 bool
+// Sockaddr represents a socket address.
type Sockaddr interface {
sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs
}
+// SockaddrInet4 implements the Sockaddr interface for AF_INET type sockets.
type SockaddrInet4 struct {
Port int
Addr [4]byte
raw RawSockaddrInet4
}
+// SockaddrInet6 implements the Sockaddr interface for AF_INET6 type sockets.
type SockaddrInet6 struct {
Port int
ZoneId uint32
@@ -166,6 +169,7 @@ type SockaddrInet6 struct {
raw RawSockaddrInet6
}
+// SockaddrUnix implements the Sockaddr interface for AF_UNIX type sockets.
type SockaddrUnix struct {
Name string
raw RawSockaddrUnix
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 8947248f6..4fba476e3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -1638,6 +1638,27 @@ const (
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
+ STATX_ALL = 0xfff
+ STATX_ATIME = 0x20
+ STATX_ATTR_APPEND = 0x20
+ STATX_ATTR_AUTOMOUNT = 0x1000
+ STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_ENCRYPTED = 0x800
+ STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_NODUMP = 0x40
+ STATX_BASIC_STATS = 0x7ff
+ STATX_BLOCKS = 0x400
+ STATX_BTIME = 0x800
+ STATX_CTIME = 0x80
+ STATX_GID = 0x10
+ STATX_INO = 0x100
+ STATX_MODE = 0x2
+ STATX_MTIME = 0x40
+ STATX_NLINK = 0x4
+ STATX_SIZE = 0x200
+ STATX_TYPE = 0x1
+ STATX_UID = 0x8
+ STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 4083cb2a8..7e2a108d8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -1639,6 +1639,27 @@ const (
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
+ STATX_ALL = 0xfff
+ STATX_ATIME = 0x20
+ STATX_ATTR_APPEND = 0x20
+ STATX_ATTR_AUTOMOUNT = 0x1000
+ STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_ENCRYPTED = 0x800
+ STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_NODUMP = 0x40
+ STATX_BASIC_STATS = 0x7ff
+ STATX_BLOCKS = 0x400
+ STATX_BTIME = 0x800
+ STATX_CTIME = 0x80
+ STATX_GID = 0x10
+ STATX_INO = 0x100
+ STATX_MODE = 0x2
+ STATX_MTIME = 0x40
+ STATX_NLINK = 0x4
+ STATX_SIZE = 0x200
+ STATX_TYPE = 0x1
+ STATX_UID = 0x8
+ STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 27d38352b..250841bdc 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -1643,6 +1643,27 @@ const (
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
+ STATX_ALL = 0xfff
+ STATX_ATIME = 0x20
+ STATX_ATTR_APPEND = 0x20
+ STATX_ATTR_AUTOMOUNT = 0x1000
+ STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_ENCRYPTED = 0x800
+ STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_NODUMP = 0x40
+ STATX_BASIC_STATS = 0x7ff
+ STATX_BLOCKS = 0x400
+ STATX_BTIME = 0x800
+ STATX_CTIME = 0x80
+ STATX_GID = 0x10
+ STATX_INO = 0x100
+ STATX_MODE = 0x2
+ STATX_MTIME = 0x40
+ STATX_NLINK = 0x4
+ STATX_SIZE = 0x200
+ STATX_TYPE = 0x1
+ STATX_UID = 0x8
+ STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 69ad31470..f5d785610 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -1629,6 +1629,27 @@ const (
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
+ STATX_ALL = 0xfff
+ STATX_ATIME = 0x20
+ STATX_ATTR_APPEND = 0x20
+ STATX_ATTR_AUTOMOUNT = 0x1000
+ STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_ENCRYPTED = 0x800
+ STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_NODUMP = 0x40
+ STATX_BASIC_STATS = 0x7ff
+ STATX_BLOCKS = 0x400
+ STATX_BTIME = 0x800
+ STATX_CTIME = 0x80
+ STATX_GID = 0x10
+ STATX_INO = 0x100
+ STATX_MODE = 0x2
+ STATX_MTIME = 0x40
+ STATX_NLINK = 0x4
+ STATX_SIZE = 0x200
+ STATX_TYPE = 0x1
+ STATX_UID = 0x8
+ STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index d131a4cc5..f45492db5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -1641,6 +1641,27 @@ const (
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
+ STATX_ALL = 0xfff
+ STATX_ATIME = 0x20
+ STATX_ATTR_APPEND = 0x20
+ STATX_ATTR_AUTOMOUNT = 0x1000
+ STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_ENCRYPTED = 0x800
+ STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_NODUMP = 0x40
+ STATX_BASIC_STATS = 0x7ff
+ STATX_BLOCKS = 0x400
+ STATX_BTIME = 0x800
+ STATX_CTIME = 0x80
+ STATX_GID = 0x10
+ STATX_INO = 0x100
+ STATX_MODE = 0x2
+ STATX_MTIME = 0x40
+ STATX_NLINK = 0x4
+ STATX_SIZE = 0x200
+ STATX_TYPE = 0x1
+ STATX_UID = 0x8
+ STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 62dd20352..f5a64fba6 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -1641,6 +1641,27 @@ const (
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
+ STATX_ALL = 0xfff
+ STATX_ATIME = 0x20
+ STATX_ATTR_APPEND = 0x20
+ STATX_ATTR_AUTOMOUNT = 0x1000
+ STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_ENCRYPTED = 0x800
+ STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_NODUMP = 0x40
+ STATX_BASIC_STATS = 0x7ff
+ STATX_BLOCKS = 0x400
+ STATX_BTIME = 0x800
+ STATX_CTIME = 0x80
+ STATX_GID = 0x10
+ STATX_INO = 0x100
+ STATX_MODE = 0x2
+ STATX_MTIME = 0x40
+ STATX_NLINK = 0x4
+ STATX_SIZE = 0x200
+ STATX_TYPE = 0x1
+ STATX_UID = 0x8
+ STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index dc8e56e30..db6d556b2 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -1641,6 +1641,27 @@ const (
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
+ STATX_ALL = 0xfff
+ STATX_ATIME = 0x20
+ STATX_ATTR_APPEND = 0x20
+ STATX_ATTR_AUTOMOUNT = 0x1000
+ STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_ENCRYPTED = 0x800
+ STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_NODUMP = 0x40
+ STATX_BASIC_STATS = 0x7ff
+ STATX_BLOCKS = 0x400
+ STATX_BTIME = 0x800
+ STATX_CTIME = 0x80
+ STATX_GID = 0x10
+ STATX_INO = 0x100
+ STATX_MODE = 0x2
+ STATX_MTIME = 0x40
+ STATX_NLINK = 0x4
+ STATX_SIZE = 0x200
+ STATX_TYPE = 0x1
+ STATX_UID = 0x8
+ STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 906766254..4a62a5509 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -1641,6 +1641,27 @@ const (
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
+ STATX_ALL = 0xfff
+ STATX_ATIME = 0x20
+ STATX_ATTR_APPEND = 0x20
+ STATX_ATTR_AUTOMOUNT = 0x1000
+ STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_ENCRYPTED = 0x800
+ STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_NODUMP = 0x40
+ STATX_BASIC_STATS = 0x7ff
+ STATX_BLOCKS = 0x400
+ STATX_BTIME = 0x800
+ STATX_CTIME = 0x80
+ STATX_GID = 0x10
+ STATX_INO = 0x100
+ STATX_MODE = 0x2
+ STATX_MTIME = 0x40
+ STATX_NLINK = 0x4
+ STATX_SIZE = 0x200
+ STATX_TYPE = 0x1
+ STATX_UID = 0x8
+ STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index f6ca82c71..5e1e81e0c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -1696,6 +1696,27 @@ const (
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
+ STATX_ALL = 0xfff
+ STATX_ATIME = 0x20
+ STATX_ATTR_APPEND = 0x20
+ STATX_ATTR_AUTOMOUNT = 0x1000
+ STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_ENCRYPTED = 0x800
+ STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_NODUMP = 0x40
+ STATX_BASIC_STATS = 0x7ff
+ STATX_BLOCKS = 0x400
+ STATX_BTIME = 0x800
+ STATX_CTIME = 0x80
+ STATX_GID = 0x10
+ STATX_INO = 0x100
+ STATX_MODE = 0x2
+ STATX_MTIME = 0x40
+ STATX_NLINK = 0x4
+ STATX_SIZE = 0x200
+ STATX_TYPE = 0x1
+ STATX_UID = 0x8
+ STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index ddd256254..6a8032439 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -1696,6 +1696,27 @@ const (
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
+ STATX_ALL = 0xfff
+ STATX_ATIME = 0x20
+ STATX_ATTR_APPEND = 0x20
+ STATX_ATTR_AUTOMOUNT = 0x1000
+ STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_ENCRYPTED = 0x800
+ STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_NODUMP = 0x40
+ STATX_BASIC_STATS = 0x7ff
+ STATX_BLOCKS = 0x400
+ STATX_BTIME = 0x800
+ STATX_CTIME = 0x80
+ STATX_GID = 0x10
+ STATX_INO = 0x100
+ STATX_MODE = 0x2
+ STATX_MTIME = 0x40
+ STATX_NLINK = 0x4
+ STATX_SIZE = 0x200
+ STATX_TYPE = 0x1
+ STATX_UID = 0x8
+ STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index fc304a68f..af5a89502 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -1700,6 +1700,27 @@ const (
SPLICE_F_MORE = 0x4
SPLICE_F_MOVE = 0x1
SPLICE_F_NONBLOCK = 0x2
+ STATX_ALL = 0xfff
+ STATX_ATIME = 0x20
+ STATX_ATTR_APPEND = 0x20
+ STATX_ATTR_AUTOMOUNT = 0x1000
+ STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_ENCRYPTED = 0x800
+ STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_NODUMP = 0x40
+ STATX_BASIC_STATS = 0x7ff
+ STATX_BLOCKS = 0x400
+ STATX_BTIME = 0x800
+ STATX_CTIME = 0x80
+ STATX_GID = 0x10
+ STATX_INO = 0x100
+ STATX_MODE = 0x2
+ STATX_MTIME = 0x40
+ STATX_NLINK = 0x4
+ STATX_SIZE = 0x200
+ STATX_TYPE = 0x1
+ STATX_UID = 0x8
+ STATX__RESERVED = 0x80000000
S_BLKSIZE = 0x200
S_IEXEC = 0x40
S_IFBLK = 0x6000
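
The zerrors_* hunks above add the same STATX_* field and attribute masks to every Linux architecture; the values mirror the kernel's include/uapi/linux/stat.h. As a quick sanity check (a hedged sketch using the constants added in this diff, not part of the vendored code), STATX_BASIC_STATS is exactly the OR of the eleven per-field bits that classic stat(2) already reports, and STATX_ALL adds the birth-time bit on top:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// The eleven per-field bits covering what stat(2) can fill in.
	basic := unix.STATX_TYPE | unix.STATX_MODE | unix.STATX_NLINK |
		unix.STATX_UID | unix.STATX_GID | unix.STATX_ATIME |
		unix.STATX_MTIME | unix.STATX_CTIME | unix.STATX_INO |
		unix.STATX_SIZE | unix.STATX_BLOCKS

	fmt.Printf("basic=%#x STATX_BASIC_STATS=%#x\n", basic, unix.STATX_BASIC_STATS) // both 0x7ff
	fmt.Printf("all=%#x   STATX_ALL=%#x\n", basic|unix.STATX_BTIME, unix.STATX_ALL) // both 0xfff
}
```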
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
index dcb95473c..ef9602c1e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
@@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() {
SyscallNoError(SYS_SYNC, 0, 0, 0)
return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
index badf57ee0..63054b358 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
@@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() {
SyscallNoError(SYS_SYNC, 0, 0, 0)
return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
index 69765c3a1..8b10ee144 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
@@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() {
SyscallNoError(SYS_SYNC, 0, 0, 0)
return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
index dc8e6422b..8f276d65f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
@@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() {
SyscallNoError(SYS_SYNC, 0, 0, 0)
return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
index 59f50c8cd..61169b331 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
@@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() {
SyscallNoError(SYS_SYNC, 0, 0, 0)
return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
index 38033805b..4cb59b4a5 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
@@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() {
SyscallNoError(SYS_SYNC, 0, 0, 0)
return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
index fee2f8532..0b547ae30 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
@@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() {
SyscallNoError(SYS_SYNC, 0, 0, 0)
return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
index 4094d3d1c..cd94d3a83 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
@@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() {
SyscallNoError(SYS_SYNC, 0, 0, 0)
return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index d83bafb39..cdad555a5 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() {
SyscallNoError(SYS_SYNC, 0, 0, 0)
return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index 460971c06..38f4e44b6 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() {
SyscallNoError(SYS_SYNC, 0, 0, 0)
return
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
index b7ef121be..c443baf63 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
@@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Sync() {
SyscallNoError(SYS_SYNC, 0, 0, 0)
return
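
Each zsyscall_* file above gains an identical generated wrapper for statx(2), a syscall added in Linux 4.11. The AT_STATX_SYNC_AS_STAT flag (0x0) keeps stat(2)'s default attribute-sync behavior, while AT_STATX_FORCE_SYNC and AT_STATX_DONT_SYNC override it either way for network filesystems. A minimal sketch of calling the new vendored API (illustrative only, assuming a 4.11+ kernel; the path is an arbitrary example):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	// Resolve the path relative to the CWD (AT_FDCWD), do not follow
	// symlinks, and request the basic stat fields plus birth time.
	err := unix.Statx(unix.AT_FDCWD, "/etc/hostname",
		unix.AT_SYMLINK_NOFOLLOW,
		unix.STATX_BASIC_STATS|unix.STATX_BTIME, &stx)
	if err != nil {
		log.Fatalf("statx: %v", err)
	}
	fmt.Printf("inode %d, %d bytes\n", stx.Ino, stx.Size)
	// Mask reports which requested fields the kernel actually returned;
	// birth time is not available on every filesystem.
	if stx.Mask&unix.STATX_BTIME != 0 {
		fmt.Printf("born %d.%09d\n", stx.Btime.Sec, stx.Btime.Nsec)
	}
}
```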
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 1bb1a5e77..7aa206e3d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -52,7 +52,7 @@ type Timex struct {
Errcnt int32
Stbcnt int32
Tai int32
- Pad_cgo_0 [44]byte
+ _ [44]byte
}
type Time_t int32
@@ -98,7 +98,7 @@ type _Gid_t uint32
type Stat_t struct {
Dev uint64
X__pad1 uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
X__st_ino uint32
Mode uint32
Nlink uint32
@@ -106,7 +106,7 @@ type Stat_t struct {
Gid uint32
Rdev uint64
X__pad2 uint16
- Pad_cgo_1 [2]byte
+ _ [2]byte
Size int64
Blksize int32
Blocks int64
@@ -131,13 +131,43 @@ type Statfs_t struct {
Spare [4]int32
}
+type StatxTimestamp struct {
+ Sec int64
+ Nsec uint32
+ X__reserved int32
+}
+
+type Statx_t struct {
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ _ [14]uint64
+}
+
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]int8
- Pad_cgo_0 [1]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]int8
+ _ [1]byte
}
type Fsid struct {
@@ -224,11 +254,20 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrL2 struct {
+ Family uint16
+ Psm uint16
+ Bdaddr [6]uint8
+ Cid uint16
+ Bdaddr_type uint8
+ _ [1]byte
+}
+
type RawSockaddrCAN struct {
- Family uint16
- Pad_cgo_0 [2]byte
- Ifindex int32
- Addr [8]byte
+ Family uint16
+ _ [2]byte
+ Ifindex int32
+ Addr [8]byte
}
type RawSockaddrALG struct {
@@ -341,7 +380,7 @@ type TCPInfo struct {
Probes uint8
Backoff uint8
Options uint8
- Pad_cgo_0 [2]byte
+ _ [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
@@ -376,6 +415,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrL2 = 0xe
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
@@ -565,9 +605,9 @@ type SockFilter struct {
}
type SockFprog struct {
- Len uint16
- Pad_cgo_0 [2]byte
- Filter *SockFilter
+ Len uint16
+ _ [2]byte
+ Filter *SockFilter
}
type InotifyEvent struct {
@@ -643,9 +683,15 @@ type EpollEvent struct {
}
const (
- AT_FDCWD = -0x64
- AT_NO_AUTOMOUNT = 0x800
- AT_REMOVEDIR = 0x200
+ AT_EMPTY_PATH = 0x1000
+ AT_FDCWD = -0x64
+ AT_NO_AUTOMOUNT = 0x800
+ AT_REMOVEDIR = 0x200
+
+ AT_STATX_SYNC_AS_STAT = 0x0
+ AT_STATX_FORCE_SYNC = 0x2000
+ AT_STATX_DONT_SYNC = 0x4000
+
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
@@ -694,11 +740,11 @@ type Winsize struct {
type Taskstats struct {
Version uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- Pad_cgo_1 [6]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -710,13 +756,13 @@ type Taskstats struct {
Ac_comm [32]int8
Ac_sched uint8
Ac_pad [3]uint8
- Pad_cgo_2 [4]byte
+ _ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
- Pad_cgo_3 [4]byte
+ _ [4]byte
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
@@ -817,3 +863,9 @@ const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x20
)
+
+const (
+ BDADDR_BREDR = 0x0
+ BDADDR_LE_PUBLIC = 0x1
+ BDADDR_LE_RANDOM = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index 081c60797..abb3d89ae 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -33,13 +33,13 @@ type Timeval struct {
type Timex struct {
Modes uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Offset int64
Freq int64
Maxerror int64
Esterror int64
Status int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
Constant int64
Precision int64
Tolerance int64
@@ -48,14 +48,14 @@ type Timex struct {
Ppsfreq int64
Jitter int64
Shift int32
- Pad_cgo_2 [4]byte
+ _ [4]byte
Stabil int64
Jitcnt int64
Calcnt int64
Errcnt int64
Stbcnt int64
Tai int32
- Pad_cgo_3 [44]byte
+ _ [44]byte
}
type Time_t int64
@@ -131,13 +131,43 @@ type Statfs_t struct {
Spare [4]int64
}
+type StatxTimestamp struct {
+ Sec int64
+ Nsec uint32
+ X__reserved int32
+}
+
+type Statx_t struct {
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ _ [14]uint64
+}
+
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]int8
- Pad_cgo_0 [5]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]int8
+ _ [5]byte
}
type Fsid struct {
@@ -145,13 +175,13 @@ type Fsid struct {
}
type Flock_t struct {
- Type int16
- Whence int16
- Pad_cgo_0 [4]byte
- Start int64
- Len int64
- Pid int32
- Pad_cgo_1 [4]byte
+ Type int16
+ Whence int16
+ _ [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
}
type FscryptPolicy struct {
@@ -226,11 +256,20 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrL2 struct {
+ Family uint16
+ Psm uint16
+ Bdaddr [6]uint8
+ Cid uint16
+ Bdaddr_type uint8
+ _ [1]byte
+}
+
type RawSockaddrCAN struct {
- Family uint16
- Pad_cgo_0 [2]byte
- Ifindex int32
- Addr [8]byte
+ Family uint16
+ _ [2]byte
+ Ifindex int32
+ Addr [8]byte
}
type RawSockaddrALG struct {
@@ -297,13 +336,13 @@ type PacketMreq struct {
type Msghdr struct {
Name *byte
Namelen uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Iov *Iovec
Iovlen uint64
Control *byte
Controllen uint64
Flags int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Cmsghdr struct {
@@ -345,7 +384,7 @@ type TCPInfo struct {
Probes uint8
Backoff uint8
Options uint8
- Pad_cgo_0 [2]byte
+ _ [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
@@ -380,6 +419,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrL2 = 0xe
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
@@ -569,9 +609,9 @@ type SockFilter struct {
}
type SockFprog struct {
- Len uint16
- Pad_cgo_0 [6]byte
- Filter *SockFilter
+ Len uint16
+ _ [6]byte
+ Filter *SockFilter
}
type InotifyEvent struct {
@@ -628,12 +668,12 @@ type Sysinfo_t struct {
Freeswap uint64
Procs uint16
Pad uint16
- Pad_cgo_0 [4]byte
+ _ [4]byte
Totalhigh uint64
Freehigh uint64
Unit uint32
X_f [0]int8
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Utsname struct {
@@ -646,12 +686,12 @@ type Utsname struct {
}
type Ustat_t struct {
- Tfree int32
- Pad_cgo_0 [4]byte
- Tinode uint64
- Fname [6]int8
- Fpack [6]int8
- Pad_cgo_1 [4]byte
+ Tfree int32
+ _ [4]byte
+ Tinode uint64
+ Fname [6]int8
+ Fpack [6]int8
+ _ [4]byte
}
type EpollEvent struct {
@@ -661,9 +701,15 @@ type EpollEvent struct {
}
const (
- AT_FDCWD = -0x64
- AT_NO_AUTOMOUNT = 0x800
- AT_REMOVEDIR = 0x200
+ AT_EMPTY_PATH = 0x1000
+ AT_FDCWD = -0x64
+ AT_NO_AUTOMOUNT = 0x800
+ AT_REMOVEDIR = 0x200
+
+ AT_STATX_SYNC_AS_STAT = 0x0
+ AT_STATX_FORCE_SYNC = 0x2000
+ AT_STATX_DONT_SYNC = 0x4000
+
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
@@ -712,11 +758,11 @@ type Winsize struct {
type Taskstats struct {
Version uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- Pad_cgo_1 [6]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -728,13 +774,13 @@ type Taskstats struct {
Ac_comm [32]int8
Ac_sched uint8
Ac_pad [3]uint8
- Pad_cgo_2 [4]byte
+ _ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
- Pad_cgo_3 [4]byte
+ _ [4]byte
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
@@ -835,3 +881,9 @@ const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x40
)
+
+const (
+ BDADDR_BREDR = 0x0
+ BDADDR_LE_PUBLIC = 0x1
+ BDADDR_LE_RANDOM = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 904510d40..11654174d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -52,7 +52,7 @@ type Timex struct {
Errcnt int32
Stbcnt int32
Tai int32
- Pad_cgo_0 [44]byte
+ _ [44]byte
}
type Time_t int32
@@ -98,7 +98,7 @@ type _Gid_t uint32
type Stat_t struct {
Dev uint64
X__pad1 uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
X__st_ino uint32
Mode uint32
Nlink uint32
@@ -106,10 +106,10 @@ type Stat_t struct {
Gid uint32
Rdev uint64
X__pad2 uint16
- Pad_cgo_1 [6]byte
+ _ [6]byte
Size int64
Blksize int32
- Pad_cgo_2 [4]byte
+ _ [4]byte
Blocks int64
Atim Timespec
Mtim Timespec
@@ -118,28 +118,58 @@ type Stat_t struct {
}
type Statfs_t struct {
- Type int32
- Bsize int32
- Blocks uint64
- Bfree uint64
- Bavail uint64
- Files uint64
- Ffree uint64
- Fsid Fsid
- Namelen int32
- Frsize int32
- Flags int32
- Spare [4]int32
- Pad_cgo_0 [4]byte
+ Type int32
+ Bsize int32
+ Blocks uint64
+ Bfree uint64
+ Bavail uint64
+ Files uint64
+ Ffree uint64
+ Fsid Fsid
+ Namelen int32
+ Frsize int32
+ Flags int32
+ Spare [4]int32
+ _ [4]byte
+}
+
+type StatxTimestamp struct {
+ Sec int64
+ Nsec uint32
+ X__reserved int32
+}
+
+type Statx_t struct {
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ _ [14]uint64
}
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]uint8
- Pad_cgo_0 [5]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]uint8
+ _ [5]byte
}
type Fsid struct {
@@ -147,13 +177,13 @@ type Fsid struct {
}
type Flock_t struct {
- Type int16
- Whence int16
- Pad_cgo_0 [4]byte
- Start int64
- Len int64
- Pid int32
- Pad_cgo_1 [4]byte
+ Type int16
+ Whence int16
+ _ [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
}
type FscryptPolicy struct {
@@ -228,11 +258,20 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrL2 struct {
+ Family uint16
+ Psm uint16
+ Bdaddr [6]uint8
+ Cid uint16
+ Bdaddr_type uint8
+ _ [1]byte
+}
+
type RawSockaddrCAN struct {
- Family uint16
- Pad_cgo_0 [2]byte
- Ifindex int32
- Addr [8]byte
+ Family uint16
+ _ [2]byte
+ Ifindex int32
+ Addr [8]byte
}
type RawSockaddrALG struct {
@@ -345,7 +384,7 @@ type TCPInfo struct {
Probes uint8
Backoff uint8
Options uint8
- Pad_cgo_0 [2]byte
+ _ [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
@@ -380,6 +419,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrL2 = 0xe
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
@@ -569,9 +609,9 @@ type SockFilter struct {
}
type SockFprog struct {
- Len uint16
- Pad_cgo_0 [2]byte
- Filter *SockFilter
+ Len uint16
+ _ [2]byte
+ Filter *SockFilter
}
type InotifyEvent struct {
@@ -632,9 +672,15 @@ type EpollEvent struct {
}
const (
- AT_FDCWD = -0x64
- AT_NO_AUTOMOUNT = 0x800
- AT_REMOVEDIR = 0x200
+ AT_EMPTY_PATH = 0x1000
+ AT_FDCWD = -0x64
+ AT_NO_AUTOMOUNT = 0x800
+ AT_REMOVEDIR = 0x200
+
+ AT_STATX_SYNC_AS_STAT = 0x0
+ AT_STATX_FORCE_SYNC = 0x2000
+ AT_STATX_DONT_SYNC = 0x4000
+
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
@@ -683,11 +729,11 @@ type Winsize struct {
type Taskstats struct {
Version uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- Pad_cgo_1 [6]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -699,13 +745,13 @@ type Taskstats struct {
Ac_comm [32]uint8
Ac_sched uint8
Ac_pad [3]uint8
- Pad_cgo_2 [4]byte
+ _ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
- Pad_cgo_3 [4]byte
+ _ [4]byte
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
@@ -806,3 +852,9 @@ const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x20
)
+
+const (
+ BDADDR_BREDR = 0x0
+ BDADDR_LE_PUBLIC = 0x1
+ BDADDR_LE_RANDOM = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index 5da8cef72..0d0de46f6 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -33,13 +33,13 @@ type Timeval struct {
type Timex struct {
Modes uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Offset int64
Freq int64
Maxerror int64
Esterror int64
Status int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
Constant int64
Precision int64
Tolerance int64
@@ -48,14 +48,14 @@ type Timex struct {
Ppsfreq int64
Jitter int64
Shift int32
- Pad_cgo_2 [4]byte
+ _ [4]byte
Stabil int64
Jitcnt int64
Calcnt int64
Errcnt int64
Stbcnt int64
Tai int32
- Pad_cgo_3 [44]byte
+ _ [44]byte
}
type Time_t int64
@@ -132,13 +132,43 @@ type Statfs_t struct {
Spare [4]int64
}
+type StatxTimestamp struct {
+ Sec int64
+ Nsec uint32
+ X__reserved int32
+}
+
+type Statx_t struct {
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ _ [14]uint64
+}
+
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]int8
- Pad_cgo_0 [5]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]int8
+ _ [5]byte
}
type Fsid struct {
@@ -146,13 +176,13 @@ type Fsid struct {
}
type Flock_t struct {
- Type int16
- Whence int16
- Pad_cgo_0 [4]byte
- Start int64
- Len int64
- Pid int32
- Pad_cgo_1 [4]byte
+ Type int16
+ Whence int16
+ _ [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
}
type FscryptPolicy struct {
@@ -227,11 +257,20 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrL2 struct {
+ Family uint16
+ Psm uint16
+ Bdaddr [6]uint8
+ Cid uint16
+ Bdaddr_type uint8
+ _ [1]byte
+}
+
type RawSockaddrCAN struct {
- Family uint16
- Pad_cgo_0 [2]byte
- Ifindex int32
- Addr [8]byte
+ Family uint16
+ _ [2]byte
+ Ifindex int32
+ Addr [8]byte
}
type RawSockaddrALG struct {
@@ -298,13 +337,13 @@ type PacketMreq struct {
type Msghdr struct {
Name *byte
Namelen uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Iov *Iovec
Iovlen uint64
Control *byte
Controllen uint64
Flags int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Cmsghdr struct {
@@ -346,7 +385,7 @@ type TCPInfo struct {
Probes uint8
Backoff uint8
Options uint8
- Pad_cgo_0 [2]byte
+ _ [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
@@ -381,6 +420,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrL2 = 0xe
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
@@ -570,9 +610,9 @@ type SockFilter struct {
}
type SockFprog struct {
- Len uint16
- Pad_cgo_0 [6]byte
- Filter *SockFilter
+ Len uint16
+ _ [6]byte
+ Filter *SockFilter
}
type InotifyEvent struct {
@@ -606,12 +646,12 @@ type Sysinfo_t struct {
Freeswap uint64
Procs uint16
Pad uint16
- Pad_cgo_0 [4]byte
+ _ [4]byte
Totalhigh uint64
Freehigh uint64
Unit uint32
X_f [0]int8
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Utsname struct {
@@ -624,12 +664,12 @@ type Utsname struct {
}
type Ustat_t struct {
- Tfree int32
- Pad_cgo_0 [4]byte
- Tinode uint64
- Fname [6]int8
- Fpack [6]int8
- Pad_cgo_1 [4]byte
+ Tfree int32
+ _ [4]byte
+ Tinode uint64
+ Fname [6]int8
+ Fpack [6]int8
+ _ [4]byte
}
type EpollEvent struct {
@@ -640,9 +680,15 @@ type EpollEvent struct {
}
const (
- AT_FDCWD = -0x64
- AT_NO_AUTOMOUNT = 0x800
- AT_REMOVEDIR = 0x200
+ AT_EMPTY_PATH = 0x1000
+ AT_FDCWD = -0x64
+ AT_NO_AUTOMOUNT = 0x800
+ AT_REMOVEDIR = 0x200
+
+ AT_STATX_SYNC_AS_STAT = 0x0
+ AT_STATX_FORCE_SYNC = 0x2000
+ AT_STATX_DONT_SYNC = 0x4000
+
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
@@ -691,11 +737,11 @@ type Winsize struct {
type Taskstats struct {
Version uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- Pad_cgo_1 [6]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -707,13 +753,13 @@ type Taskstats struct {
Ac_comm [32]int8
Ac_sched uint8
Ac_pad [3]uint8
- Pad_cgo_2 [4]byte
+ _ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
- Pad_cgo_3 [4]byte
+ _ [4]byte
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
@@ -814,3 +860,9 @@ const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x40
)
+
+const (
+ BDADDR_BREDR = 0x0
+ BDADDR_LE_PUBLIC = 0x1
+ BDADDR_LE_RANDOM = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 2707c3661..a9087c52a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -52,7 +52,7 @@ type Timex struct {
Errcnt int32
Stbcnt int32
Tai int32
- Pad_cgo_0 [44]byte
+ _ [44]byte
}
type Time_t int32
@@ -116,29 +116,59 @@ type Stat_t struct {
}
type Statfs_t struct {
- Type int32
- Bsize int32
- Frsize int32
- Pad_cgo_0 [4]byte
- Blocks uint64
- Bfree uint64
- Files uint64
- Ffree uint64
- Bavail uint64
- Fsid Fsid
- Namelen int32
- Flags int32
- Spare [5]int32
- Pad_cgo_1 [4]byte
+ Type int32
+ Bsize int32
+ Frsize int32
+ _ [4]byte
+ Blocks uint64
+ Bfree uint64
+ Files uint64
+ Ffree uint64
+ Bavail uint64
+ Fsid Fsid
+ Namelen int32
+ Flags int32
+ Spare [5]int32
+ _ [4]byte
+}
+
+type StatxTimestamp struct {
+ Sec int64
+ Nsec uint32
+ X__reserved int32
+}
+
+type Statx_t struct {
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ _ [14]uint64
}
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]int8
- Pad_cgo_0 [5]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]int8
+ _ [5]byte
}
type Fsid struct {
@@ -146,13 +176,13 @@ type Fsid struct {
}
type Flock_t struct {
- Type int16
- Whence int16
- Pad_cgo_0 [4]byte
- Start int64
- Len int64
- Pid int32
- Pad_cgo_1 [4]byte
+ Type int16
+ Whence int16
+ _ [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
}
type FscryptPolicy struct {
@@ -227,11 +257,20 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrL2 struct {
+ Family uint16
+ Psm uint16
+ Bdaddr [6]uint8
+ Cid uint16
+ Bdaddr_type uint8
+ _ [1]byte
+}
+
type RawSockaddrCAN struct {
- Family uint16
- Pad_cgo_0 [2]byte
- Ifindex int32
- Addr [8]byte
+ Family uint16
+ _ [2]byte
+ Ifindex int32
+ Addr [8]byte
}
type RawSockaddrALG struct {
@@ -344,7 +383,7 @@ type TCPInfo struct {
Probes uint8
Backoff uint8
Options uint8
- Pad_cgo_0 [2]byte
+ _ [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
@@ -379,6 +418,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrL2 = 0xe
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
@@ -568,9 +608,9 @@ type SockFilter struct {
}
type SockFprog struct {
- Len uint16
- Pad_cgo_0 [2]byte
- Filter *SockFilter
+ Len uint16
+ _ [2]byte
+ Filter *SockFilter
}
type InotifyEvent struct {
@@ -637,9 +677,15 @@ type EpollEvent struct {
}
const (
- AT_FDCWD = -0x64
- AT_NO_AUTOMOUNT = 0x800
- AT_REMOVEDIR = 0x200
+ AT_EMPTY_PATH = 0x1000
+ AT_FDCWD = -0x64
+ AT_NO_AUTOMOUNT = 0x800
+ AT_REMOVEDIR = 0x200
+
+ AT_STATX_SYNC_AS_STAT = 0x0
+ AT_STATX_FORCE_SYNC = 0x2000
+ AT_STATX_DONT_SYNC = 0x4000
+
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
@@ -688,11 +734,11 @@ type Winsize struct {
type Taskstats struct {
Version uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- Pad_cgo_1 [6]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -704,13 +750,13 @@ type Taskstats struct {
Ac_comm [32]int8
Ac_sched uint8
Ac_pad [3]uint8
- Pad_cgo_2 [4]byte
+ _ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
- Pad_cgo_3 [4]byte
+ _ [4]byte
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
@@ -811,3 +857,9 @@ const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x20
)
+
+const (
+ BDADDR_BREDR = 0x0
+ BDADDR_LE_PUBLIC = 0x1
+ BDADDR_LE_RANDOM = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 23e9da62b..01e8f65ca 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -33,13 +33,13 @@ type Timeval struct {
type Timex struct {
Modes uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Offset int64
Freq int64
Maxerror int64
Esterror int64
Status int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
Constant int64
Precision int64
Tolerance int64
@@ -48,14 +48,14 @@ type Timex struct {
Ppsfreq int64
Jitter int64
Shift int32
- Pad_cgo_2 [4]byte
+ _ [4]byte
Stabil int64
Jitcnt int64
Calcnt int64
Errcnt int64
Stbcnt int64
Tai int32
- Pad_cgo_3 [44]byte
+ _ [44]byte
}
type Time_t int64
@@ -132,13 +132,43 @@ type Statfs_t struct {
Spare [5]int64
}
+type StatxTimestamp struct {
+ Sec int64
+ Nsec uint32
+ X__reserved int32
+}
+
+type Statx_t struct {
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ _ [14]uint64
+}
+
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]int8
- Pad_cgo_0 [5]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]int8
+ _ [5]byte
}
type Fsid struct {
@@ -146,13 +176,13 @@ type Fsid struct {
}
type Flock_t struct {
- Type int16
- Whence int16
- Pad_cgo_0 [4]byte
- Start int64
- Len int64
- Pid int32
- Pad_cgo_1 [4]byte
+ Type int16
+ Whence int16
+ _ [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
}
type FscryptPolicy struct {
@@ -227,11 +257,20 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrL2 struct {
+ Family uint16
+ Psm uint16
+ Bdaddr [6]uint8
+ Cid uint16
+ Bdaddr_type uint8
+ _ [1]byte
+}
+
type RawSockaddrCAN struct {
- Family uint16
- Pad_cgo_0 [2]byte
- Ifindex int32
- Addr [8]byte
+ Family uint16
+ _ [2]byte
+ Ifindex int32
+ Addr [8]byte
}
type RawSockaddrALG struct {
@@ -298,13 +337,13 @@ type PacketMreq struct {
type Msghdr struct {
Name *byte
Namelen uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Iov *Iovec
Iovlen uint64
Control *byte
Controllen uint64
Flags int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Cmsghdr struct {
@@ -346,7 +385,7 @@ type TCPInfo struct {
Probes uint8
Backoff uint8
Options uint8
- Pad_cgo_0 [2]byte
+ _ [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
@@ -381,6 +420,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrL2 = 0xe
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
@@ -570,9 +610,9 @@ type SockFilter struct {
}
type SockFprog struct {
- Len uint16
- Pad_cgo_0 [6]byte
- Filter *SockFilter
+ Len uint16
+ _ [6]byte
+ Filter *SockFilter
}
type InotifyEvent struct {
@@ -609,12 +649,12 @@ type Sysinfo_t struct {
Freeswap uint64
Procs uint16
Pad uint16
- Pad_cgo_0 [4]byte
+ _ [4]byte
Totalhigh uint64
Freehigh uint64
Unit uint32
X_f [0]int8
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Utsname struct {
@@ -627,12 +667,12 @@ type Utsname struct {
}
type Ustat_t struct {
- Tfree int32
- Pad_cgo_0 [4]byte
- Tinode uint64
- Fname [6]int8
- Fpack [6]int8
- Pad_cgo_1 [4]byte
+ Tfree int32
+ _ [4]byte
+ Tinode uint64
+ Fname [6]int8
+ Fpack [6]int8
+ _ [4]byte
}
type EpollEvent struct {
@@ -642,9 +682,15 @@ type EpollEvent struct {
}
const (
- AT_FDCWD = -0x64
- AT_NO_AUTOMOUNT = 0x800
- AT_REMOVEDIR = 0x200
+ AT_EMPTY_PATH = 0x1000
+ AT_FDCWD = -0x64
+ AT_NO_AUTOMOUNT = 0x800
+ AT_REMOVEDIR = 0x200
+
+ AT_STATX_SYNC_AS_STAT = 0x0
+ AT_STATX_FORCE_SYNC = 0x2000
+ AT_STATX_DONT_SYNC = 0x4000
+
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
@@ -693,11 +739,11 @@ type Winsize struct {
type Taskstats struct {
Version uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- Pad_cgo_1 [6]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -709,13 +755,13 @@ type Taskstats struct {
Ac_comm [32]int8
Ac_sched uint8
Ac_pad [3]uint8
- Pad_cgo_2 [4]byte
+ _ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
- Pad_cgo_3 [4]byte
+ _ [4]byte
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
@@ -816,3 +862,9 @@ const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x40
)
+
+const (
+ BDADDR_BREDR = 0x0
+ BDADDR_LE_PUBLIC = 0x1
+ BDADDR_LE_RANDOM = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 1090dc159..6f9452d89 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -33,13 +33,13 @@ type Timeval struct {
type Timex struct {
Modes uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Offset int64
Freq int64
Maxerror int64
Esterror int64
Status int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
Constant int64
Precision int64
Tolerance int64
@@ -48,14 +48,14 @@ type Timex struct {
Ppsfreq int64
Jitter int64
Shift int32
- Pad_cgo_2 [4]byte
+ _ [4]byte
Stabil int64
Jitcnt int64
Calcnt int64
Errcnt int64
Stbcnt int64
Tai int32
- Pad_cgo_3 [44]byte
+ _ [44]byte
}
type Time_t int64
@@ -132,13 +132,43 @@ type Statfs_t struct {
Spare [5]int64
}
+type StatxTimestamp struct {
+ Sec int64
+ Nsec uint32
+ X__reserved int32
+}
+
+type Statx_t struct {
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ _ [14]uint64
+}
+
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]int8
- Pad_cgo_0 [5]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]int8
+ _ [5]byte
}
type Fsid struct {
@@ -146,13 +176,13 @@ type Fsid struct {
}
type Flock_t struct {
- Type int16
- Whence int16
- Pad_cgo_0 [4]byte
- Start int64
- Len int64
- Pid int32
- Pad_cgo_1 [4]byte
+ Type int16
+ Whence int16
+ _ [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
}
type FscryptPolicy struct {
@@ -227,11 +257,20 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrL2 struct {
+ Family uint16
+ Psm uint16
+ Bdaddr [6]uint8
+ Cid uint16
+ Bdaddr_type uint8
+ _ [1]byte
+}
+
type RawSockaddrCAN struct {
- Family uint16
- Pad_cgo_0 [2]byte
- Ifindex int32
- Addr [8]byte
+ Family uint16
+ _ [2]byte
+ Ifindex int32
+ Addr [8]byte
}
type RawSockaddrALG struct {
@@ -298,13 +337,13 @@ type PacketMreq struct {
type Msghdr struct {
Name *byte
Namelen uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Iov *Iovec
Iovlen uint64
Control *byte
Controllen uint64
Flags int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Cmsghdr struct {
@@ -346,7 +385,7 @@ type TCPInfo struct {
Probes uint8
Backoff uint8
Options uint8
- Pad_cgo_0 [2]byte
+ _ [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
@@ -381,6 +420,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrL2 = 0xe
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
@@ -570,9 +610,9 @@ type SockFilter struct {
}
type SockFprog struct {
- Len uint16
- Pad_cgo_0 [6]byte
- Filter *SockFilter
+ Len uint16
+ _ [6]byte
+ Filter *SockFilter
}
type InotifyEvent struct {
@@ -609,12 +649,12 @@ type Sysinfo_t struct {
Freeswap uint64
Procs uint16
Pad uint16
- Pad_cgo_0 [4]byte
+ _ [4]byte
Totalhigh uint64
Freehigh uint64
Unit uint32
X_f [0]int8
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Utsname struct {
@@ -627,12 +667,12 @@ type Utsname struct {
}
type Ustat_t struct {
- Tfree int32
- Pad_cgo_0 [4]byte
- Tinode uint64
- Fname [6]int8
- Fpack [6]int8
- Pad_cgo_1 [4]byte
+ Tfree int32
+ _ [4]byte
+ Tinode uint64
+ Fname [6]int8
+ Fpack [6]int8
+ _ [4]byte
}
type EpollEvent struct {
@@ -642,9 +682,15 @@ type EpollEvent struct {
}
const (
- AT_FDCWD = -0x64
- AT_NO_AUTOMOUNT = 0x800
- AT_REMOVEDIR = 0x200
+ AT_EMPTY_PATH = 0x1000
+ AT_FDCWD = -0x64
+ AT_NO_AUTOMOUNT = 0x800
+ AT_REMOVEDIR = 0x200
+
+ AT_STATX_SYNC_AS_STAT = 0x0
+ AT_STATX_FORCE_SYNC = 0x2000
+ AT_STATX_DONT_SYNC = 0x4000
+
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
@@ -693,11 +739,11 @@ type Winsize struct {
type Taskstats struct {
Version uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- Pad_cgo_1 [6]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -709,13 +755,13 @@ type Taskstats struct {
Ac_comm [32]int8
Ac_sched uint8
Ac_pad [3]uint8
- Pad_cgo_2 [4]byte
+ _ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
- Pad_cgo_3 [4]byte
+ _ [4]byte
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
@@ -816,3 +862,9 @@ const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x40
)
+
+const (
+ BDADDR_BREDR = 0x0
+ BDADDR_LE_PUBLIC = 0x1
+ BDADDR_LE_RANDOM = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index dff3f1f9c..6de721f78 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -52,7 +52,7 @@ type Timex struct {
Errcnt int32
Stbcnt int32
Tai int32
- Pad_cgo_0 [44]byte
+ _ [44]byte
}
type Time_t int32
@@ -116,29 +116,59 @@ type Stat_t struct {
}
type Statfs_t struct {
- Type int32
- Bsize int32
- Frsize int32
- Pad_cgo_0 [4]byte
- Blocks uint64
- Bfree uint64
- Files uint64
- Ffree uint64
- Bavail uint64
- Fsid Fsid
- Namelen int32
- Flags int32
- Spare [5]int32
- Pad_cgo_1 [4]byte
+ Type int32
+ Bsize int32
+ Frsize int32
+ _ [4]byte
+ Blocks uint64
+ Bfree uint64
+ Files uint64
+ Ffree uint64
+ Bavail uint64
+ Fsid Fsid
+ Namelen int32
+ Flags int32
+ Spare [5]int32
+ _ [4]byte
+}
+
+type StatxTimestamp struct {
+ Sec int64
+ Nsec uint32
+ X__reserved int32
+}
+
+type Statx_t struct {
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ _ [14]uint64
}
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]int8
- Pad_cgo_0 [5]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]int8
+ _ [5]byte
}
type Fsid struct {
@@ -146,13 +176,13 @@ type Fsid struct {
}
type Flock_t struct {
- Type int16
- Whence int16
- Pad_cgo_0 [4]byte
- Start int64
- Len int64
- Pid int32
- Pad_cgo_1 [4]byte
+ Type int16
+ Whence int16
+ _ [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
}
type FscryptPolicy struct {
@@ -227,11 +257,20 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrL2 struct {
+ Family uint16
+ Psm uint16
+ Bdaddr [6]uint8
+ Cid uint16
+ Bdaddr_type uint8
+ _ [1]byte
+}
+
type RawSockaddrCAN struct {
- Family uint16
- Pad_cgo_0 [2]byte
- Ifindex int32
- Addr [8]byte
+ Family uint16
+ _ [2]byte
+ Ifindex int32
+ Addr [8]byte
}
type RawSockaddrALG struct {
@@ -344,7 +383,7 @@ type TCPInfo struct {
Probes uint8
Backoff uint8
Options uint8
- Pad_cgo_0 [2]byte
+ _ [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
@@ -379,6 +418,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrL2 = 0xe
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
@@ -568,9 +608,9 @@ type SockFilter struct {
}
type SockFprog struct {
- Len uint16
- Pad_cgo_0 [2]byte
- Filter *SockFilter
+ Len uint16
+ _ [2]byte
+ Filter *SockFilter
}
type InotifyEvent struct {
@@ -637,9 +677,15 @@ type EpollEvent struct {
}
const (
- AT_FDCWD = -0x64
- AT_NO_AUTOMOUNT = 0x800
- AT_REMOVEDIR = 0x200
+ AT_EMPTY_PATH = 0x1000
+ AT_FDCWD = -0x64
+ AT_NO_AUTOMOUNT = 0x800
+ AT_REMOVEDIR = 0x200
+
+ AT_STATX_SYNC_AS_STAT = 0x0
+ AT_STATX_FORCE_SYNC = 0x2000
+ AT_STATX_DONT_SYNC = 0x4000
+
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
@@ -688,11 +734,11 @@ type Winsize struct {
type Taskstats struct {
Version uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- Pad_cgo_1 [6]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -704,13 +750,13 @@ type Taskstats struct {
Ac_comm [32]int8
Ac_sched uint8
Ac_pad [3]uint8
- Pad_cgo_2 [4]byte
+ _ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
- Pad_cgo_3 [4]byte
+ _ [4]byte
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
@@ -811,3 +857,9 @@ const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x20
)
+
+const (
+ BDADDR_BREDR = 0x0
+ BDADDR_LE_PUBLIC = 0x1
+ BDADDR_LE_RANDOM = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 86c747558..cb2701fd2 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -33,13 +33,13 @@ type Timeval struct {
type Timex struct {
Modes uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Offset int64
Freq int64
Maxerror int64
Esterror int64
Status int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
Constant int64
Precision int64
Tolerance int64
@@ -48,14 +48,14 @@ type Timex struct {
Ppsfreq int64
Jitter int64
Shift int32
- Pad_cgo_2 [4]byte
+ _ [4]byte
Stabil int64
Jitcnt int64
Calcnt int64
Errcnt int64
Stbcnt int64
Tai int32
- Pad_cgo_3 [44]byte
+ _ [44]byte
}
type Time_t int64
@@ -133,13 +133,43 @@ type Statfs_t struct {
Spare [4]int64
}
+type StatxTimestamp struct {
+ Sec int64
+ Nsec uint32
+ X__reserved int32
+}
+
+type Statx_t struct {
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ _ [14]uint64
+}
+
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]uint8
- Pad_cgo_0 [5]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]uint8
+ _ [5]byte
}
type Fsid struct {
@@ -147,13 +177,13 @@ type Fsid struct {
}
type Flock_t struct {
- Type int16
- Whence int16
- Pad_cgo_0 [4]byte
- Start int64
- Len int64
- Pid int32
- Pad_cgo_1 [4]byte
+ Type int16
+ Whence int16
+ _ [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
}
type FscryptPolicy struct {
@@ -228,11 +258,20 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrL2 struct {
+ Family uint16
+ Psm uint16
+ Bdaddr [6]uint8
+ Cid uint16
+ Bdaddr_type uint8
+ _ [1]byte
+}
+
type RawSockaddrCAN struct {
- Family uint16
- Pad_cgo_0 [2]byte
- Ifindex int32
- Addr [8]byte
+ Family uint16
+ _ [2]byte
+ Ifindex int32
+ Addr [8]byte
}
type RawSockaddrALG struct {
@@ -299,13 +338,13 @@ type PacketMreq struct {
type Msghdr struct {
Name *byte
Namelen uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Iov *Iovec
Iovlen uint64
Control *byte
Controllen uint64
Flags int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Cmsghdr struct {
@@ -347,7 +386,7 @@ type TCPInfo struct {
Probes uint8
Backoff uint8
Options uint8
- Pad_cgo_0 [2]byte
+ _ [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
@@ -382,6 +421,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrL2 = 0xe
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
@@ -571,9 +611,9 @@ type SockFilter struct {
}
type SockFprog struct {
- Len uint16
- Pad_cgo_0 [6]byte
- Filter *SockFilter
+ Len uint16
+ _ [6]byte
+ Filter *SockFilter
}
type InotifyEvent struct {
@@ -616,12 +656,12 @@ type Sysinfo_t struct {
Freeswap uint64
Procs uint16
Pad uint16
- Pad_cgo_0 [4]byte
+ _ [4]byte
Totalhigh uint64
Freehigh uint64
Unit uint32
X_f [0]uint8
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Utsname struct {
@@ -634,12 +674,12 @@ type Utsname struct {
}
type Ustat_t struct {
- Tfree int32
- Pad_cgo_0 [4]byte
- Tinode uint64
- Fname [6]uint8
- Fpack [6]uint8
- Pad_cgo_1 [4]byte
+ Tfree int32
+ _ [4]byte
+ Tinode uint64
+ Fname [6]uint8
+ Fpack [6]uint8
+ _ [4]byte
}
type EpollEvent struct {
@@ -650,9 +690,15 @@ type EpollEvent struct {
}
const (
- AT_FDCWD = -0x64
- AT_NO_AUTOMOUNT = 0x800
- AT_REMOVEDIR = 0x200
+ AT_EMPTY_PATH = 0x1000
+ AT_FDCWD = -0x64
+ AT_NO_AUTOMOUNT = 0x800
+ AT_REMOVEDIR = 0x200
+
+ AT_STATX_SYNC_AS_STAT = 0x0
+ AT_STATX_FORCE_SYNC = 0x2000
+ AT_STATX_DONT_SYNC = 0x4000
+
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
@@ -701,11 +747,11 @@ type Winsize struct {
type Taskstats struct {
Version uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- Pad_cgo_1 [6]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -717,13 +763,13 @@ type Taskstats struct {
Ac_comm [32]uint8
Ac_sched uint8
Ac_pad [3]uint8
- Pad_cgo_2 [4]byte
+ _ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
- Pad_cgo_3 [4]byte
+ _ [4]byte
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
@@ -824,3 +870,9 @@ const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x40
)
+
+const (
+ BDADDR_BREDR = 0x0
+ BDADDR_LE_PUBLIC = 0x1
+ BDADDR_LE_RANDOM = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 1cc159ed9..fa5b15be0 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -33,13 +33,13 @@ type Timeval struct {
type Timex struct {
Modes uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Offset int64
Freq int64
Maxerror int64
Esterror int64
Status int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
Constant int64
Precision int64
Tolerance int64
@@ -48,14 +48,14 @@ type Timex struct {
Ppsfreq int64
Jitter int64
Shift int32
- Pad_cgo_2 [4]byte
+ _ [4]byte
Stabil int64
Jitcnt int64
Calcnt int64
Errcnt int64
Stbcnt int64
Tai int32
- Pad_cgo_3 [44]byte
+ _ [44]byte
}
type Time_t int64
@@ -133,13 +133,43 @@ type Statfs_t struct {
Spare [4]int64
}
+type StatxTimestamp struct {
+ Sec int64
+ Nsec uint32
+ X__reserved int32
+}
+
+type Statx_t struct {
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ _ [14]uint64
+}
+
type Dirent struct {
- Ino uint64
- Off int64
- Reclen uint16
- Type uint8
- Name [256]uint8
- Pad_cgo_0 [5]byte
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]uint8
+ _ [5]byte
}
type Fsid struct {
@@ -147,13 +177,13 @@ type Fsid struct {
}
type Flock_t struct {
- Type int16
- Whence int16
- Pad_cgo_0 [4]byte
- Start int64
- Len int64
- Pid int32
- Pad_cgo_1 [4]byte
+ Type int16
+ Whence int16
+ _ [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
}
type FscryptPolicy struct {
@@ -228,11 +258,20 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrL2 struct {
+ Family uint16
+ Psm uint16
+ Bdaddr [6]uint8
+ Cid uint16
+ Bdaddr_type uint8
+ _ [1]byte
+}
+
type RawSockaddrCAN struct {
- Family uint16
- Pad_cgo_0 [2]byte
- Ifindex int32
- Addr [8]byte
+ Family uint16
+ _ [2]byte
+ Ifindex int32
+ Addr [8]byte
}
type RawSockaddrALG struct {
@@ -299,13 +338,13 @@ type PacketMreq struct {
type Msghdr struct {
Name *byte
Namelen uint32
- Pad_cgo_0 [4]byte
+ _ [4]byte
Iov *Iovec
Iovlen uint64
Control *byte
Controllen uint64
Flags int32
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Cmsghdr struct {
@@ -347,7 +386,7 @@ type TCPInfo struct {
Probes uint8
Backoff uint8
Options uint8
- Pad_cgo_0 [2]byte
+ _ [2]byte
Rto uint32
Ato uint32
Snd_mss uint32
@@ -382,6 +421,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrL2 = 0xe
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
@@ -571,9 +611,9 @@ type SockFilter struct {
}
type SockFprog struct {
- Len uint16
- Pad_cgo_0 [6]byte
- Filter *SockFilter
+ Len uint16
+ _ [6]byte
+ Filter *SockFilter
}
type InotifyEvent struct {
@@ -616,12 +656,12 @@ type Sysinfo_t struct {
Freeswap uint64
Procs uint16
Pad uint16
- Pad_cgo_0 [4]byte
+ _ [4]byte
Totalhigh uint64
Freehigh uint64
Unit uint32
X_f [0]uint8
- Pad_cgo_1 [4]byte
+ _ [4]byte
}
type Utsname struct {
@@ -634,12 +674,12 @@ type Utsname struct {
}
type Ustat_t struct {
- Tfree int32
- Pad_cgo_0 [4]byte
- Tinode uint64
- Fname [6]uint8
- Fpack [6]uint8
- Pad_cgo_1 [4]byte
+ Tfree int32
+ _ [4]byte
+ Tinode uint64
+ Fname [6]uint8
+ Fpack [6]uint8
+ _ [4]byte
}
type EpollEvent struct {
@@ -650,9 +690,15 @@ type EpollEvent struct {
}
const (
- AT_FDCWD = -0x64
- AT_NO_AUTOMOUNT = 0x800
- AT_REMOVEDIR = 0x200
+ AT_EMPTY_PATH = 0x1000
+ AT_FDCWD = -0x64
+ AT_NO_AUTOMOUNT = 0x800
+ AT_REMOVEDIR = 0x200
+
+ AT_STATX_SYNC_AS_STAT = 0x0
+ AT_STATX_FORCE_SYNC = 0x2000
+ AT_STATX_DONT_SYNC = 0x4000
+
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
@@ -701,11 +747,11 @@ type Winsize struct {
type Taskstats struct {
Version uint16
- Pad_cgo_0 [2]byte
+ _ [2]byte
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- Pad_cgo_1 [6]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -717,13 +763,13 @@ type Taskstats struct {
Ac_comm [32]uint8
Ac_sched uint8
Ac_pad [3]uint8
- Pad_cgo_2 [4]byte
+ _ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
- Pad_cgo_3 [4]byte
+ _ [4]byte
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
@@ -824,3 +870,9 @@ const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x40
)
+
+const (
+ BDADDR_BREDR = 0x0
+ BDADDR_LE_PUBLIC = 0x1
+ BDADDR_LE_RANDOM = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index 2d27c0fc5..64952cb78 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -132,6 +132,36 @@ type Statfs_t struct {
_ [4]byte
}
+type StatxTimestamp struct {
+ Sec int64
+ Nsec uint32
+ _ int32
+}
+
+type Statx_t struct {
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ _ [14]uint64
+}
+
type Dirent struct {
Ino uint64
Off int64
@@ -227,6 +257,15 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrL2 struct {
+ Family uint16
+ Psm uint16
+ Bdaddr [6]uint8
+ Cid uint16
+ Bdaddr_type uint8
+ _ [1]byte
+}
+
type RawSockaddrCAN struct {
Family uint16
_ [2]byte
@@ -381,6 +420,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrL2 = 0xe
SizeofSockaddrCAN = 0x10
SizeofSockaddrALG = 0x58
SizeofSockaddrVM = 0x10
@@ -667,9 +707,15 @@ type EpollEvent struct {
}
const (
- AT_FDCWD = -0x64
- AT_NO_AUTOMOUNT = 0x800
- AT_REMOVEDIR = 0x200
+ AT_EMPTY_PATH = 0x1000
+ AT_FDCWD = -0x64
+ AT_NO_AUTOMOUNT = 0x800
+ AT_REMOVEDIR = 0x200
+
+ AT_STATX_SYNC_AS_STAT = 0x0
+ AT_STATX_FORCE_SYNC = 0x2000
+ AT_STATX_DONT_SYNC = 0x4000
+
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100
)
@@ -841,3 +887,9 @@ const (
_CPU_SETSIZE = 0x400
_NCPUBITS = 0x40
)
+
+const (
+ BDADDR_BREDR = 0x0
+ BDADDR_LE_PUBLIC = 0x1
+ BDADDR_LE_RANDOM = 0x2
+)
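
The regenerated `ztypes_linux_*.go` files above add the `statx(2)` types (`Statx_t`, `StatxTimestamp`, the `AT_STATX_*` flags) and Bluetooth L2CAP types (`RawSockaddrL2`, the `BDADDR_*` address-type constants). A minimal sketch of what the statx additions enable, assuming this vendored `x/sys/unix` revision also ships the `unix.Statx` wrapper and the `STATX_BASIC_STATS` mask constant:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	// AT_FDCWD resolves the path relative to the current directory;
	// AT_STATX_SYNC_AS_STAT asks for whatever stat(2) would have returned.
	err := unix.Statx(unix.AT_FDCWD, "/etc/hosts", unix.AT_STATX_SYNC_AS_STAT,
		unix.STATX_BASIC_STATS, &stx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("size=%d btime.sec=%d\n", stx.Size, stx.Btime.Sec)
}
```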
diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml
new file mode 100644
index 000000000..7ef8b6c7f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/.travis.yml
@@ -0,0 +1,24 @@
+language: go
+
+go:
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+
+go_import_path: google.golang.org/appengine
+
+install:
+ - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine)
+ - mkdir /tmp/sdk
+ - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.40.zip"
+ - unzip -q /tmp/sdk.zip -d /tmp/sdk
+ - export PATH="$PATH:/tmp/sdk/go_appengine"
+ - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py
+
+script:
+ - goapp version
+ - go version
+ - go test -v google.golang.org/appengine/...
+ - go test -v -race google.golang.org/appengine/...
+ - goapp test -v google.golang.org/appengine/...
diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md
new file mode 100644
index 000000000..ffc298520
--- /dev/null
+++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md
@@ -0,0 +1,90 @@
+# Contributing
+
+1. Sign one of the contributor license agreements below.
+1. Get the package:
+
+ `go get -d google.golang.org/appengine`
+1. Change into the checked out source:
+
+ `cd $GOPATH/src/google.golang.org/appengine`
+1. Fork the repo.
+1. Set your fork as a remote:
+
+ `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git`
+1. Make changes, commit to your fork.
+1. Send a pull request with your changes.
+ The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request.
+
+# Testing
+
+## Running system tests
+
+Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`.
+
+Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`.
+
+Run tests with `goapp test`:
+
+```
+goapp test -v google.golang.org/appengine/...
+```
+
+## Contributor License Agreements
+
+Before we can accept your pull requests you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your work**,
+then you'll need to sign a [corporate CLA][corpcla].
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
+
+## Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
+available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+
+[indvcla]: https://developers.google.com/open-source/cla/individual
+[corpcla]: https://developers.google.com/open-source/cla/corporate
diff --git a/vendor/github.com/avct/uasurfer/LICENSE b/vendor/google.golang.org/appengine/LICENSE
index a092343b2..d64569567 100644
--- a/vendor/github.com/avct/uasurfer/LICENSE
+++ b/vendor/google.golang.org/appengine/LICENSE
@@ -3,9 +3,6 @@
Version 2.0, January 2004
http://www.apache.org/licenses/
- Copyright 2015 Avocet Systems Ltd.
- http://avocet.io/opensource
-
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
@@ -177,7 +174,20 @@
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
- Copyright 2015 Avocet Systems Ltd.
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -189,4 +199,4 @@
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
- limitations under the License. \ No newline at end of file
+ limitations under the License.
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
new file mode 100644
index 000000000..d86768a2c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/README.md
@@ -0,0 +1,73 @@
+# Go App Engine packages
+
+[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
+
+This repository supports the Go runtime on *App Engine standard*.
+It provides APIs for interacting with App Engine services.
+Its canonical import path is `google.golang.org/appengine`.
+
+See https://cloud.google.com/appengine/docs/go/
+for more information.
+
+File issue reports and feature requests on the [GitHub issue
+tracker](https://github.com/golang/appengine/issues).
+
+## Upgrading an App Engine app to the flexible environment
+
+This package does not work on *App Engine flexible*.
+
+There are many differences between the App Engine standard environment and
+the flexible environment.
+
+See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading).
+
+## Directory structure
+
+The top level directory of this repository is the `appengine` package. It
+contains the
+basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
+packages are in subdirectories (e.g. `datastore`).
+
+There is an `internal` subdirectory that contains service protocol buffers,
+plus packages required for connectivity to make API calls. App Engine apps
+should not directly import any package under `internal`.
+
+## Updating from legacy (`import "appengine"`) packages
+
+If you're currently using the bare `appengine` packages
+(that is, not these ones, imported via `google.golang.org/appengine`),
+then you can use the `aefix` tool to help automate an upgrade to these packages.
+
+Run `go get google.golang.org/appengine/cmd/aefix` to install it.
+
+### 1. Update import paths
+
+The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
+You will need to update your code to use import paths starting with that; for instance,
+code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
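
A minimal before/after sketch of the rewrite that `aefix` automates (the `guestbook` package name is hypothetical):

```go
// Hypothetical guestbook package; legacy code would have imported
// "appengine" and "appengine/datastore" instead.
package guestbook

import (
	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
)

// Keep the imports referenced so this illustrative file compiles.
var (
	_ = appengine.NewContext
	_ = datastore.RunInTransaction
)
```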
+
+### 2. Update code using deprecated, removed or modified APIs
+
+Most App Engine services are available with exactly the same API.
+A few APIs were cleaned up, and there are some differences:
+
+* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
+* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
+* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead (see the sketch after this list).
+* `appengine.Datacenter` now takes a `context.Context` argument.
+* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
+* `delay.Call` now returns an error.
+* `search.FieldLoadSaver` now handles document metadata.
+* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
+ `context.Context` instead.
+* `aetest` no longer declares its own Context type, and uses the standard one instead.
+* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
+ deprecated and unused for a long time.
+* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
+  Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
+* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
+ Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the
+ feature you require is not present in the new
+ [blobstore package](https://google.golang.org/appengine/blobstore).
+* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
+ Use the standard `net` package instead.
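
A minimal sketch of the `appengine.Timeout` replacement referenced in the list above, assuming the `urlfetch` package from this repository and a hypothetical `/fetch` handler:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/appengine"
	"google.golang.org/appengine/urlfetch"
)

func fetch(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	// Previously: ctx = appengine.Timeout(ctx, 10*time.Second)
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	resp, err := urlfetch.Client(ctx).Get("https://www.example.com/")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()
	fmt.Fprintf(w, "status: %v", resp.Status)
}

func main() {
	http.HandleFunc("/fetch", fetch)
	appengine.Main()
}
```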
diff --git a/vendor/google.golang.org/appengine/aetest/doc.go b/vendor/google.golang.org/appengine/aetest/doc.go
new file mode 100644
index 000000000..86ce8c2c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/doc.go
@@ -0,0 +1,42 @@
+/*
+Package aetest provides an API for running dev_appserver for use in tests.
+
+An example test file:
+
+ package foo_test
+
+ import (
+ "testing"
+
+ "google.golang.org/appengine/memcache"
+ "google.golang.org/appengine/aetest"
+ )
+
+ func TestFoo(t *testing.T) {
+ ctx, done, err := aetest.NewContext()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer done()
+
+ it := &memcache.Item{
+ Key: "some-key",
+ Value: []byte("some-value"),
+ }
+ err = memcache.Set(ctx, it)
+ if err != nil {
+ t.Fatalf("Set err: %v", err)
+ }
+ it, err = memcache.Get(ctx, "some-key")
+ if err != nil {
+ t.Fatalf("Get err: %v; want no error", err)
+ }
+		if g, w := string(it.Value), "some-value"; g != w {
+ t.Errorf("retrieved Item.Value = %q, want %q", g, w)
+ }
+ }
+
+The environment variable APPENGINE_DEV_APPSERVER specifies the location of the
+dev_appserver.py executable to use. If unset, the system PATH is consulted.
+*/
+package aetest
diff --git a/vendor/google.golang.org/appengine/aetest/instance.go b/vendor/google.golang.org/appengine/aetest/instance.go
new file mode 100644
index 000000000..77323f751
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance.go
@@ -0,0 +1,55 @@
+package aetest
+
+import (
+ "io"
+ "net/http"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+)
+
+// Instance represents a running instance of the development API Server.
+type Instance interface {
+ // Close kills the child api_server.py process, releasing its resources.
+ io.Closer
+ // NewRequest returns an *http.Request associated with this instance.
+ NewRequest(method, urlStr string, body io.Reader) (*http.Request, error)
+}
+
+// Options is used to specify options when creating an Instance.
+type Options struct {
+ // AppID specifies the App ID to use during tests.
+ // By default, "testapp".
+ AppID string
+ // StronglyConsistentDatastore is whether the local datastore should be
+ // strongly consistent. This will diverge from production behaviour.
+ StronglyConsistentDatastore bool
+ // StartupTimeout is a duration to wait for instance startup.
+ // By default, 15 seconds.
+ StartupTimeout time.Duration
+}
+
+// NewContext starts an instance of the development API server, and returns
+// a context that will route all API calls to that server, as well as a
+// closure that must be called when the Context is no longer required.
+func NewContext() (context.Context, func(), error) {
+ inst, err := NewInstance(nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ req, err := inst.NewRequest("GET", "/", nil)
+ if err != nil {
+ inst.Close()
+ return nil, nil, err
+ }
+ ctx := appengine.NewContext(req)
+ return ctx, func() {
+ inst.Close()
+ }, nil
+}
+
+// PrepareDevAppserver is a hook which, if set, will be called before the
+// dev_appserver.py is started, each time it is started. If aetest.NewContext
+// is invoked from the goapp test tool, this hook is unnecessary.
+var PrepareDevAppserver func() error
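
For tests that need control over the dev server, a sketch using `NewInstance` with `Options` rather than the bare `NewContext` (the app ID and timeout values are illustrative):

```go
package foo_test

import (
	"testing"
	"time"

	"google.golang.org/appengine"
	"google.golang.org/appengine/aetest"
)

func TestWithInstance(t *testing.T) {
	inst, err := aetest.NewInstance(&aetest.Options{
		AppID:                       "example-app", // hypothetical app ID
		StronglyConsistentDatastore: true,
		StartupTimeout:              30 * time.Second,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer inst.Close()

	// Each request gets its own context routed to the dev server.
	req, err := inst.NewRequest("GET", "/", nil)
	if err != nil {
		t.Fatal(err)
	}
	ctx := appengine.NewContext(req)
	_ = ctx // use ctx with datastore, memcache, etc.
}
```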
diff --git a/vendor/google.golang.org/appengine/aetest/instance_classic.go b/vendor/google.golang.org/appengine/aetest/instance_classic.go
new file mode 100644
index 000000000..fbceaa505
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance_classic.go
@@ -0,0 +1,21 @@
+// +build appengine
+
+package aetest
+
+import "appengine/aetest"
+
+// NewInstance launches a running instance of api_server.py which can be used
+// for multiple test Contexts that delegate all App Engine API calls to that
+// instance.
+// If opts is nil the default values are used.
+func NewInstance(opts *Options) (Instance, error) {
+ aetest.PrepareDevAppserver = PrepareDevAppserver
+ var aeOpts *aetest.Options
+ if opts != nil {
+ aeOpts = &aetest.Options{
+ AppID: opts.AppID,
+ StronglyConsistentDatastore: opts.StronglyConsistentDatastore,
+ }
+ }
+ return aetest.NewInstance(aeOpts)
+}
diff --git a/vendor/google.golang.org/appengine/aetest/instance_test.go b/vendor/google.golang.org/appengine/aetest/instance_test.go
new file mode 100644
index 000000000..e7003afd9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance_test.go
@@ -0,0 +1,119 @@
+package aetest
+
+import (
+ "os"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/memcache"
+ "google.golang.org/appengine/user"
+)
+
+func TestBasicAPICalls(t *testing.T) {
+ // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set.
+ if os.Getenv("APPENGINE_DEV_APPSERVER") == "" {
+ t.Skip("APPENGINE_DEV_APPSERVER not set")
+ }
+ resetEnv := internal.SetTestEnv()
+ defer resetEnv()
+
+ inst, err := NewInstance(nil)
+ if err != nil {
+ t.Fatalf("NewInstance: %v", err)
+ }
+ defer inst.Close()
+
+ req, err := inst.NewRequest("GET", "http://example.com/page", nil)
+ if err != nil {
+ t.Fatalf("NewRequest: %v", err)
+ }
+ ctx := appengine.NewContext(req)
+
+ it := &memcache.Item{
+ Key: "some-key",
+ Value: []byte("some-value"),
+ }
+ err = memcache.Set(ctx, it)
+ if err != nil {
+ t.Fatalf("Set err: %v", err)
+ }
+ it, err = memcache.Get(ctx, "some-key")
+ if err != nil {
+ t.Fatalf("Get err: %v; want no error", err)
+ }
+ if g, w := string(it.Value), "some-value"; g != w {
+ t.Errorf("retrieved Item.Value = %q, want %q", g, w)
+ }
+
+ type Entity struct{ Value string }
+ e := &Entity{Value: "foo"}
+ k := datastore.NewIncompleteKey(ctx, "Entity", nil)
+ k, err = datastore.Put(ctx, k, e)
+ if err != nil {
+ t.Fatalf("datastore.Put: %v", err)
+ }
+ e = new(Entity)
+ if err := datastore.Get(ctx, k, e); err != nil {
+ t.Fatalf("datastore.Get: %v", err)
+ }
+ if g, w := e.Value, "foo"; g != w {
+ t.Errorf("retrieved Entity.Value = %q, want %q", g, w)
+ }
+}
+
+func TestContext(t *testing.T) {
+ // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set.
+ if os.Getenv("APPENGINE_DEV_APPSERVER") == "" {
+ t.Skip("APPENGINE_DEV_APPSERVER not set")
+ }
+
+ // Check that the context methods work.
+ _, done, err := NewContext()
+ if err != nil {
+ t.Fatalf("NewContext: %v", err)
+ }
+ done()
+}
+
+func TestUsers(t *testing.T) {
+ // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set.
+ if os.Getenv("APPENGINE_DEV_APPSERVER") == "" {
+ t.Skip("APPENGINE_DEV_APPSERVER not set")
+ }
+
+ inst, err := NewInstance(nil)
+ if err != nil {
+ t.Fatalf("NewInstance: %v", err)
+ }
+ defer inst.Close()
+
+ req, err := inst.NewRequest("GET", "http://example.com/page", nil)
+ if err != nil {
+ t.Fatalf("NewRequest: %v", err)
+ }
+ ctx := appengine.NewContext(req)
+
+ if user := user.Current(ctx); user != nil {
+ t.Errorf("user.Current initially %v, want nil", user)
+ }
+
+ u := &user.User{
+ Email: "gopher@example.com",
+ Admin: true,
+ }
+ Login(u, req)
+
+ if got := user.Current(ctx); got.Email != u.Email {
+ t.Errorf("user.Current: %v, want %v", got, u)
+ }
+ if admin := user.IsAdmin(ctx); !admin {
+ t.Errorf("user.IsAdmin: %t, want true", admin)
+ }
+
+ Logout(req)
+ if user := user.Current(ctx); user != nil {
+ t.Errorf("user.Current after logout %v, want nil", user)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/aetest/instance_vm.go b/vendor/google.golang.org/appengine/aetest/instance_vm.go
new file mode 100644
index 000000000..829979000
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/instance_vm.go
@@ -0,0 +1,282 @@
+// +build !appengine
+
+package aetest
+
+import (
+ "bufio"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/internal"
+)
+
+// NewInstance launches a running instance of api_server.py which can be used
+// for multiple test Contexts that delegate all App Engine API calls to that
+// instance.
+// If opts is nil the default values are used.
+func NewInstance(opts *Options) (Instance, error) {
+ i := &instance{
+ opts: opts,
+ appID: "testapp",
+ startupTimeout: 15 * time.Second,
+ }
+ if opts != nil {
+ if opts.AppID != "" {
+ i.appID = opts.AppID
+ }
+ if opts.StartupTimeout > 0 {
+ i.startupTimeout = opts.StartupTimeout
+ }
+ }
+ if err := i.startChild(); err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+func newSessionID() string {
+ var buf [16]byte
+ io.ReadFull(rand.Reader, buf[:])
+ return fmt.Sprintf("%x", buf[:])
+}
+
+// instance implements the Instance interface.
+type instance struct {
+ opts *Options
+ child *exec.Cmd
+ apiURL *url.URL // base URL of API HTTP server
+ adminURL string // base URL of admin HTTP server
+ appDir string
+ appID string
+ startupTimeout time.Duration
+ relFuncs []func() // funcs to release any associated contexts
+}
+
+// NewRequest returns an *http.Request associated with this instance.
+func (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
+ req, err := http.NewRequest(method, urlStr, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Associate this request.
+ req, release := internal.RegisterTestRequest(req, i.apiURL, func(ctx context.Context) context.Context {
+ ctx = internal.WithAppIDOverride(ctx, "dev~"+i.appID)
+ return ctx
+ })
+ i.relFuncs = append(i.relFuncs, release)
+
+ return req, nil
+}
+
+// Close kills the child api_server.py process, releasing its resources.
+func (i *instance) Close() (err error) {
+ for _, rel := range i.relFuncs {
+ rel()
+ }
+ i.relFuncs = nil
+ child := i.child
+ if child == nil {
+ return nil
+ }
+ defer func() {
+ i.child = nil
+ err1 := os.RemoveAll(i.appDir)
+ if err == nil {
+ err = err1
+ }
+ }()
+
+ if p := child.Process; p != nil {
+ errc := make(chan error, 1)
+ go func() {
+ errc <- child.Wait()
+ }()
+
+ // Call the quit handler on the admin server.
+ res, err := http.Get(i.adminURL + "/quit")
+ if err != nil {
+ p.Kill()
+ return fmt.Errorf("unable to call /quit handler: %v", err)
+ }
+ res.Body.Close()
+ select {
+ case <-time.After(15 * time.Second):
+ p.Kill()
+ return errors.New("timeout killing child process")
+ case err = <-errc:
+ // Do nothing.
+ }
+ }
+ return
+}
+
+func fileExists(path string) bool {
+ _, err := os.Stat(path)
+ return err == nil
+}
+
+func findPython() (path string, err error) {
+ for _, name := range []string{"python2.7", "python"} {
+ path, err = exec.LookPath(name)
+ if err == nil {
+ return
+ }
+ }
+ return
+}
+
+func findDevAppserver() (string, error) {
+ if p := os.Getenv("APPENGINE_DEV_APPSERVER"); p != "" {
+ if fileExists(p) {
+ return p, nil
+ }
+ return "", fmt.Errorf("invalid APPENGINE_DEV_APPSERVER environment variable; path %q doesn't exist", p)
+ }
+ return exec.LookPath("dev_appserver.py")
+}
+
+var apiServerAddrRE = regexp.MustCompile(`Starting API server at: (\S+)`)
+var adminServerAddrRE = regexp.MustCompile(`Starting admin server at: (\S+)`)
+
+func (i *instance) startChild() (err error) {
+ if PrepareDevAppserver != nil {
+ if err := PrepareDevAppserver(); err != nil {
+ return err
+ }
+ }
+ python, err := findPython()
+ if err != nil {
+ return fmt.Errorf("Could not find python interpreter: %v", err)
+ }
+ devAppserver, err := findDevAppserver()
+ if err != nil {
+ return fmt.Errorf("Could not find dev_appserver.py: %v", err)
+ }
+
+ i.appDir, err = ioutil.TempDir("", "appengine-aetest")
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ os.RemoveAll(i.appDir)
+ }
+ }()
+ err = os.Mkdir(filepath.Join(i.appDir, "app"), 0755)
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "app.yaml"), []byte(i.appYAML()), 0644)
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "stubapp.go"), []byte(appSource), 0644)
+ if err != nil {
+ return err
+ }
+
+ appserverArgs := []string{
+ devAppserver,
+ "--port=0",
+ "--api_port=0",
+ "--admin_port=0",
+ "--automatic_restart=false",
+ "--skip_sdk_update_check=true",
+ "--clear_datastore=true",
+ "--clear_search_indexes=true",
+ "--datastore_path", filepath.Join(i.appDir, "datastore"),
+ }
+ if i.opts != nil && i.opts.StronglyConsistentDatastore {
+ appserverArgs = append(appserverArgs, "--datastore_consistency_policy=consistent")
+ }
+ appserverArgs = append(appserverArgs, filepath.Join(i.appDir, "app"))
+
+ i.child = exec.Command(python,
+ appserverArgs...,
+ )
+ i.child.Stdout = os.Stdout
+ var stderr io.Reader
+ stderr, err = i.child.StderrPipe()
+ if err != nil {
+ return err
+ }
+ stderr = io.TeeReader(stderr, os.Stderr)
+ if err = i.child.Start(); err != nil {
+ return err
+ }
+
+ // Read stderr until we have read the URLs of the API server and admin interface.
+ errc := make(chan error, 1)
+ go func() {
+ s := bufio.NewScanner(stderr)
+ for s.Scan() {
+ if match := apiServerAddrRE.FindStringSubmatch(s.Text()); match != nil {
+ u, err := url.Parse(match[1])
+ if err != nil {
+ errc <- fmt.Errorf("failed to parse API URL %q: %v", match[1], err)
+ return
+ }
+ i.apiURL = u
+ }
+ if match := adminServerAddrRE.FindStringSubmatch(s.Text()); match != nil {
+ i.adminURL = match[1]
+ }
+ if i.adminURL != "" && i.apiURL != nil {
+ break
+ }
+ }
+ errc <- s.Err()
+ }()
+
+ select {
+ case <-time.After(i.startupTimeout):
+ if p := i.child.Process; p != nil {
+ p.Kill()
+ }
+ return errors.New("timeout starting child process")
+ case err := <-errc:
+ if err != nil {
+ return fmt.Errorf("error reading child process stderr: %v", err)
+ }
+ }
+ if i.adminURL == "" {
+ return errors.New("unable to find admin server URL")
+ }
+ if i.apiURL == nil {
+ return errors.New("unable to find API server URL")
+ }
+ return nil
+}
+
+func (i *instance) appYAML() string {
+ return fmt.Sprintf(appYAMLTemplate, i.appID)
+}
+
+const appYAMLTemplate = `
+application: %s
+version: 1
+runtime: go
+api_version: go1
+
+handlers:
+- url: /.*
+ script: _go_app
+`
+
+const appSource = `
+package main
+import "google.golang.org/appengine"
+func main() { appengine.Main() }
+`
diff --git a/vendor/google.golang.org/appengine/aetest/user.go b/vendor/google.golang.org/appengine/aetest/user.go
new file mode 100644
index 000000000..bf9266f53
--- /dev/null
+++ b/vendor/google.golang.org/appengine/aetest/user.go
@@ -0,0 +1,36 @@
+package aetest
+
+import (
+ "hash/crc32"
+ "net/http"
+ "strconv"
+
+ "google.golang.org/appengine/user"
+)
+
+// Login causes the provided Request to act as though issued by the given user.
+func Login(u *user.User, req *http.Request) {
+ req.Header.Set("X-AppEngine-User-Email", u.Email)
+ id := u.ID
+ if id == "" {
+ id = strconv.Itoa(int(crc32.Checksum([]byte(u.Email), crc32.IEEETable)))
+ }
+ req.Header.Set("X-AppEngine-User-Id", id)
+ req.Header.Set("X-AppEngine-User-Federated-Identity", u.Email)
+ req.Header.Set("X-AppEngine-User-Federated-Provider", u.FederatedProvider)
+ if u.Admin {
+ req.Header.Set("X-AppEngine-User-Is-Admin", "1")
+ } else {
+ req.Header.Set("X-AppEngine-User-Is-Admin", "0")
+ }
+}
+
+// Logout causes the provided Request to act as though issued by a logged-out
+// user.
+func Logout(req *http.Request) {
+ req.Header.Del("X-AppEngine-User-Email")
+ req.Header.Del("X-AppEngine-User-Id")
+ req.Header.Del("X-AppEngine-User-Is-Admin")
+ req.Header.Del("X-AppEngine-User-Federated-Identity")
+ req.Header.Del("X-AppEngine-User-Federated-Provider")
+}
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
new file mode 100644
index 000000000..76dedc81d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -0,0 +1,113 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package appengine provides basic functionality for Google App Engine.
+//
+// For more information on how to write Go apps for Google App Engine, see:
+// https://cloud.google.com/appengine/docs/go/
+package appengine // import "google.golang.org/appengine"
+
+import (
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// The gophers party all night; the rabbits provide the beats.
+
+// Main is the principal entry point for an app running in App Engine.
+//
+// On App Engine Flexible it installs a trivial health checker if one isn't
+// already registered, and starts listening on port 8080 (overridden by the
+// $PORT environment variable).
+//
+// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
+// for details on how to do your own health checking.
+//
+// On App Engine Standard it ensures the server has started and is prepared to
+// receive requests.
+//
+// Main never returns.
+//
+// Main is designed so that the app's main package looks like this:
+//
+// package main
+//
+// import (
+// "google.golang.org/appengine"
+//
+// _ "myapp/package0"
+// _ "myapp/package1"
+// )
+//
+// func main() {
+// appengine.Main()
+// }
+//
+// The "myapp/packageX" packages are expected to register HTTP handlers
+// in their init functions.
+func Main() {
+ internal.Main()
+}
+
+// IsDevAppServer reports whether the App Engine app is running in the
+// development App Server.
+func IsDevAppServer() bool {
+ return internal.IsDevAppServer()
+}
+
+// NewContext returns a context for an in-flight HTTP request.
+// This function is cheap.
+func NewContext(req *http.Request) context.Context {
+ return internal.ReqContext(req)
+}
+
+// WithContext returns a copy of the parent context
+// and associates it with an in-flight HTTP request.
+// This function is cheap.
+func WithContext(parent context.Context, req *http.Request) context.Context {
+ return internal.WithContext(parent, req)
+}
+
+// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
+
+// BlobKey is a key for a blobstore blob.
+//
+// Conceptually, this type belongs in the blobstore package, but it lives in
+// the appengine package to avoid a circular dependency: blobstore depends on
+// datastore, and datastore needs to refer to the BlobKey type.
+type BlobKey string
+
+// GeoPoint represents a location as latitude/longitude in degrees.
+type GeoPoint struct {
+ Lat, Lng float64
+}
+
+// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
+func (g GeoPoint) Valid() bool {
+ return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
+}
+
+// APICallFunc defines a function type for handling an API call.
+// See WithCallOverride.
+type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error
+
+// WithAPICallFunc returns a copy of the parent context
+// that will cause API calls to invoke f instead of their normal operation.
+//
+// This is intended for advanced users only.
+func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
+ return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
+}
+
+// APICall performs an API call.
+//
+// This is not intended for general use; it is exported for use in conjunction
+// with WithAPICallFunc.
+func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
+ return internal.Call(ctx, service, method, in, out)
+}
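
A sketch of the `WithAPICallFunc` hook for tracing service RPCs. It assumes (based on the internal call stack) that the context passed to the override excludes the override itself, so delegating to `APICall` proceeds to the normal transport rather than recursing:

```go
package main

import (
	"log"
	"net/http"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"
	"google.golang.org/appengine"
)

func handler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	// Log every service RPC made through ctx, then delegate onward.
	ctx = appengine.WithAPICallFunc(ctx, func(ctx context.Context, service, method string, in, out proto.Message) error {
		log.Printf("API call: %s.%s", service, method)
		return appengine.APICall(ctx, service, method, in, out)
	})
	// ... use ctx with datastore, memcache, etc.
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/", handler)
	appengine.Main()
}
```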
diff --git a/vendor/google.golang.org/appengine/appengine_test.go b/vendor/google.golang.org/appengine/appengine_test.go
new file mode 100644
index 000000000..f1cf0a1b9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_test.go
@@ -0,0 +1,49 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "testing"
+)
+
+func TestValidGeoPoint(t *testing.T) {
+ testCases := []struct {
+ desc string
+ pt GeoPoint
+ want bool
+ }{
+ {
+ "valid",
+ GeoPoint{67.21, 13.37},
+ true,
+ },
+ {
+ "high lat",
+			GeoPoint{90.01, 13.37},
+ false,
+ },
+ {
+ "low lat",
+			GeoPoint{-90.01, 13.37},
+ false,
+ },
+ {
+ "high lng",
+ GeoPoint{67.21, 182},
+ false,
+ },
+ {
+ "low lng",
+ GeoPoint{67.21, -181},
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ if got := tc.pt.Valid(); got != tc.want {
+ t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
new file mode 100644
index 000000000..f4b645aad
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_vm.go
@@ -0,0 +1,20 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package appengine
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// BackgroundContext returns a context not associated with a request.
+// This should only be used when not servicing a request.
+// This only works in App Engine "flexible environment".
+func BackgroundContext() context.Context {
+ return internal.BackgroundContext()
+}
diff --git a/vendor/google.golang.org/appengine/blobstore/blobstore.go b/vendor/google.golang.org/appengine/blobstore/blobstore.go
new file mode 100644
index 000000000..1c8087b04
--- /dev/null
+++ b/vendor/google.golang.org/appengine/blobstore/blobstore.go
@@ -0,0 +1,276 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package blobstore provides a client for App Engine's persistent blob
+// storage service.
+package blobstore // import "google.golang.org/appengine/blobstore"
+
+import (
+ "bufio"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/internal"
+
+ basepb "google.golang.org/appengine/internal/base"
+ blobpb "google.golang.org/appengine/internal/blobstore"
+)
+
+const (
+ blobInfoKind = "__BlobInfo__"
+ blobFileIndexKind = "__BlobFileIndex__"
+ zeroKey = appengine.BlobKey("")
+)
+
+// BlobInfo is the blob metadata that is stored in the datastore.
+// Filename may be empty.
+type BlobInfo struct {
+ BlobKey appengine.BlobKey
+ ContentType string `datastore:"content_type"`
+ CreationTime time.Time `datastore:"creation"`
+ Filename string `datastore:"filename"`
+ Size int64 `datastore:"size"`
+ MD5 string `datastore:"md5_hash"`
+
+ // ObjectName is the Google Cloud Storage name for this blob.
+ ObjectName string `datastore:"gs_object_name"`
+}
+
+// isErrFieldMismatch returns whether err is a datastore.ErrFieldMismatch.
+//
+// The blobstore stores blob metadata in the datastore. When loading that
+// metadata, it may contain fields that we don't care about. datastore.Get will
+// return datastore.ErrFieldMismatch in that case, so we ignore that specific
+// error.
+func isErrFieldMismatch(err error) bool {
+ _, ok := err.(*datastore.ErrFieldMismatch)
+ return ok
+}
+
+// Stat returns the BlobInfo for a provided blobKey. If no blob was found for
+// that key, Stat returns datastore.ErrNoSuchEntity.
+func Stat(c context.Context, blobKey appengine.BlobKey) (*BlobInfo, error) {
+ c, _ = appengine.Namespace(c, "") // Blobstore is always in the empty string namespace
+ dskey := datastore.NewKey(c, blobInfoKind, string(blobKey), 0, nil)
+ bi := &BlobInfo{
+ BlobKey: blobKey,
+ }
+ if err := datastore.Get(c, dskey, bi); err != nil && !isErrFieldMismatch(err) {
+ return nil, err
+ }
+ return bi, nil
+}
+
+// Send sets the headers on response to instruct App Engine to send a blob as
+// the response body. This is more efficient than reading and writing it out
+// manually and isn't subject to normal response size limits.
+func Send(response http.ResponseWriter, blobKey appengine.BlobKey) {
+ hdr := response.Header()
+ hdr.Set("X-AppEngine-BlobKey", string(blobKey))
+
+ if hdr.Get("Content-Type") == "" {
+ // This value is known to dev_appserver to mean automatic.
+ // In production this is remapped to the empty value which
+ // means automatic.
+ hdr.Set("Content-Type", "application/vnd.google.appengine.auto")
+ }
+}
+
+// UploadURL creates an upload URL for the form that the user will
+// fill out, passing the application path to load when the POST of the
+// form is completed. These URLs expire and should not be reused. The
+// opts parameter may be nil.
+func UploadURL(c context.Context, successPath string, opts *UploadURLOptions) (*url.URL, error) {
+ req := &blobpb.CreateUploadURLRequest{
+ SuccessPath: proto.String(successPath),
+ }
+ if opts != nil {
+ if n := opts.MaxUploadBytes; n != 0 {
+ req.MaxUploadSizeBytes = &n
+ }
+ if n := opts.MaxUploadBytesPerBlob; n != 0 {
+ req.MaxUploadSizePerBlobBytes = &n
+ }
+ if s := opts.StorageBucket; s != "" {
+ req.GsBucketName = &s
+ }
+ }
+ res := &blobpb.CreateUploadURLResponse{}
+ if err := internal.Call(c, "blobstore", "CreateUploadURL", req, res); err != nil {
+ return nil, err
+ }
+ return url.Parse(*res.Url)
+}
+
+// UploadURLOptions are the options to create an upload URL.
+type UploadURLOptions struct {
+ MaxUploadBytes int64 // optional
+ MaxUploadBytesPerBlob int64 // optional
+
+ // StorageBucket specifies the Google Cloud Storage bucket in which
+ // to store the blob.
+ // This is required if you use Cloud Storage instead of Blobstore.
+ // Your application must have permission to write to the bucket.
+ // You may optionally specify a bucket name and path in the format
+ // "bucket_name/path", in which case the included path will be the
+ // prefix of the uploaded object's name.
+ StorageBucket string
+}
+
+// Delete deletes a blob.
+func Delete(c context.Context, blobKey appengine.BlobKey) error {
+ return DeleteMulti(c, []appengine.BlobKey{blobKey})
+}
+
+// DeleteMulti deletes multiple blobs.
+func DeleteMulti(c context.Context, blobKey []appengine.BlobKey) error {
+ s := make([]string, len(blobKey))
+ for i, b := range blobKey {
+ s[i] = string(b)
+ }
+ req := &blobpb.DeleteBlobRequest{
+ BlobKey: s,
+ }
+ res := &basepb.VoidProto{}
+ if err := internal.Call(c, "blobstore", "DeleteBlob", req, res); err != nil {
+ return err
+ }
+ return nil
+}
+
+func errorf(format string, args ...interface{}) error {
+ return fmt.Errorf("blobstore: "+format, args...)
+}
+
+// ParseUpload parses the synthetic POST request that your app gets from
+// App Engine after a user's successful upload of blobs. Given the request,
+// ParseUpload returns a map of the blobs received (keyed by HTML form
+// element name) and other non-blob POST parameters.
+func ParseUpload(req *http.Request) (blobs map[string][]*BlobInfo, other url.Values, err error) {
+ _, params, err := mime.ParseMediaType(req.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, nil, err
+ }
+ boundary := params["boundary"]
+ if boundary == "" {
+ return nil, nil, errorf("did not find MIME multipart boundary")
+ }
+
+ blobs = make(map[string][]*BlobInfo)
+ other = make(url.Values)
+
+ mreader := multipart.NewReader(io.MultiReader(req.Body, strings.NewReader("\r\n\r\n")), boundary)
+ for {
+ part, perr := mreader.NextPart()
+ if perr == io.EOF {
+ break
+ }
+ if perr != nil {
+ return nil, nil, errorf("error reading next mime part with boundary %q (len=%d): %v",
+ boundary, len(boundary), perr)
+ }
+
+ bi := &BlobInfo{}
+ ctype, params, err := mime.ParseMediaType(part.Header.Get("Content-Disposition"))
+ if err != nil {
+ return nil, nil, err
+ }
+ bi.Filename = params["filename"]
+ formKey := params["name"]
+
+ ctype, params, err = mime.ParseMediaType(part.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, nil, err
+ }
+ bi.BlobKey = appengine.BlobKey(params["blob-key"])
+ if ctype != "message/external-body" || bi.BlobKey == "" {
+ if formKey != "" {
+ slurp, serr := ioutil.ReadAll(part)
+ if serr != nil {
+ return nil, nil, errorf("error reading %q MIME part", formKey)
+ }
+ other[formKey] = append(other[formKey], string(slurp))
+ }
+ continue
+ }
+
+ // App Engine sends a MIME header as the body of each MIME part.
+ tp := textproto.NewReader(bufio.NewReader(part))
+ header, mimeerr := tp.ReadMIMEHeader()
+ if mimeerr != nil {
+ return nil, nil, mimeerr
+ }
+ bi.Size, err = strconv.ParseInt(header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ return nil, nil, err
+ }
+ bi.ContentType = header.Get("Content-Type")
+
+ // Parse the time from the MIME header like:
+ // X-AppEngine-Upload-Creation: 2011-03-15 21:38:34.712136
+ createDate := header.Get("X-AppEngine-Upload-Creation")
+ if createDate == "" {
+ return nil, nil, errorf("expected to find an X-AppEngine-Upload-Creation header")
+ }
+ bi.CreationTime, err = time.Parse("2006-01-02 15:04:05.000000", createDate)
+ if err != nil {
+ return nil, nil, errorf("error parsing X-AppEngine-Upload-Creation: %s", err)
+ }
+
+ if hdr := header.Get("Content-MD5"); hdr != "" {
+ md5, err := base64.URLEncoding.DecodeString(hdr)
+ if err != nil {
+ return nil, nil, errorf("bad Content-MD5 %q: %v", hdr, err)
+ }
+ bi.MD5 = string(md5)
+ }
+
+ // If the GCS object name was provided, record it.
+ bi.ObjectName = header.Get("X-AppEngine-Cloud-Storage-Object")
+
+ blobs[formKey] = append(blobs[formKey], bi)
+ }
+ return
+}
+
+// Reader is a blob reader.
+type Reader interface {
+ io.Reader
+ io.ReaderAt
+ io.Seeker
+}
+
+// NewReader returns a reader for a blob. It always succeeds; if the blob does
+// not exist then an error will be reported upon first read.
+func NewReader(c context.Context, blobKey appengine.BlobKey) Reader {
+ return openBlob(c, blobKey)
+}
+
+// BlobKeyForFile returns a BlobKey for a Google Storage file.
+// The filename should be of the form "/gs/bucket_name/object_name".
+func BlobKeyForFile(c context.Context, filename string) (appengine.BlobKey, error) {
+ req := &blobpb.CreateEncodedGoogleStorageKeyRequest{
+ Filename: &filename,
+ }
+ res := &blobpb.CreateEncodedGoogleStorageKeyResponse{}
+ if err := internal.Call(c, "blobstore", "CreateEncodedGoogleStorageKey", req, res); err != nil {
+ return "", err
+ }
+ return appengine.BlobKey(*res.BlobKey), nil
+}
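
A sketch of the upload-completion handler that `UploadURL`'s `successPath` would point at, using `ParseUpload` and `Send`; the `"file"` form field name and the `/upload` route are assumptions:

```go
package main

import (
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/blobstore"
)

// handleUpload is the success path passed to blobstore.UploadURL.
func handleUpload(w http.ResponseWriter, r *http.Request) {
	blobs, _, err := blobstore.ParseUpload(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	file := blobs["file"] // "file" is the assumed HTML form field name
	if len(file) == 0 {
		http.Error(w, "no file uploaded", http.StatusBadRequest)
		return
	}
	// Serve the blob straight back; App Engine streams the body itself.
	blobstore.Send(w, file[0].BlobKey)
}

func main() {
	http.HandleFunc("/upload", handleUpload)
	appengine.Main()
}
```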
diff --git a/vendor/google.golang.org/appengine/blobstore/blobstore_test.go b/vendor/google.golang.org/appengine/blobstore/blobstore_test.go
new file mode 100644
index 000000000..c2be7ef9b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/blobstore/blobstore_test.go
@@ -0,0 +1,183 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package blobstore
+
+import (
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+
+ pb "google.golang.org/appengine/internal/blobstore"
+)
+
+const rbs = readBufferSize
+
+func min(x, y int) int {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func fakeFetchData(req *pb.FetchDataRequest, res *pb.FetchDataResponse) error {
+ i0 := int(*req.StartIndex)
+ i1 := int(*req.EndIndex + 1) // Blobstore's end-indices are inclusive; Go's are exclusive.
+ bk := *req.BlobKey
+ if i := strings.Index(bk, "."); i != -1 {
+ // Strip everything past the ".".
+ bk = bk[:i]
+ }
+ switch bk {
+ case "a14p":
+ const s = "abcdefghijklmnop"
+ i0 := min(len(s), i0)
+ i1 := min(len(s), i1)
+ res.Data = []byte(s[i0:i1])
+ case "longBlob":
+ res.Data = make([]byte, i1-i0)
+ for i := range res.Data {
+ res.Data[i] = 'A' + uint8(i0/rbs)
+ i0++
+ }
+ }
+ return nil
+}
+
+// step is one step of a readerTest.
+// It consists of a Reader method to call, the method arguments
+// (lenp, offset, whence) and the expected results.
+type step struct {
+ method string
+ lenp int
+ offset int64
+ whence int
+ want string
+ wantErr error
+}
+
+var readerTest = []struct {
+ blobKey string
+ step []step
+}{
+ {"noSuchBlobKey", []step{
+ {"Read", 8, 0, 0, "", io.EOF},
+ }},
+ {"a14p.0", []step{
+ // Test basic reads.
+ {"Read", 1, 0, 0, "a", nil},
+ {"Read", 3, 0, 0, "bcd", nil},
+ {"Read", 1, 0, 0, "e", nil},
+ {"Read", 2, 0, 0, "fg", nil},
+ // Test Seek.
+ {"Seek", 0, 2, os.SEEK_SET, "2", nil},
+ {"Read", 5, 0, 0, "cdefg", nil},
+ {"Seek", 0, 2, os.SEEK_CUR, "9", nil},
+ {"Read", 1, 0, 0, "j", nil},
+ // Test reads up to and past EOF.
+ {"Read", 5, 0, 0, "klmno", nil},
+ {"Read", 5, 0, 0, "p", nil},
+ {"Read", 5, 0, 0, "", io.EOF},
+ // Test ReadAt.
+ {"ReadAt", 4, 0, 0, "abcd", nil},
+ {"ReadAt", 4, 3, 0, "defg", nil},
+ {"ReadAt", 4, 12, 0, "mnop", nil},
+ {"ReadAt", 4, 13, 0, "nop", io.EOF},
+ {"ReadAt", 4, 99, 0, "", io.EOF},
+ }},
+ {"a14p.1", []step{
+ // Test Seek before any reads.
+ {"Seek", 0, 2, os.SEEK_SET, "2", nil},
+ {"Read", 1, 0, 0, "c", nil},
+ // Test that ReadAt doesn't affect the Read offset.
+ {"ReadAt", 3, 9, 0, "jkl", nil},
+ {"Read", 3, 0, 0, "def", nil},
+ }},
+ {"a14p.2", []step{
+ // Test ReadAt before any reads or seeks.
+ {"ReadAt", 2, 14, 0, "op", nil},
+ }},
+ {"longBlob.0", []step{
+ // Test basic read.
+ {"Read", 1, 0, 0, "A", nil},
+ // Test that Read returns early when the buffer is exhausted.
+ {"Seek", 0, rbs - 2, os.SEEK_SET, strconv.Itoa(rbs - 2), nil},
+ {"Read", 5, 0, 0, "AA", nil},
+ {"Read", 3, 0, 0, "BBB", nil},
+ // Test that what we just read is still in the buffer.
+ {"Seek", 0, rbs - 2, os.SEEK_SET, strconv.Itoa(rbs - 2), nil},
+ {"Read", 5, 0, 0, "AABBB", nil},
+ // Test ReadAt.
+ {"ReadAt", 3, rbs - 4, 0, "AAA", nil},
+ {"ReadAt", 6, rbs - 4, 0, "AAAABB", nil},
+ {"ReadAt", 8, rbs - 4, 0, "AAAABBBB", nil},
+ {"ReadAt", 5, rbs - 4, 0, "AAAAB", nil},
+ {"ReadAt", 2, rbs - 4, 0, "AA", nil},
+ // Test seeking backwards from the Read offset.
+ {"Seek", 0, 2*rbs - 8, os.SEEK_SET, strconv.Itoa(2*rbs - 8), nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 1, 0, 0, "B", nil},
+ {"Read", 8, 0, 0, "BBBBCCCC", nil},
+ }},
+ {"longBlob.1", []step{
+ // Test ReadAt with a slice larger than the buffer size.
+ {"LargeReadAt", 2*rbs - 2, 0, 0, strconv.Itoa(2*rbs - 2), nil},
+ {"LargeReadAt", 2*rbs - 1, 0, 0, strconv.Itoa(2*rbs - 1), nil},
+ {"LargeReadAt", 2*rbs + 0, 0, 0, strconv.Itoa(2*rbs + 0), nil},
+ {"LargeReadAt", 2*rbs + 1, 0, 0, strconv.Itoa(2*rbs + 1), nil},
+ {"LargeReadAt", 2*rbs + 2, 0, 0, strconv.Itoa(2*rbs + 2), nil},
+ {"LargeReadAt", 2*rbs - 2, 1, 0, strconv.Itoa(2*rbs - 2), nil},
+ {"LargeReadAt", 2*rbs - 1, 1, 0, strconv.Itoa(2*rbs - 1), nil},
+ {"LargeReadAt", 2*rbs + 0, 1, 0, strconv.Itoa(2*rbs + 0), nil},
+ {"LargeReadAt", 2*rbs + 1, 1, 0, strconv.Itoa(2*rbs + 1), nil},
+ {"LargeReadAt", 2*rbs + 2, 1, 0, strconv.Itoa(2*rbs + 2), nil},
+ }},
+}
+
+func TestReader(t *testing.T) {
+ for _, rt := range readerTest {
+ c := aetesting.FakeSingleContext(t, "blobstore", "FetchData", fakeFetchData)
+ r := NewReader(c, appengine.BlobKey(rt.blobKey))
+ for i, step := range rt.step {
+ var (
+ got string
+ gotErr error
+ n int
+ offset int64
+ )
+ switch step.method {
+ case "LargeReadAt":
+ p := make([]byte, step.lenp)
+ n, gotErr = r.ReadAt(p, step.offset)
+ got = strconv.Itoa(n)
+ case "Read":
+ p := make([]byte, step.lenp)
+ n, gotErr = r.Read(p)
+ got = string(p[:n])
+ case "ReadAt":
+ p := make([]byte, step.lenp)
+ n, gotErr = r.ReadAt(p, step.offset)
+ got = string(p[:n])
+ case "Seek":
+ offset, gotErr = r.Seek(step.offset, step.whence)
+ got = strconv.FormatInt(offset, 10)
+ default:
+ t.Fatalf("unknown method: %s", step.method)
+ }
+ if gotErr != step.wantErr {
+ t.Fatalf("%s step %d: got error %v want %v", rt.blobKey, i, gotErr, step.wantErr)
+ }
+ if got != step.want {
+ t.Fatalf("%s step %d: got %q want %q", rt.blobKey, i, got, step.want)
+ }
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/blobstore/read.go b/vendor/google.golang.org/appengine/blobstore/read.go
new file mode 100644
index 000000000..578b1f550
--- /dev/null
+++ b/vendor/google.golang.org/appengine/blobstore/read.go
@@ -0,0 +1,160 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package blobstore
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+
+ blobpb "google.golang.org/appengine/internal/blobstore"
+)
+
+// openBlob returns a reader for a blob. It always succeeds; if the blob does
+// not exist then an error will be reported upon first read.
+func openBlob(c context.Context, blobKey appengine.BlobKey) Reader {
+ return &reader{
+ c: c,
+ blobKey: blobKey,
+ }
+}
+
+const readBufferSize = 256 * 1024
+
+// reader is a blob reader. It implements the Reader interface.
+type reader struct {
+ c context.Context
+
+ // Either blobKey or filename is set:
+ blobKey appengine.BlobKey
+ filename string
+
+ closeFunc func() // is nil if unavailable or already closed.
+
+ // buf is the read buffer. r is how much of buf has been read.
+ // off is the offset of buf[0] relative to the start of the blob.
+ // An invariant is 0 <= r && r <= len(buf).
+ // Reads that don't require an RPC call will increment r but not off.
+ // Seeks may modify r without discarding the buffer, but only if the
+ // invariant can be maintained.
+ mu sync.Mutex
+ buf []byte
+ r int
+ off int64
+}
+
+func (r *reader) Close() error {
+ if f := r.closeFunc; f != nil {
+ f()
+ }
+ r.closeFunc = nil
+ return nil
+}
+
+func (r *reader) Read(p []byte) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.r == len(r.buf) {
+ if err := r.fetch(r.off + int64(r.r)); err != nil {
+ return 0, err
+ }
+ }
+ n := copy(p, r.buf[r.r:])
+ r.r += n
+ return n, nil
+}
+
+func (r *reader) ReadAt(p []byte, off int64) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ // Convert relative offsets to absolute offsets.
+ ab0 := r.off + int64(r.r)
+ ab1 := r.off + int64(len(r.buf))
+ ap0 := off
+ ap1 := off + int64(len(p))
+ // Check if we can satisfy the read entirely out of the existing buffer.
+ if r.off <= ap0 && ap1 <= ab1 {
+ // Convert off from an absolute offset to a relative offset.
+ rp0 := int(ap0 - r.off)
+ return copy(p, r.buf[rp0:]), nil
+ }
+ // Restore the original Read/Seek offset after ReadAt completes.
+ defer r.seek(ab0)
+ // Repeatedly fetch and copy until we have filled p.
+ n := 0
+ for len(p) > 0 {
+ if err := r.fetch(off + int64(n)); err != nil {
+ return n, err
+ }
+ r.r = copy(p, r.buf)
+ n += r.r
+ p = p[r.r:]
+ }
+ return n, nil
+}
+
+func (r *reader) Seek(offset int64, whence int) (ret int64, err error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ switch whence {
+ case os.SEEK_SET:
+ ret = offset
+ case os.SEEK_CUR:
+ ret = r.off + int64(r.r) + offset
+ case os.SEEK_END:
+ return 0, errors.New("seeking relative to the end of a blob isn't supported")
+ default:
+ return 0, fmt.Errorf("invalid Seek whence value: %d", whence)
+ }
+ if ret < 0 {
+ return 0, errors.New("negative Seek offset")
+ }
+ return r.seek(ret)
+}
+
+// fetch fetches readBufferSize bytes starting at the given offset. On success,
+// the data is saved as r.buf.
+func (r *reader) fetch(off int64) error {
+ req := &blobpb.FetchDataRequest{
+ BlobKey: proto.String(string(r.blobKey)),
+ StartIndex: proto.Int64(off),
+ EndIndex: proto.Int64(off + readBufferSize - 1), // EndIndex is inclusive.
+ }
+ res := &blobpb.FetchDataResponse{}
+ if err := internal.Call(r.c, "blobstore", "FetchData", req, res); err != nil {
+ return err
+ }
+ if len(res.Data) == 0 {
+ return io.EOF
+ }
+ r.buf, r.r, r.off = res.Data, 0, off
+ return nil
+}
+
+// seek seeks to the given offset with an effective whence equal to SEEK_SET.
+// It discards the read buffer if the invariant cannot be maintained.
+func (r *reader) seek(off int64) (int64, error) {
+ delta := off - r.off
+ if delta >= 0 && delta < int64(len(r.buf)) {
+ r.r = int(delta)
+ return off, nil
+ }
+ r.buf, r.r, r.off = nil, 0, off
+ return off, nil
+}
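
A sketch of the reader semantics documented above (buffered reads, `ReadAt` leaving the `Read`/`Seek` offset untouched); the `blobKey` query parameter is an assumption:

```go
package main

import (
	"fmt"
	"net/http"
	"os"

	"google.golang.org/appengine"
	"google.golang.org/appengine/blobstore"
)

func dump(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	key := appengine.BlobKey(r.FormValue("blobKey")) // assumed query parameter
	br := blobstore.NewReader(ctx, key)

	// ReadAt does not move the Read offset, so this peek at the first
	// bytes is independent of the Seek below.
	head := make([]byte, 16)
	if _, err := br.ReadAt(head, 0); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if _, err := br.Seek(1024, os.SEEK_SET); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "first bytes: %q\n", head)
}
```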
diff --git a/vendor/google.golang.org/appengine/capability/capability.go b/vendor/google.golang.org/appengine/capability/capability.go
new file mode 100644
index 000000000..3a60bd55f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/capability/capability.go
@@ -0,0 +1,52 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package capability exposes information about outages and scheduled downtime
+for specific API capabilities.
+
+This package does not work in App Engine "flexible environment".
+
+Example:
+ if !capability.Enabled(c, "datastore_v3", "write") {
+ // show user a different page
+ }
+*/
+package capability // import "google.golang.org/appengine/capability"
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/log"
+
+ pb "google.golang.org/appengine/internal/capability"
+)
+
+// Enabled returns whether an API's capabilities are enabled.
+// The wildcard "*" capability matches every capability of an API.
+// If the underlying RPC fails (if the package is unknown, for example),
+// false is returned and information is written to the application log.
+func Enabled(ctx context.Context, api, capability string) bool {
+ req := &pb.IsEnabledRequest{
+ Package: &api,
+ Capability: []string{capability},
+ }
+ res := &pb.IsEnabledResponse{}
+ if err := internal.Call(ctx, "capability_service", "IsEnabled", req, res); err != nil {
+ log.Warningf(ctx, "capability.Enabled: RPC failed: %v", err)
+ return false
+ }
+ switch *res.SummaryStatus {
+ case pb.IsEnabledResponse_ENABLED,
+ pb.IsEnabledResponse_SCHEDULED_FUTURE,
+ pb.IsEnabledResponse_SCHEDULED_NOW:
+ return true
+ case pb.IsEnabledResponse_UNKNOWN:
+ log.Errorf(ctx, "capability.Enabled: unknown API capability %s/%s", api, capability)
+ return false
+ default:
+ return false
+ }
+}
diff --git a/vendor/google.golang.org/appengine/channel/channel.go b/vendor/google.golang.org/appengine/channel/channel.go
new file mode 100644
index 000000000..96945f6d6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/channel/channel.go
@@ -0,0 +1,87 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package channel implements the server side of App Engine's Channel API.
+
+Create creates a new channel associated with the given clientID,
+which must be unique to the client that will use the returned token.
+
+ token, err := channel.Create(c, "player1")
+ if err != nil {
+ // handle error
+ }
+ // return token to the client in an HTTP response
+
+Send sends a message to the client over the channel identified by clientID.
+
+ channel.Send(c, "player1", "Game over!")
+
+Deprecated: The Channel API feature has been deprecated and is going to be removed. See the Channel API Turndown document for details and timetable.
+
+https://cloud.google.com/appengine/docs/deprecations/channel
+*/
+package channel // import "google.golang.org/appengine/channel"
+
+import (
+ "encoding/json"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/channel"
+)
+
+// Create creates a channel and returns a token for use by the client.
+// The clientID is an application-provided string used to identify the client.
+func Create(c context.Context, clientID string) (token string, err error) {
+ req := &pb.CreateChannelRequest{
+ ApplicationKey: &clientID,
+ }
+ resp := &pb.CreateChannelResponse{}
+ err = internal.Call(c, service, "CreateChannel", req, resp)
+ token = resp.GetToken()
+ return token, remapError(err)
+}
+
+// Send sends a message on the channel associated with clientID.
+func Send(c context.Context, clientID, message string) error {
+ req := &pb.SendMessageRequest{
+ ApplicationKey: &clientID,
+ Message: &message,
+ }
+ resp := &basepb.VoidProto{}
+ return remapError(internal.Call(c, service, "SendChannelMessage", req, resp))
+}
+
+// SendJSON is a helper function that sends a JSON-encoded value
+// on the channel associated with clientID.
+func SendJSON(c context.Context, clientID string, value interface{}) error {
+ m, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ return Send(c, clientID, string(m))
+}
+
+// remapError fixes any APIError referencing "xmpp" into one referencing "channel".
+func remapError(err error) error {
+ if e, ok := err.(*internal.APIError); ok {
+ if e.Service == "xmpp" {
+ e.Service = "channel"
+ }
+ }
+ return err
+}
+
+var service = "xmpp" // prod
+
+func init() {
+ if appengine.IsDevAppServer() {
+ service = "channel" // dev
+ }
+ internal.RegisterErrorCodeMap("channel", pb.ChannelServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/channel/channel_test.go b/vendor/google.golang.org/appengine/channel/channel_test.go
new file mode 100644
index 000000000..c7498eb83
--- /dev/null
+++ b/vendor/google.golang.org/appengine/channel/channel_test.go
@@ -0,0 +1,21 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package channel
+
+import (
+ "testing"
+
+ "google.golang.org/appengine/internal"
+)
+
+func TestRemapError(t *testing.T) {
+ err := &internal.APIError{
+ Service: "xmpp",
+ }
+ err = remapError(err).(*internal.APIError)
+ if err.Service != "channel" {
+ t.Errorf("err.Service = %q, want %q", err.Service, "channel")
+ }
+}
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go
new file mode 100644
index 000000000..7b27e6b12
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go
@@ -0,0 +1,62 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package cloudsql exposes access to Google Cloud SQL databases.
+
+This package does not work in App Engine "flexible environment".
+
+This package is intended for MySQL drivers to make App Engine-specific
+connections. Applications should use this package through database/sql:
+Select a pure Go MySQL driver that supports this package, and use sql.Open
+with protocol "cloudsql" and an address of the Cloud SQL instance.
+
+A Go MySQL driver that has been tested to work well with Cloud SQL
+is the go-sql-driver:
+ import "database/sql"
+ import _ "github.com/go-sql-driver/mysql"
+
+ db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname")
+
+
+Another driver that works well with Cloud SQL is the mymysql driver:
+ import "database/sql"
+ import _ "github.com/ziutek/mymysql/godrv"
+
+ db, err := sql.Open("mymysql", "cloudsql:instance-name*dbname/user/password")
+
+
+Using either of these drivers, you can perform a standard SQL query.
+This example assumes there is a table named 'users' with
+columns 'first_name' and 'last_name':
+
+ rows, err := db.Query("SELECT first_name, last_name FROM users")
+ if err != nil {
+ log.Errorf(ctx, "db.Query: %v", err)
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var firstName string
+ var lastName string
+ if err := rows.Scan(&firstName, &lastName); err != nil {
+ log.Errorf(ctx, "rows.Scan: %v", err)
+ continue
+ }
+ log.Infof(ctx, "First: %v - Last: %v", firstName, lastName)
+ }
+ if err := rows.Err(); err != nil {
+ log.Errorf(ctx, "Row error: %v", err)
+ }
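+
+Drivers reach the instance through Dial (defined below); calling it
+directly is also possible, e.g. (illustrative):
+
+	conn, err := cloudsql.Dial("project-id:instance-name")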
+*/
+package cloudsql
+
+import (
+ "net"
+)
+
+// Dial connects to the named Cloud SQL instance.
+func Dial(instance string) (net.Conn, error) {
+ return connect(instance)
+}
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go
new file mode 100644
index 000000000..af62dba14
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go
@@ -0,0 +1,17 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package cloudsql
+
+import (
+ "net"
+
+ "appengine/cloudsql"
+)
+
+func connect(instance string) (net.Conn, error) {
+ return cloudsql.Dial(instance)
+}
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go
new file mode 100644
index 000000000..90fa7b31e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package cloudsql
+
+import (
+ "errors"
+ "net"
+)
+
+func connect(instance string) (net.Conn, error) {
+ return nil, errors.New(`cloudsql: not supported in App Engine "flexible environment"`)
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go
new file mode 100644
index 000000000..c66849e83
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go
@@ -0,0 +1,342 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Program aebundler turns a Go app into a fully self-contained tar file.
+// The app and its subdirectories (if any) are placed under "."
+// and the dependencies from $GOPATH are placed under ./_gopath/src.
+// A main func is synthesized if one does not exist.
+//
+// A sample Dockerfile to be used with this bundler could look like this:
+// FROM gcr.io/google-appengine/go-compat
+// ADD . /app
+// RUN GOPATH=/app/_gopath go build -tags appenginevm -o /app/_ah/exe
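+//
+// A typical invocation (illustrative; see the flags defined below):
+//	aebundler -o app.tar -root .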
+package main
+
+import (
+ "archive/tar"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+var (
+ output = flag.String("o", "", "name of output tar file or '-' for stdout")
+ rootDir = flag.String("root", ".", "directory name of application root")
+ vm = flag.Bool("vm", true, `bundle an app for App Engine "flexible environment"`)
+
+ skipFiles = map[string]bool{
+ ".git": true,
+ ".gitconfig": true,
+ ".hg": true,
+ ".travis.yml": true,
+ }
+)
+
+const (
+ newMain = `package main
+import "google.golang.org/appengine"
+func main() {
+ appengine.Main()
+}
+`
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\t%s -o <file.tar|->\tBundle app to named tar file or stdout\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\noptional arguments:\n")
+ flag.PrintDefaults()
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ var tags []string
+ if *vm {
+ tags = append(tags, "appenginevm")
+ } else {
+ tags = append(tags, "appengine")
+ }
+
+ tarFile := *output
+ if tarFile == "" {
+ usage()
+ errorf("Required -o flag not specified.")
+ }
+
+ app, err := analyze(tags)
+ if err != nil {
+ errorf("Error analyzing app: %v", err)
+ }
+ if err := app.bundle(tarFile); err != nil {
+ errorf("Unable to bundle app: %v", err)
+ }
+}
+
+// errorf prints the error message and exits.
+func errorf(format string, a ...interface{}) {
+ fmt.Fprintf(os.Stderr, "aebundler: "+format+"\n", a...)
+ os.Exit(1)
+}
+
+type app struct {
+ hasMain bool
+ appFiles []string
+ imports map[string]string
+}
+
+// analyze inspects the app under the given build tags and returns whether it
+// has a main func, its Go source files, and a map of full directory import
+// names to original import names.
+func analyze(tags []string) (*app, error) {
+ ctxt := buildContext(tags)
+ hasMain, appFiles, err := checkMain(ctxt)
+ if err != nil {
+ return nil, err
+ }
+ gopath := filepath.SplitList(ctxt.GOPATH)
+ im, err := imports(ctxt, *rootDir, gopath)
+ return &app{
+ hasMain: hasMain,
+ appFiles: appFiles,
+ imports: im,
+ }, err
+}
+
+// buildContext returns the context for building the source.
+func buildContext(tags []string) *build.Context {
+ return &build.Context{
+ GOARCH: build.Default.GOARCH,
+ GOOS: build.Default.GOOS,
+ GOROOT: build.Default.GOROOT,
+ GOPATH: build.Default.GOPATH,
+ Compiler: build.Default.Compiler,
+ BuildTags: append(build.Default.BuildTags, tags...),
+ }
+}
+
+// bundle bundles the app into the named tarFile ("-"==stdout).
+func (s *app) bundle(tarFile string) (err error) {
+ var out io.Writer
+ if tarFile == "-" {
+ out = os.Stdout
+ } else {
+ f, err := os.Create(tarFile)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if cerr := f.Close(); err == nil {
+ err = cerr
+ }
+ }()
+ out = f
+ }
+ tw := tar.NewWriter(out)
+
+ for srcDir, importName := range s.imports {
+ dstDir := "_gopath/src/" + importName
+ if err = copyTree(tw, dstDir, srcDir); err != nil {
+ return fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err)
+ }
+ }
+ if err := copyTree(tw, ".", *rootDir); err != nil {
+ return fmt.Errorf("unable to copy root directory to /app: %v", err)
+ }
+ if !s.hasMain {
+ if err := synthesizeMain(tw, s.appFiles); err != nil {
+ return fmt.Errorf("unable to synthesize new main func: %v", err)
+ }
+ }
+
+ if err := tw.Close(); err != nil {
+ return fmt.Errorf("unable to close tar file %v: %v", tarFile, err)
+ }
+ return nil
+}
+
+// synthesizeMain generates a new main func and writes it to the tarball.
+func synthesizeMain(tw *tar.Writer, appFiles []string) error {
+ appMap := make(map[string]bool)
+ for _, f := range appFiles {
+ appMap[f] = true
+ }
+ var f string
+ for i := 0; i < 100; i++ {
+ f = fmt.Sprintf("app_main%d.go", i)
+ if !appMap[filepath.Join(*rootDir, f)] {
+ break
+ }
+ }
+ if appMap[filepath.Join(*rootDir, f)] {
+ return fmt.Errorf("unable to find unique name for %v", f)
+ }
+ hdr := &tar.Header{
+ Name: f,
+ Mode: 0644,
+ Size: int64(len(newMain)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return fmt.Errorf("unable to write header for %v: %v", f, err)
+ }
+ if _, err := tw.Write([]byte(newMain)); err != nil {
+ return fmt.Errorf("unable to write %v to tar file: %v", f, err)
+ }
+ return nil
+}
+
+// imports returns a map of all import directories (recursively) used by the app.
+// The return value maps full directory names to original import names.
+func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) {
+ pkg, err := ctxt.ImportDir(srcDir, 0)
+ if err != nil {
+ return nil, fmt.Errorf("unable to analyze source: %v", err)
+ }
+
+ // Resolve all non-standard-library imports
+ result := make(map[string]string)
+ for _, v := range pkg.Imports {
+ if !strings.Contains(v, ".") {
+ continue
+ }
+ src, err := findInGopath(v, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err)
+ }
+ result[src] = v
+ im, err := imports(ctxt, src, gopath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse package %v: %v", src, err)
+ }
+ for k, v := range im {
+ result[k] = v
+ }
+ }
+ return result, nil
+}
+
+// findInGopath searches the gopath for the named import directory.
+func findInGopath(dir string, gopath []string) (string, error) {
+ for _, v := range gopath {
+ dst := filepath.Join(v, "src", dir)
+ if _, err := os.Stat(dst); err == nil {
+ return dst, nil
+ }
+ }
+ return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath)
+}
+
+// copyTree copies srcDir to tar file dstDir, ignoring skipFiles.
+func copyTree(tw *tar.Writer, dstDir, srcDir string) error {
+ entries, err := ioutil.ReadDir(srcDir)
+ if err != nil {
+ return fmt.Errorf("unable to read dir %v: %v", srcDir, err)
+ }
+ for _, entry := range entries {
+ n := entry.Name()
+ if skipFiles[n] {
+ continue
+ }
+ s := filepath.Join(srcDir, n)
+ d := filepath.Join(dstDir, n)
+ if entry.IsDir() {
+ if err := copyTree(tw, d, s); err != nil {
+ return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err)
+ }
+ continue
+ }
+ if err := copyFile(tw, d, s); err != nil {
+			return fmt.Errorf("unable to copy file %v to %v: %v", s, d, err)
+ }
+ }
+ return nil
+}
+
+// copyFile copies src to tar file dst.
+func copyFile(tw *tar.Writer, dst, src string) error {
+ s, err := os.Open(src)
+ if err != nil {
+ return fmt.Errorf("unable to open %v: %v", src, err)
+ }
+ defer s.Close()
+ fi, err := s.Stat()
+ if err != nil {
+ return fmt.Errorf("unable to stat %v: %v", src, err)
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, dst)
+ if err != nil {
+ return fmt.Errorf("unable to create tar header for %v: %v", dst, err)
+ }
+ hdr.Name = dst
+ if err := tw.WriteHeader(hdr); err != nil {
+ return fmt.Errorf("unable to write header for %v: %v", dst, err)
+ }
+ _, err = io.Copy(tw, s)
+ if err != nil {
+ return fmt.Errorf("unable to copy %v to %v: %v", src, dst, err)
+ }
+ return nil
+}
+
+// checkMain verifies that there is a single "main" function.
+// It also returns a list of all Go source files in the app.
+func checkMain(ctxt *build.Context) (bool, []string, error) {
+ pkg, err := ctxt.ImportDir(*rootDir, 0)
+ if err != nil {
+ return false, nil, fmt.Errorf("unable to analyze source: %v", err)
+ }
+ if !pkg.IsCommand() {
+		errorf("Your app's package needs to be changed from %q to \"main\".", pkg.Name)
+ }
+ // Search for a "func main"
+ var hasMain bool
+ var appFiles []string
+ for _, f := range pkg.GoFiles {
+ n := filepath.Join(*rootDir, f)
+ appFiles = append(appFiles, n)
+ if hasMain, err = readFile(n); err != nil {
+ return false, nil, fmt.Errorf("error parsing %q: %v", n, err)
+ }
+ }
+ return hasMain, appFiles, nil
+}
+
+// isMain returns whether the given function declaration is a main function.
+// Such a function must be called "main", not have a receiver, and have no arguments or return types.
+func isMain(f *ast.FuncDecl) bool {
+ ft := f.Type
+ return f.Name.Name == "main" && f.Recv == nil && ft.Params.NumFields() == 0 && ft.Results.NumFields() == 0
+}
+
+// readFile reads and parses the Go source code file and returns whether it has a main function.
+func readFile(filename string) (hasMain bool, err error) {
+ var src []byte
+ src, err = ioutil.ReadFile(filename)
+ if err != nil {
+ return
+ }
+ fset := token.NewFileSet()
+	file, err := parser.ParseFile(fset, filename, src, 0)
+	if err != nil {
+		// A failed parse may leave file nil; return before dereferencing it.
+		return
+	}
+	for _, decl := range file.Decls {
+ funcDecl, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ if !isMain(funcDecl) {
+ continue
+ }
+ hasMain = true
+ break
+ }
+ return
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go
new file mode 100644
index 000000000..8093c93ff
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go
@@ -0,0 +1,72 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Program aedeploy assists with deploying App Engine "flexible environment" Go apps to production.
+// A temporary directory is created; the app, its subdirectories, and all its
+// dependencies from $GOPATH are copied into the directory; then the app
+// is deployed to production with the provided command.
+//
+// The app must be in "package main".
+//
+// This command must be issued from within the root directory of the app
+// (where the app.yaml file is located).
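+//
+// An illustrative invocation (mirroring the usage string printed below):
+//	aedeploy gcloud app deploy app.yaml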
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\t%s gcloud --verbosity debug app deploy --version myversion ./app.yaml\tDeploy app to production\n", os.Args[0])
+}
+
+var verbose bool
+
+// vlogf logs to stderr if the "-v" flag is provided.
+func vlogf(f string, v ...interface{}) {
+ if !verbose {
+ return
+ }
+ log.Printf("[aedeploy] "+f, v...)
+}
+
+func main() {
+ flag.BoolVar(&verbose, "v", false, "Verbose logging.")
+ flag.Usage = usage
+ flag.Parse()
+ if flag.NArg() < 1 {
+ usage()
+ os.Exit(1)
+ }
+
+ notice := func() {
+ fmt.Fprintln(os.Stderr, `NOTICE: aedeploy is deprecated. Just use "gcloud app deploy".`)
+ }
+
+ notice()
+ if err := deploy(); err != nil {
+ fmt.Fprintf(os.Stderr, os.Args[0]+": Error: %v\n", err)
+ notice()
+ fmt.Fprintln(os.Stderr, `You might need to update gcloud. Run "gcloud components update".`)
+ os.Exit(1)
+ }
+ notice() // Make sure they see it at the end.
+}
+
+// deploy calls the provided command to deploy the app from the temporary directory.
+func deploy() error {
+ vlogf("Running command %v", flag.Args())
+ cmd := exec.Command(flag.Arg(0), flag.Args()[1:]...)
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
+ if err := cmd.Run(); err != nil {
+ return fmt.Errorf("unable to run %q: %v", strings.Join(flag.Args(), " "), err)
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/ae.go b/vendor/google.golang.org/appengine/cmd/aefix/ae.go
new file mode 100644
index 000000000..0fe2d4ae9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/ae.go
@@ -0,0 +1,185 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "go/ast"
+ "path"
+ "strconv"
+ "strings"
+)
+
+const (
+ ctxPackage = "golang.org/x/net/context"
+
+ newPackageBase = "google.golang.org/"
+ stutterPackage = false
+)
+
+func init() {
+ register(fix{
+ "ae",
+ "2016-04-15",
+ aeFn,
+ `Update old App Engine APIs to new App Engine APIs`,
+ })
+}
+
+// logMethod is the set of methods on appengine.Context used for logging.
+var logMethod = map[string]bool{
+ "Debugf": true,
+ "Infof": true,
+ "Warningf": true,
+ "Errorf": true,
+ "Criticalf": true,
+}
+
+// mapPackage turns "appengine" into "google.golang.org/appengine", etc.
+func mapPackage(s string) string {
+ if stutterPackage {
+ s += "/" + path.Base(s)
+ }
+ return newPackageBase + s
+}
+
+func aeFn(f *ast.File) bool {
+ // During the walk, we track the last thing seen that looks like
+ // an appengine.Context, and reset it once the walk leaves a func.
+ var lastContext *ast.Ident
+
+ fixed := false
+
+ // Update imports.
+ mainImp := "appengine"
+ for _, imp := range f.Imports {
+ pth, _ := strconv.Unquote(imp.Path.Value)
+ if pth == "appengine" || strings.HasPrefix(pth, "appengine/") {
+ newPth := mapPackage(pth)
+ imp.Path.Value = strconv.Quote(newPth)
+ fixed = true
+
+ if pth == "appengine" {
+ mainImp = newPth
+ }
+ }
+ }
+
+ // Update any API changes.
+ walk(f, func(n interface{}) {
+ if ft, ok := n.(*ast.FuncType); ok && ft.Params != nil {
+			// See if this func has an `appengine.Context` arg.
+ // If so, remember its identifier.
+ for _, param := range ft.Params.List {
+ if !isPkgDot(param.Type, "appengine", "Context") {
+ continue
+ }
+ if len(param.Names) == 1 {
+ lastContext = param.Names[0]
+ break
+ }
+ }
+ return
+ }
+
+ if as, ok := n.(*ast.AssignStmt); ok {
+ if len(as.Lhs) == 1 && len(as.Rhs) == 1 {
+ // If this node is an assignment from an appengine.NewContext invocation,
+ // remember the identifier on the LHS.
+ if isCall(as.Rhs[0], "appengine", "NewContext") {
+ if ident, ok := as.Lhs[0].(*ast.Ident); ok {
+ lastContext = ident
+ return
+ }
+ }
+ // x (=|:=) appengine.Timeout(y, z)
+ // should become
+ // x, _ (=|:=) context.WithTimeout(y, z)
+ if isCall(as.Rhs[0], "appengine", "Timeout") {
+ addImport(f, ctxPackage)
+ as.Lhs = append(as.Lhs, ast.NewIdent("_"))
+ // isCall already did the type checking.
+ sel := as.Rhs[0].(*ast.CallExpr).Fun.(*ast.SelectorExpr)
+ sel.X = ast.NewIdent("context")
+ sel.Sel = ast.NewIdent("WithTimeout")
+ fixed = true
+ return
+ }
+ }
+ return
+ }
+
+ // If this node is a FuncDecl, we've finished the function, so reset lastContext.
+ if _, ok := n.(*ast.FuncDecl); ok {
+ lastContext = nil
+ return
+ }
+
+ if call, ok := n.(*ast.CallExpr); ok {
+ if isPkgDot(call.Fun, "appengine", "Datacenter") && len(call.Args) == 0 {
+ insertContext(f, call, lastContext)
+ fixed = true
+ return
+ }
+ if isPkgDot(call.Fun, "taskqueue", "QueueStats") && len(call.Args) == 3 {
+ call.Args = call.Args[:2] // drop last arg
+ fixed = true
+ return
+ }
+
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return
+ }
+ if lastContext != nil && refersTo(sel.X, lastContext) && logMethod[sel.Sel.Name] {
+ // c.Errorf(...)
+ // should become
+ // log.Errorf(c, ...)
+ addImport(f, mapPackage("appengine/log"))
+ sel.X = &ast.Ident{ // ast.NewIdent doesn't preserve the position.
+ NamePos: sel.X.Pos(),
+ Name: "log",
+ }
+ insertContext(f, call, lastContext)
+ fixed = true
+ return
+ }
+ }
+ })
+
+ // Change any `appengine.Context` to `context.Context`.
+ // Do this in a separate walk because the previous walk
+ // wants to identify "appengine.Context".
+ walk(f, func(n interface{}) {
+ expr, ok := n.(ast.Expr)
+ if ok && isPkgDot(expr, "appengine", "Context") {
+ addImport(f, ctxPackage)
+ // isPkgDot did the type checking.
+ n.(*ast.SelectorExpr).X.(*ast.Ident).Name = "context"
+ fixed = true
+ return
+ }
+ })
+
+ // The changes above might remove the need to import "appengine".
+ // Check if it's used, and drop it if it isn't.
+ if fixed && !usesImport(f, mainImp) {
+ deleteImport(f, mainImp)
+ }
+
+ return fixed
+}
+
+// ctx may be nil.
+func insertContext(f *ast.File, call *ast.CallExpr, ctx *ast.Ident) {
+ if ctx == nil {
+ // context is unknown, so use a plain "ctx".
+ ctx = ast.NewIdent("ctx")
+ } else {
+ // Create a fresh *ast.Ident so we drop the position information.
+ ctx = ast.NewIdent(ctx.Name)
+ }
+
+ call.Args = append([]ast.Expr{ctx}, call.Args...)
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go b/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go
new file mode 100644
index 000000000..21f5695b9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go
@@ -0,0 +1,144 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package main
+
+func init() {
+ addTestCases(aeTests, nil)
+}
+
+var aeTests = []testCase{
+ // Collection of fixes:
+ // - imports
+ // - appengine.Timeout -> context.WithTimeout
+ // - add ctx arg to appengine.Datacenter
+ // - logging API
+ {
+ Name: "ae.0",
+ In: `package foo
+
+import (
+ "net/http"
+ "time"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+func f(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+
+ c = appengine.Timeout(c, 5*time.Second)
+ err := datastore.ErrNoSuchEntity
+ c.Errorf("Something interesting happened: %v", err)
+ _ = appengine.Datacenter()
+}
+`,
+ Out: `package foo
+
+import (
+ "net/http"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/log"
+)
+
+func f(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+
+ c, _ = context.WithTimeout(c, 5*time.Second)
+ err := datastore.ErrNoSuchEntity
+ log.Errorf(c, "Something interesting happened: %v", err)
+ _ = appengine.Datacenter(c)
+}
+`,
+ },
+
+ // Updating a function that takes an appengine.Context arg.
+ {
+ Name: "ae.1",
+ In: `package foo
+
+import (
+ "appengine"
+)
+
+func LogSomething(c2 appengine.Context) {
+ c2.Warningf("Stand back! I'm going to try science!")
+}
+`,
+ Out: `package foo
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/log"
+)
+
+func LogSomething(c2 context.Context) {
+ log.Warningf(c2, "Stand back! I'm going to try science!")
+}
+`,
+ },
+
+ // Less widely used API changes:
+ // - drop maxTasks arg to taskqueue.QueueStats
+ {
+ Name: "ae.2",
+ In: `package foo
+
+import (
+ "appengine"
+ "appengine/taskqueue"
+)
+
+func f(ctx appengine.Context) {
+ stats, err := taskqueue.QueueStats(ctx, []string{"one", "two"}, 0)
+}
+`,
+ Out: `package foo
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/taskqueue"
+)
+
+func f(ctx context.Context) {
+ stats, err := taskqueue.QueueStats(ctx, []string{"one", "two"})
+}
+`,
+ },
+
+ // Check that the main "appengine" import will not be dropped
+ // if an appengine.Context -> context.Context change happens
+ // but the appengine package is still referenced.
+ {
+ Name: "ae.3",
+ In: `package foo
+
+import (
+ "appengine"
+ "io"
+)
+
+func f(ctx appengine.Context, w io.Writer) {
+ _ = appengine.IsDevAppServer()
+}
+`,
+ Out: `package foo
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/appengine"
+ "io"
+)
+
+func f(ctx context.Context, w io.Writer) {
+ _ = appengine.IsDevAppServer()
+}
+`,
+ },
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/fix.go b/vendor/google.golang.org/appengine/cmd/aefix/fix.go
new file mode 100644
index 000000000..a100be794
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/fix.go
@@ -0,0 +1,848 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "os"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type fix struct {
+ name string
+ date string // date that fix was introduced, in YYYY-MM-DD format
+ f func(*ast.File) bool
+ desc string
+}
+
+// main runs sort.Sort(byName(fixes)) before printing the list of fixes.
+type byName []fix
+
+func (f byName) Len() int { return len(f) }
+func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f byName) Less(i, j int) bool { return f[i].name < f[j].name }
+
+// main runs sort.Sort(byDate(fixes)) before applying fixes.
+type byDate []fix
+
+func (f byDate) Len() int { return len(f) }
+func (f byDate) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+func (f byDate) Less(i, j int) bool { return f[i].date < f[j].date }
+
+var fixes []fix
+
+func register(f fix) {
+ fixes = append(fixes, f)
+}
+
+// walk traverses the AST x, calling visit(y) for each node y in the tree,
+// and also calling visit with a pointer to each ast.Expr, ast.Stmt, and
+// *ast.BlockStmt, in a bottom-up traversal.
+func walk(x interface{}, visit func(interface{})) {
+ walkBeforeAfter(x, nop, visit)
+}
+
+func nop(interface{}) {}
+
+// walkBeforeAfter is like walk but calls before(x) before traversing
+// x's children and after(x) afterward.
+func walkBeforeAfter(x interface{}, before, after func(interface{})) {
+ before(x)
+
+ switch n := x.(type) {
+ default:
+ panic(fmt.Errorf("unexpected type %T in walkBeforeAfter", x))
+
+ case nil:
+
+ // pointers to interfaces
+ case *ast.Decl:
+ walkBeforeAfter(*n, before, after)
+ case *ast.Expr:
+ walkBeforeAfter(*n, before, after)
+ case *ast.Spec:
+ walkBeforeAfter(*n, before, after)
+ case *ast.Stmt:
+ walkBeforeAfter(*n, before, after)
+
+ // pointers to struct pointers
+ case **ast.BlockStmt:
+ walkBeforeAfter(*n, before, after)
+ case **ast.CallExpr:
+ walkBeforeAfter(*n, before, after)
+ case **ast.FieldList:
+ walkBeforeAfter(*n, before, after)
+ case **ast.FuncType:
+ walkBeforeAfter(*n, before, after)
+ case **ast.Ident:
+ walkBeforeAfter(*n, before, after)
+ case **ast.BasicLit:
+ walkBeforeAfter(*n, before, after)
+
+ // pointers to slices
+ case *[]ast.Decl:
+ walkBeforeAfter(*n, before, after)
+ case *[]ast.Expr:
+ walkBeforeAfter(*n, before, after)
+ case *[]*ast.File:
+ walkBeforeAfter(*n, before, after)
+ case *[]*ast.Ident:
+ walkBeforeAfter(*n, before, after)
+ case *[]ast.Spec:
+ walkBeforeAfter(*n, before, after)
+ case *[]ast.Stmt:
+ walkBeforeAfter(*n, before, after)
+
+ // These are ordered and grouped to match ../../pkg/go/ast/ast.go
+ case *ast.Field:
+ walkBeforeAfter(&n.Names, before, after)
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Tag, before, after)
+ case *ast.FieldList:
+ for _, field := range n.List {
+ walkBeforeAfter(field, before, after)
+ }
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.Ellipsis:
+ walkBeforeAfter(&n.Elt, before, after)
+ case *ast.BasicLit:
+ case *ast.FuncLit:
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.CompositeLit:
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Elts, before, after)
+ case *ast.ParenExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.SelectorExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.IndexExpr:
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Index, before, after)
+ case *ast.SliceExpr:
+ walkBeforeAfter(&n.X, before, after)
+ if n.Low != nil {
+ walkBeforeAfter(&n.Low, before, after)
+ }
+ if n.High != nil {
+ walkBeforeAfter(&n.High, before, after)
+ }
+ case *ast.TypeAssertExpr:
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Type, before, after)
+ case *ast.CallExpr:
+ walkBeforeAfter(&n.Fun, before, after)
+ walkBeforeAfter(&n.Args, before, after)
+ case *ast.StarExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.UnaryExpr:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.BinaryExpr:
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Y, before, after)
+ case *ast.KeyValueExpr:
+ walkBeforeAfter(&n.Key, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+
+ case *ast.ArrayType:
+ walkBeforeAfter(&n.Len, before, after)
+ walkBeforeAfter(&n.Elt, before, after)
+ case *ast.StructType:
+ walkBeforeAfter(&n.Fields, before, after)
+ case *ast.FuncType:
+ walkBeforeAfter(&n.Params, before, after)
+ if n.Results != nil {
+ walkBeforeAfter(&n.Results, before, after)
+ }
+ case *ast.InterfaceType:
+ walkBeforeAfter(&n.Methods, before, after)
+ case *ast.MapType:
+ walkBeforeAfter(&n.Key, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+ case *ast.ChanType:
+ walkBeforeAfter(&n.Value, before, after)
+
+ case *ast.BadStmt:
+ case *ast.DeclStmt:
+ walkBeforeAfter(&n.Decl, before, after)
+ case *ast.EmptyStmt:
+ case *ast.LabeledStmt:
+ walkBeforeAfter(&n.Stmt, before, after)
+ case *ast.ExprStmt:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.SendStmt:
+ walkBeforeAfter(&n.Chan, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+ case *ast.IncDecStmt:
+ walkBeforeAfter(&n.X, before, after)
+ case *ast.AssignStmt:
+ walkBeforeAfter(&n.Lhs, before, after)
+ walkBeforeAfter(&n.Rhs, before, after)
+ case *ast.GoStmt:
+ walkBeforeAfter(&n.Call, before, after)
+ case *ast.DeferStmt:
+ walkBeforeAfter(&n.Call, before, after)
+ case *ast.ReturnStmt:
+ walkBeforeAfter(&n.Results, before, after)
+ case *ast.BranchStmt:
+ case *ast.BlockStmt:
+ walkBeforeAfter(&n.List, before, after)
+ case *ast.IfStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Cond, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ walkBeforeAfter(&n.Else, before, after)
+ case *ast.CaseClause:
+ walkBeforeAfter(&n.List, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.SwitchStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Tag, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.TypeSwitchStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Assign, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.CommClause:
+ walkBeforeAfter(&n.Comm, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.SelectStmt:
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.ForStmt:
+ walkBeforeAfter(&n.Init, before, after)
+ walkBeforeAfter(&n.Cond, before, after)
+ walkBeforeAfter(&n.Post, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+ case *ast.RangeStmt:
+ walkBeforeAfter(&n.Key, before, after)
+ walkBeforeAfter(&n.Value, before, after)
+ walkBeforeAfter(&n.X, before, after)
+ walkBeforeAfter(&n.Body, before, after)
+
+ case *ast.ImportSpec:
+ case *ast.ValueSpec:
+ walkBeforeAfter(&n.Type, before, after)
+ walkBeforeAfter(&n.Values, before, after)
+ walkBeforeAfter(&n.Names, before, after)
+ case *ast.TypeSpec:
+ walkBeforeAfter(&n.Type, before, after)
+
+ case *ast.BadDecl:
+ case *ast.GenDecl:
+ walkBeforeAfter(&n.Specs, before, after)
+ case *ast.FuncDecl:
+ if n.Recv != nil {
+ walkBeforeAfter(&n.Recv, before, after)
+ }
+ walkBeforeAfter(&n.Type, before, after)
+ if n.Body != nil {
+ walkBeforeAfter(&n.Body, before, after)
+ }
+
+ case *ast.File:
+ walkBeforeAfter(&n.Decls, before, after)
+
+ case *ast.Package:
+ walkBeforeAfter(&n.Files, before, after)
+
+ case []*ast.File:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Decl:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Expr:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []*ast.Ident:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Stmt:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ case []ast.Spec:
+ for i := range n {
+ walkBeforeAfter(&n[i], before, after)
+ }
+ }
+ after(x)
+}
+
+// imports returns true if f imports path.
+func imports(f *ast.File, path string) bool {
+ return importSpec(f, path) != nil
+}
+
+// importSpec returns the import spec if f imports path,
+// or nil otherwise.
+func importSpec(f *ast.File, path string) *ast.ImportSpec {
+ for _, s := range f.Imports {
+ if importPath(s) == path {
+ return s
+ }
+ }
+ return nil
+}
+
+// importPath returns the unquoted import path of s,
+// or "" if the path is not properly quoted.
+func importPath(s *ast.ImportSpec) string {
+ t, err := strconv.Unquote(s.Path.Value)
+ if err == nil {
+ return t
+ }
+ return ""
+}
+
+// declImports reports whether gen contains an import of path.
+func declImports(gen *ast.GenDecl, path string) bool {
+ if gen.Tok != token.IMPORT {
+ return false
+ }
+ for _, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ if importPath(impspec) == path {
+ return true
+ }
+ }
+ return false
+}
+
+// isPkgDot returns true if t is the expression "pkg.name"
+// where pkg is an imported identifier.
+func isPkgDot(t ast.Expr, pkg, name string) bool {
+ sel, ok := t.(*ast.SelectorExpr)
+ return ok && isTopName(sel.X, pkg) && sel.Sel.String() == name
+}
+
+// isPtrPkgDot returns true if t is the expression "*pkg.name"
+// where pkg is an imported identifier.
+func isPtrPkgDot(t ast.Expr, pkg, name string) bool {
+ ptr, ok := t.(*ast.StarExpr)
+ return ok && isPkgDot(ptr.X, pkg, name)
+}
+
+// isTopName returns true if n is a top-level unresolved identifier with the given name.
+func isTopName(n ast.Expr, name string) bool {
+ id, ok := n.(*ast.Ident)
+ return ok && id.Name == name && id.Obj == nil
+}
+
+// isName returns true if n is an identifier with the given name.
+func isName(n ast.Expr, name string) bool {
+ id, ok := n.(*ast.Ident)
+ return ok && id.String() == name
+}
+
+// isCall returns true if t is a call to pkg.name.
+func isCall(t ast.Expr, pkg, name string) bool {
+ call, ok := t.(*ast.CallExpr)
+ return ok && isPkgDot(call.Fun, pkg, name)
+}
+
+// If n is an *ast.Ident, isIdent returns it; otherwise isIdent returns nil.
+func isIdent(n interface{}) *ast.Ident {
+ id, _ := n.(*ast.Ident)
+ return id
+}
+
+// refersTo returns true if n is a reference to the same object as x.
+func refersTo(n ast.Node, x *ast.Ident) bool {
+ id, ok := n.(*ast.Ident)
+ // The test of id.Name == x.Name handles top-level unresolved
+ // identifiers, which all have Obj == nil.
+ return ok && id.Obj == x.Obj && id.Name == x.Name
+}
+
+// isBlank returns true if n is the blank identifier.
+func isBlank(n ast.Expr) bool {
+ return isName(n, "_")
+}
+
+// isEmptyString returns true if n is an empty string literal.
+func isEmptyString(n ast.Expr) bool {
+ lit, ok := n.(*ast.BasicLit)
+ return ok && lit.Kind == token.STRING && len(lit.Value) == 2
+}
+
+func warn(pos token.Pos, msg string, args ...interface{}) {
+ if pos.IsValid() {
+ msg = "%s: " + msg
+ arg1 := []interface{}{fset.Position(pos).String()}
+ args = append(arg1, args...)
+ }
+ fmt.Fprintf(os.Stderr, msg+"\n", args...)
+}
+
+// countUses returns the number of uses of the identifier x in scope.
+func countUses(x *ast.Ident, scope []ast.Stmt) int {
+ count := 0
+ ff := func(n interface{}) {
+ if n, ok := n.(ast.Node); ok && refersTo(n, x) {
+ count++
+ }
+ }
+ for _, n := range scope {
+ walk(n, ff)
+ }
+ return count
+}
+
+// rewriteUses replaces all uses of the identifier x and !x in scope
+// with f(x.Pos()) and fnot(x.Pos()).
+func rewriteUses(x *ast.Ident, f, fnot func(token.Pos) ast.Expr, scope []ast.Stmt) {
+ var lastF ast.Expr
+ ff := func(n interface{}) {
+ ptr, ok := n.(*ast.Expr)
+ if !ok {
+ return
+ }
+ nn := *ptr
+
+ // The child node was just walked and possibly replaced.
+ // If it was replaced and this is a negation, replace with fnot(p).
+ not, ok := nn.(*ast.UnaryExpr)
+ if ok && not.Op == token.NOT && not.X == lastF {
+ *ptr = fnot(nn.Pos())
+ return
+ }
+ if refersTo(nn, x) {
+ lastF = f(nn.Pos())
+ *ptr = lastF
+ }
+ }
+ for _, n := range scope {
+ walk(n, ff)
+ }
+}
+
+// assignsTo returns true if any of the code in scope assigns to or takes the address of x.
+func assignsTo(x *ast.Ident, scope []ast.Stmt) bool {
+ assigned := false
+ ff := func(n interface{}) {
+ if assigned {
+ return
+ }
+ switch n := n.(type) {
+ case *ast.UnaryExpr:
+ // use of &x
+ if n.Op == token.AND && refersTo(n.X, x) {
+ assigned = true
+ return
+ }
+ case *ast.AssignStmt:
+ for _, l := range n.Lhs {
+ if refersTo(l, x) {
+ assigned = true
+ return
+ }
+ }
+ }
+ }
+ for _, n := range scope {
+ if assigned {
+ break
+ }
+ walk(n, ff)
+ }
+ return assigned
+}
+
+// newPkgDot returns an ast.Expr referring to "pkg.name" at position pos.
+func newPkgDot(pos token.Pos, pkg, name string) ast.Expr {
+ return &ast.SelectorExpr{
+ X: &ast.Ident{
+ NamePos: pos,
+ Name: pkg,
+ },
+ Sel: &ast.Ident{
+ NamePos: pos,
+ Name: name,
+ },
+ }
+}
+
+// renameTop renames all references to the top-level name old.
+// It returns true if it makes any changes.
+func renameTop(f *ast.File, old, new string) bool {
+ var fixed bool
+
+ // Rename any conflicting imports
+ // (assuming package name is last element of path).
+ for _, s := range f.Imports {
+ if s.Name != nil {
+ if s.Name.Name == old {
+ s.Name.Name = new
+ fixed = true
+ }
+ } else {
+ _, thisName := path.Split(importPath(s))
+ if thisName == old {
+ s.Name = ast.NewIdent(new)
+ fixed = true
+ }
+ }
+ }
+
+ // Rename any top-level declarations.
+ for _, d := range f.Decls {
+ switch d := d.(type) {
+ case *ast.FuncDecl:
+ if d.Recv == nil && d.Name.Name == old {
+ d.Name.Name = new
+ d.Name.Obj.Name = new
+ fixed = true
+ }
+ case *ast.GenDecl:
+ for _, s := range d.Specs {
+ switch s := s.(type) {
+ case *ast.TypeSpec:
+ if s.Name.Name == old {
+ s.Name.Name = new
+ s.Name.Obj.Name = new
+ fixed = true
+ }
+ case *ast.ValueSpec:
+ for _, n := range s.Names {
+ if n.Name == old {
+ n.Name = new
+ n.Obj.Name = new
+ fixed = true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Rename top-level old to new, both unresolved names
+ // (probably defined in another file) and names that resolve
+ // to a declaration we renamed.
+ walk(f, func(n interface{}) {
+ id, ok := n.(*ast.Ident)
+ if ok && isTopName(id, old) {
+ id.Name = new
+ fixed = true
+ }
+ if ok && id.Obj != nil && id.Name == old && id.Obj.Name == new {
+ id.Name = id.Obj.Name
+ fixed = true
+ }
+ })
+
+ return fixed
+}
+
+// matchLen returns the length of the longest prefix shared by x and y.
+func matchLen(x, y string) int {
+ i := 0
+ for i < len(x) && i < len(y) && x[i] == y[i] {
+ i++
+ }
+ return i
+}
+
+// addImport adds the import path to the file f, if absent.
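+// For example (illustrative), addImport(f, "golang.org/x/net/context") makes
+// the "context" package available, first renaming any conflicting top-level
+// identifier "context" to "context_".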
+func addImport(f *ast.File, ipath string) (added bool) {
+ if imports(f, ipath) {
+ return false
+ }
+
+ // Determine name of import.
+ // Assume added imports follow convention of using last element.
+ _, name := path.Split(ipath)
+
+ // Rename any conflicting top-level references from name to name_.
+ renameTop(f, name, name+"_")
+
+ newImport := &ast.ImportSpec{
+ Path: &ast.BasicLit{
+ Kind: token.STRING,
+ Value: strconv.Quote(ipath),
+ },
+ }
+
+ // Find an import decl to add to.
+ var (
+ bestMatch = -1
+ lastImport = -1
+ impDecl *ast.GenDecl
+ impIndex = -1
+ )
+ for i, decl := range f.Decls {
+ gen, ok := decl.(*ast.GenDecl)
+ if ok && gen.Tok == token.IMPORT {
+ lastImport = i
+ // Do not add to import "C", to avoid disrupting the
+ // association with its doc comment, breaking cgo.
+ if declImports(gen, "C") {
+ continue
+ }
+
+ // Compute longest shared prefix with imports in this block.
+ for j, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ n := matchLen(importPath(impspec), ipath)
+ if n > bestMatch {
+ bestMatch = n
+ impDecl = gen
+ impIndex = j
+ }
+ }
+ }
+ }
+
+ // If no import decl found, add one after the last import.
+ if impDecl == nil {
+ impDecl = &ast.GenDecl{
+ Tok: token.IMPORT,
+ }
+ f.Decls = append(f.Decls, nil)
+ copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
+ f.Decls[lastImport+1] = impDecl
+ }
+
+ // Ensure the import decl has parentheses, if needed.
+ if len(impDecl.Specs) > 0 && !impDecl.Lparen.IsValid() {
+ impDecl.Lparen = impDecl.Pos()
+ }
+
+ insertAt := impIndex + 1
+ if insertAt == 0 {
+ insertAt = len(impDecl.Specs)
+ }
+ impDecl.Specs = append(impDecl.Specs, nil)
+ copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
+ impDecl.Specs[insertAt] = newImport
+ if insertAt > 0 {
+ // Assign same position as the previous import,
+ // so that the sorter sees it as being in the same block.
+ prev := impDecl.Specs[insertAt-1]
+ newImport.Path.ValuePos = prev.Pos()
+ newImport.EndPos = prev.Pos()
+ }
+
+ f.Imports = append(f.Imports, newImport)
+ return true
+}
+
+// deleteImport deletes the import path from the file f, if present.
+func deleteImport(f *ast.File, path string) (deleted bool) {
+ oldImport := importSpec(f, path)
+
+ // Find the import node that imports path, if any.
+ for i, decl := range f.Decls {
+ gen, ok := decl.(*ast.GenDecl)
+ if !ok || gen.Tok != token.IMPORT {
+ continue
+ }
+ for j, spec := range gen.Specs {
+ impspec := spec.(*ast.ImportSpec)
+ if oldImport != impspec {
+ continue
+ }
+
+ // We found an import spec that imports path.
+ // Delete it.
+ deleted = true
+ copy(gen.Specs[j:], gen.Specs[j+1:])
+ gen.Specs = gen.Specs[:len(gen.Specs)-1]
+
+ // If this was the last import spec in this decl,
+ // delete the decl, too.
+ if len(gen.Specs) == 0 {
+ copy(f.Decls[i:], f.Decls[i+1:])
+ f.Decls = f.Decls[:len(f.Decls)-1]
+ } else if len(gen.Specs) == 1 {
+ gen.Lparen = token.NoPos // drop parens
+ }
+ if j > 0 {
+ // We deleted an entry but now there will be
+ // a blank line-sized hole where the import was.
+ // Close the hole by making the previous
+ // import appear to "end" where this one did.
+ gen.Specs[j-1].(*ast.ImportSpec).EndPos = impspec.End()
+ }
+ break
+ }
+ }
+
+ // Delete it from f.Imports.
+ for i, imp := range f.Imports {
+ if imp == oldImport {
+ copy(f.Imports[i:], f.Imports[i+1:])
+ f.Imports = f.Imports[:len(f.Imports)-1]
+ break
+ }
+ }
+
+ return
+}
+
+// rewriteImport rewrites any import of path oldPath to path newPath.
+func rewriteImport(f *ast.File, oldPath, newPath string) (rewrote bool) {
+ for _, imp := range f.Imports {
+ if importPath(imp) == oldPath {
+ rewrote = true
+ // record old End, because the default is to compute
+ // it using the length of imp.Path.Value.
+ imp.EndPos = imp.End()
+ imp.Path.Value = strconv.Quote(newPath)
+ }
+ }
+ return
+}
+
+func usesImport(f *ast.File, path string) (used bool) {
+ spec := importSpec(f, path)
+ if spec == nil {
+ return
+ }
+
+ name := spec.Name.String()
+ switch name {
+ case "<nil>":
+ // If the package name is not explicitly specified,
+ // make an educated guess. This is not guaranteed to be correct.
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash == -1 {
+ name = path
+ } else {
+ name = path[lastSlash+1:]
+ }
+ case "_", ".":
+ // Not sure if this import is used - err on the side of caution.
+ return true
+ }
+
+ walk(f, func(n interface{}) {
+ sel, ok := n.(*ast.SelectorExpr)
+ if ok && isTopName(sel.X, name) {
+ used = true
+ }
+ })
+
+ return
+}
+
+func expr(s string) ast.Expr {
+ x, err := parser.ParseExpr(s)
+ if err != nil {
+ panic("parsing " + s + ": " + err.Error())
+ }
+ // Remove position information to avoid spurious newlines.
+ killPos(reflect.ValueOf(x))
+ return x
+}
+
+var posType = reflect.TypeOf(token.Pos(0))
+
+func killPos(v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if !v.IsNil() {
+ killPos(v.Elem())
+ }
+ case reflect.Slice:
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ killPos(v.Index(i))
+ }
+ case reflect.Struct:
+ n := v.NumField()
+ for i := 0; i < n; i++ {
+ f := v.Field(i)
+ if f.Type() == posType {
+ f.SetInt(0)
+ continue
+ }
+ killPos(f)
+ }
+ }
+}
+
+// A Rename describes a single renaming.
+type rename struct {
+ OldImport string // only apply rename if this import is present
+ NewImport string // add this import during rewrite
+ Old string // old name: p.T or *p.T
+ New string // new name: p.T or *p.T
+}
+
+func renameFix(tab []rename) func(*ast.File) bool {
+ return func(f *ast.File) bool {
+ return renameFixTab(f, tab)
+ }
+}
+
+func parseName(s string) (ptr bool, pkg, nam string) {
+ i := strings.Index(s, ".")
+ if i < 0 {
+ panic("parseName: invalid name " + s)
+ }
+ if strings.HasPrefix(s, "*") {
+ ptr = true
+ s = s[1:]
+ i--
+ }
+ pkg = s[:i]
+ nam = s[i+1:]
+ return
+}
+
+func renameFixTab(f *ast.File, tab []rename) bool {
+ fixed := false
+ added := map[string]bool{}
+ check := map[string]bool{}
+ for _, t := range tab {
+ if !imports(f, t.OldImport) {
+ continue
+ }
+ optr, opkg, onam := parseName(t.Old)
+ walk(f, func(n interface{}) {
+ np, ok := n.(*ast.Expr)
+ if !ok {
+ return
+ }
+ x := *np
+ if optr {
+ p, ok := x.(*ast.StarExpr)
+ if !ok {
+ return
+ }
+ x = p.X
+ }
+ if !isPkgDot(x, opkg, onam) {
+ return
+ }
+ if t.NewImport != "" && !added[t.NewImport] {
+ addImport(f, t.NewImport)
+ added[t.NewImport] = true
+ }
+ *np = expr(t.New)
+ check[t.OldImport] = true
+ fixed = true
+ })
+ }
+
+ for ipath := range check {
+ if !usesImport(f, ipath) {
+ deleteImport(f, ipath)
+ }
+ }
+ return fixed
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/main.go b/vendor/google.golang.org/appengine/cmd/aefix/main.go
new file mode 100644
index 000000000..8e193a6ad
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/main.go
@@ -0,0 +1,258 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+var (
+ fset = token.NewFileSet()
+ exitCode = 0
+)
+
+var allowedRewrites = flag.String("r", "",
+ "restrict the rewrites to this comma-separated list")
+
+var forceRewrites = flag.String("force", "",
+ "force these fixes to run even if the code looks updated")
+
+var allowed, force map[string]bool
+
+var doDiff = flag.Bool("diff", false, "display diffs instead of rewriting files")
+
+// enable for debugging fix failures
+const debug = false // display incorrectly reformatted source and exit
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: aefix [-diff] [-r fixname,...] [-force fixname,...] [path ...]\n")
+ flag.PrintDefaults()
+ fmt.Fprintf(os.Stderr, "\nAvailable rewrites are:\n")
+ sort.Sort(byName(fixes))
+ for _, f := range fixes {
+ fmt.Fprintf(os.Stderr, "\n%s\n", f.name)
+ desc := strings.TrimSpace(f.desc)
+ desc = strings.Replace(desc, "\n", "\n\t", -1)
+ fmt.Fprintf(os.Stderr, "\t%s\n", desc)
+ }
+ os.Exit(2)
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ sort.Sort(byDate(fixes))
+
+ if *allowedRewrites != "" {
+ allowed = make(map[string]bool)
+ for _, f := range strings.Split(*allowedRewrites, ",") {
+ allowed[f] = true
+ }
+ }
+
+ if *forceRewrites != "" {
+ force = make(map[string]bool)
+ for _, f := range strings.Split(*forceRewrites, ",") {
+ force[f] = true
+ }
+ }
+
+ if flag.NArg() == 0 {
+ if err := processFile("standard input", true); err != nil {
+ report(err)
+ }
+ os.Exit(exitCode)
+ }
+
+ for i := 0; i < flag.NArg(); i++ {
+ path := flag.Arg(i)
+ switch dir, err := os.Stat(path); {
+ case err != nil:
+ report(err)
+ case dir.IsDir():
+ walkDir(path)
+ default:
+ if err := processFile(path, false); err != nil {
+ report(err)
+ }
+ }
+ }
+
+ os.Exit(exitCode)
+}
+
+const parserMode = parser.ParseComments
+
+func gofmtFile(f *ast.File) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := format.Node(&buf, fset, f); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func processFile(filename string, useStdin bool) error {
+ var f *os.File
+ var err error
+ var fixlog bytes.Buffer
+
+ if useStdin {
+ f = os.Stdin
+ } else {
+ f, err = os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ }
+
+ src, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+
+ file, err := parser.ParseFile(fset, filename, src, parserMode)
+ if err != nil {
+ return err
+ }
+
+ // Apply all fixes to file.
+ newFile := file
+ fixed := false
+ for _, fix := range fixes {
+ if allowed != nil && !allowed[fix.name] {
+ continue
+ }
+ if fix.f(newFile) {
+ fixed = true
+ fmt.Fprintf(&fixlog, " %s", fix.name)
+
+ // AST changed.
+ // Print and parse, to update any missing scoping
+ // or position information for subsequent fixers.
+ newSrc, err := gofmtFile(newFile)
+ if err != nil {
+ return err
+ }
+ newFile, err = parser.ParseFile(fset, filename, newSrc, parserMode)
+ if err != nil {
+ if debug {
+ fmt.Printf("%s", newSrc)
+ report(err)
+ os.Exit(exitCode)
+ }
+ return err
+ }
+ }
+ }
+ if !fixed {
+ return nil
+ }
+ fmt.Fprintf(os.Stderr, "%s: fixed %s\n", filename, fixlog.String()[1:])
+
+ // Print AST. We did that after each fix, so this appears
+ // redundant, but it is necessary to generate gofmt-compatible
+ // source code in a few cases. The official gofmt style is the
+ // output of the printer run on a standard AST generated by the parser,
+ // but the source we generated inside the loop above is the
+ // output of the printer run on a mangled AST generated by a fixer.
+ newSrc, err := gofmtFile(newFile)
+ if err != nil {
+ return err
+ }
+
+ if *doDiff {
+ data, err := diff(src, newSrc)
+ if err != nil {
+ return fmt.Errorf("computing diff: %s", err)
+ }
+ fmt.Printf("diff %s fixed/%s\n", filename, filename)
+ os.Stdout.Write(data)
+ return nil
+ }
+
+ if useStdin {
+ os.Stdout.Write(newSrc)
+ return nil
+ }
+
+ return ioutil.WriteFile(f.Name(), newSrc, 0)
+}
+
+var gofmtBuf bytes.Buffer
+
+func gofmt(n interface{}) string {
+ gofmtBuf.Reset()
+ if err := format.Node(&gofmtBuf, fset, n); err != nil {
+ return "<" + err.Error() + ">"
+ }
+ return gofmtBuf.String()
+}
+
+func report(err error) {
+ scanner.PrintError(os.Stderr, err)
+ exitCode = 2
+}
+
+func walkDir(path string) {
+ filepath.Walk(path, visitFile)
+}
+
+func visitFile(path string, f os.FileInfo, err error) error {
+ if err == nil && isGoFile(f) {
+ err = processFile(path, false)
+ }
+ if err != nil {
+ report(err)
+ }
+ return nil
+}
+
+func isGoFile(f os.FileInfo) bool {
+ // ignore non-Go files
+ name := f.Name()
+ return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
+}
+
+func diff(b1, b2 []byte) (data []byte, err error) {
+ f1, err := ioutil.TempFile("", "go-fix")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(f1.Name())
+ defer f1.Close()
+
+ f2, err := ioutil.TempFile("", "go-fix")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(f2.Name())
+ defer f2.Close()
+
+ f1.Write(b1)
+ f2.Write(b2)
+
+ data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
+ if len(data) > 0 {
+ // diff exits with a non-zero status when the files don't match.
+ // Ignore that failure as long as we get output.
+ err = nil
+ }
+ return
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/main_test.go b/vendor/google.golang.org/appengine/cmd/aefix/main_test.go
new file mode 100644
index 000000000..2151bf29e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/main_test.go
@@ -0,0 +1,129 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "go/ast"
+ "go/parser"
+ "strings"
+ "testing"
+)
+
+type testCase struct {
+ Name string
+ Fn func(*ast.File) bool
+ In string
+ Out string
+}
+
+var testCases []testCase
+
+func addTestCases(t []testCase, fn func(*ast.File) bool) {
+ // Fill in fn to avoid repetition in definitions.
+ if fn != nil {
+ for i := range t {
+ if t[i].Fn == nil {
+ t[i].Fn = fn
+ }
+ }
+ }
+ testCases = append(testCases, t...)
+}
+
+func fnop(*ast.File) bool { return false }
+
+func parseFixPrint(t *testing.T, fn func(*ast.File) bool, desc, in string, mustBeGofmt bool) (out string, fixed, ok bool) {
+ file, err := parser.ParseFile(fset, desc, in, parserMode)
+ if err != nil {
+ t.Errorf("%s: parsing: %v", desc, err)
+ return
+ }
+
+ outb, err := gofmtFile(file)
+ if err != nil {
+ t.Errorf("%s: printing: %v", desc, err)
+ return
+ }
+ if s := string(outb); in != s && mustBeGofmt {
+ t.Errorf("%s: not gofmt-formatted.\n--- %s\n%s\n--- %s | gofmt\n%s",
+ desc, desc, in, desc, s)
+ tdiff(t, in, s)
+ return
+ }
+
+ if fn == nil {
+ for _, fix := range fixes {
+ if fix.f(file) {
+ fixed = true
+ }
+ }
+ } else {
+ fixed = fn(file)
+ }
+
+ outb, err = gofmtFile(file)
+ if err != nil {
+ t.Errorf("%s: printing: %v", desc, err)
+ return
+ }
+
+ return string(outb), fixed, true
+}
+
+func TestRewrite(t *testing.T) {
+ for _, tt := range testCases {
+ // Apply fix: should get tt.Out.
+ out, fixed, ok := parseFixPrint(t, tt.Fn, tt.Name, tt.In, true)
+ if !ok {
+ continue
+ }
+
+ // reformat to get printing right
+ out, _, ok = parseFixPrint(t, fnop, tt.Name, out, false)
+ if !ok {
+ continue
+ }
+
+ if out != tt.Out {
+ t.Errorf("%s: incorrect output.\n", tt.Name)
+ if !strings.HasPrefix(tt.Name, "testdata/") {
+ t.Errorf("--- have\n%s\n--- want\n%s", out, tt.Out)
+ }
+ tdiff(t, out, tt.Out)
+ continue
+ }
+
+ if changed := out != tt.In; changed != fixed {
+ t.Errorf("%s: changed=%v != fixed=%v", tt.Name, changed, fixed)
+ continue
+ }
+
+ // Should not change if run again.
+ out2, fixed2, ok := parseFixPrint(t, tt.Fn, tt.Name+" output", out, true)
+ if !ok {
+ continue
+ }
+
+ if fixed2 {
+ t.Errorf("%s: applied fixes during second round", tt.Name)
+ continue
+ }
+
+ if out2 != out {
+ t.Errorf("%s: changed output after second round of fixes.\n--- output after first round\n%s\n--- output after second round\n%s",
+ tt.Name, out, out2)
+ tdiff(t, out, out2)
+ }
+ }
+}
+
+func tdiff(t *testing.T, a, b string) {
+ data, err := diff([]byte(a), []byte(b))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ t.Error(string(data))
+}
diff --git a/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go b/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go
new file mode 100644
index 000000000..d54d37547
--- /dev/null
+++ b/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go
@@ -0,0 +1,673 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "os"
+ "reflect"
+ "strings"
+)
+
+// Partial type checker.
+//
+// The fact that it is partial is very important: the input is
+// an AST and a description of some type information to
+// assume about one or more packages, but not all the
+// packages that the program imports. The checker is
+// expected to do as much as it can with what it has been
+// given. There is not enough information supplied to do
+// a full type check, but the type checker is expected to
+// apply information that can be derived from variable
+// declarations, function and method returns, and type switches
+// as far as it can, so that the caller can still tell the types
+// of expressions relevant to a particular fix.
+//
+// TODO(rsc,gri): Replace with go/typechecker.
+// Doing that could be an interesting test case for go/typechecker:
+// the constraints about working with partial information will
+// likely exercise it in interesting ways. The ideal interface would
+// be to pass typecheck a map from importpath to package API text
+// (Go source code), but for now we use data structures (TypeConfig, Type).
+//
+// The strings mostly use gofmt form.
+//
+// A Field or FieldList has as its type a comma-separated list
+// of the types of the fields. For example, the field list
+// x, y, z int
+// has type "int, int, int".
+
+// The prefix "type " is the type of a type.
+// For example, given
+// var x int
+// type T int
+// x's type is "int" but T's type is "type int".
+// mkType inserts the "type " prefix.
+// getType removes it.
+// isType tests for it.
+
+func mkType(t string) string {
+ return "type " + t
+}
+
+func getType(t string) string {
+ if !isType(t) {
+ return ""
+ }
+ return t[len("type "):]
+}
+
+func isType(t string) bool {
+ return strings.HasPrefix(t, "type ")
+}
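+
+// For example (illustrative):
+//	mkType("int")        == "type int"
+//	getType("type int")  == "int"
+//	isType("func() int") == false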
+
+// TypeConfig describes the universe of relevant types.
+// For ease of creation, the types are all referred to by string
+// name (e.g., "reflect.Value"). TypeByName is the only place
+// where the strings are resolved.
+
+type TypeConfig struct {
+ Type map[string]*Type
+ Var map[string]string
+ Func map[string]string
+}
+
+// typeof returns the type of the given name, which may be of
+// the form "x" or "p.X".
+func (cfg *TypeConfig) typeof(name string) string {
+ if cfg.Var != nil {
+ if t := cfg.Var[name]; t != "" {
+ return t
+ }
+ }
+ if cfg.Func != nil {
+ if t := cfg.Func[name]; t != "" {
+ return "func()" + t
+ }
+ }
+ return ""
+}
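+
+// An illustrative configuration (hypothetical names):
+//
+//	cfg := &TypeConfig{
+//		Var:  map[string]string{"x": "int"},
+//		Func: map[string]string{"f": "string"},
+//	}
+//
+// cfg.typeof("x") then returns "int", and cfg.typeof("f") returns
+// "func()string" (typeof prepends "func()" to the Func entry).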
+
+// Type describes the Fields and Methods of a type.
+// If the field or method cannot be found there, it is next
+// looked for in the Embed list.
+type Type struct {
+ Field map[string]string // map field name to type
+ Method map[string]string // map method name to comma-separated return types (should start with "func ")
+ Embed []string // list of types this type embeds (for extra methods)
+ Def string // definition of named type
+}
+
+// dot returns the type of "typ.name", making its decision
+// using the type information in cfg.
+func (typ *Type) dot(cfg *TypeConfig, name string) string {
+ if typ.Field != nil {
+ if t := typ.Field[name]; t != "" {
+ return t
+ }
+ }
+ if typ.Method != nil {
+ if t := typ.Method[name]; t != "" {
+ return t
+ }
+ }
+
+ for _, e := range typ.Embed {
+ etyp := cfg.Type[e]
+ if etyp != nil {
+ if t := etyp.dot(cfg, name); t != "" {
+ return t
+ }
+ }
+ }
+
+ return ""
+}
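+
+// For example (a sketch): if cfg.Type["base"] has Method["Close"] set and
+// derived := &Type{Embed: []string{"base"}}, then derived.dot(cfg, "Close")
+// misses in derived's own Field and Method maps and resolves "Close"
+// through the embedded "base" type.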
+
+// typecheck type checks the AST f assuming the information in cfg.
+// It returns two maps with type information:
+// typeof maps AST nodes to type information in gofmt string form.
+// assign maps each type string to the list of expressions that were
+// assigned to values of that type even though their own recorded types differ.
+func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, assign map[string][]interface{}) {
+ typeof = make(map[interface{}]string)
+ assign = make(map[string][]interface{})
+ cfg1 := &TypeConfig{}
+ *cfg1 = *cfg // make copy so we can add locally
+ copied := false
+
+ // gather function declarations
+ for _, decl := range f.Decls {
+ fn, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ typecheck1(cfg, fn.Type, typeof, assign)
+ t := typeof[fn.Type]
+ if fn.Recv != nil {
+ // The receiver must be a type.
+ rcvr := typeof[fn.Recv]
+ if !isType(rcvr) {
+ if len(fn.Recv.List) != 1 {
+ continue
+ }
+ rcvr = mkType(gofmt(fn.Recv.List[0].Type))
+ typeof[fn.Recv.List[0].Type] = rcvr
+ }
+ rcvr = getType(rcvr)
+ if rcvr != "" && rcvr[0] == '*' {
+ rcvr = rcvr[1:]
+ }
+ typeof[rcvr+"."+fn.Name.Name] = t
+ } else {
+ if isType(t) {
+ t = getType(t)
+ } else {
+ t = gofmt(fn.Type)
+ }
+ typeof[fn.Name] = t
+
+ // Record typeof[fn.Name.Obj] for future references to fn.Name.
+ typeof[fn.Name.Obj] = t
+ }
+ }
+
+ // gather struct declarations
+ for _, decl := range f.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if ok {
+ for _, s := range d.Specs {
+ switch s := s.(type) {
+ case *ast.TypeSpec:
+ if cfg1.Type[s.Name.Name] != nil {
+ break
+ }
+ if !copied {
+ copied = true
+ // Copy the map lazily, now that we actually need to modify it.
+ cfg1.Type = make(map[string]*Type)
+ for k, v := range cfg.Type {
+ cfg1.Type[k] = v
+ }
+ }
+ t := &Type{Field: map[string]string{}}
+ cfg1.Type[s.Name.Name] = t
+ switch st := s.Type.(type) {
+ case *ast.StructType:
+ for _, f := range st.Fields.List {
+ for _, n := range f.Names {
+ t.Field[n.Name] = gofmt(f.Type)
+ }
+ }
+ case *ast.ArrayType, *ast.StarExpr, *ast.MapType:
+ t.Def = gofmt(st)
+ }
+ }
+ }
+ }
+ }
+
+ typecheck1(cfg1, f, typeof, assign)
+ return typeof, assign
+}
+
+func makeExprList(a []*ast.Ident) []ast.Expr {
+ var b []ast.Expr
+ for _, x := range a {
+ b = append(b, x)
+ }
+ return b
+}
+
+// typecheck1 is the recursive form of typecheck.
+// It is like typecheck but adds to the information in typeof
+// instead of allocating a new map.
+func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, assign map[string][]interface{}) {
+ // set sets the type of n to typ.
+ // If isDecl is true, n is being declared.
+ set := func(n ast.Expr, typ string, isDecl bool) {
+ if typeof[n] != "" || typ == "" {
+ if typeof[n] != typ {
+ assign[typ] = append(assign[typ], n)
+ }
+ return
+ }
+ typeof[n] = typ
+
+ // If we obtained typ from the declaration of x,
+ // propagate the type to all the uses.
+ // The !isDecl case is a cheat here, but it makes
+ // up in some cases for not paying attention to
+ // struct fields. The real type checker will be
+ // more accurate so we won't need the cheat.
+ if id, ok := n.(*ast.Ident); ok && id.Obj != nil && (isDecl || typeof[id.Obj] == "") {
+ typeof[id.Obj] = typ
+ }
+ }
+
+ // Type-check an assignment lhs = rhs.
+ // If isDecl is true, this is := so we can update
+ // the types of the objects that lhs refers to.
+ typecheckAssign := func(lhs, rhs []ast.Expr, isDecl bool) {
+ if len(lhs) > 1 && len(rhs) == 1 {
+ if _, ok := rhs[0].(*ast.CallExpr); ok {
+ t := split(typeof[rhs[0]])
+ // Lists should have same length but may not; pair what can be paired.
+ for i := 0; i < len(lhs) && i < len(t); i++ {
+ set(lhs[i], t[i], isDecl)
+ }
+ return
+ }
+ }
+ if len(lhs) == 1 && len(rhs) == 2 {
+ // x = y, ok
+ rhs = rhs[:1]
+ } else if len(lhs) == 2 && len(rhs) == 1 {
+ // x, ok = y
+ lhs = lhs[:1]
+ }
+
+ // Match as much as we can.
+ for i := 0; i < len(lhs) && i < len(rhs); i++ {
+ x, y := lhs[i], rhs[i]
+ if typeof[y] != "" {
+ set(x, typeof[y], isDecl)
+ } else {
+ set(y, typeof[x], false)
+ }
+ }
+ }
+
+ expand := func(s string) string {
+ typ := cfg.Type[s]
+ if typ != nil && typ.Def != "" {
+ return typ.Def
+ }
+ return s
+ }
+
+ // The main type check is a recursive algorithm implemented
+ // by walkBeforeAfter(n, before, after).
+ // Most of it is bottom-up, but in a few places we need
+ // to know the type of the function we are checking.
+ // The before function records that information on
+ // the curfn stack.
+ var curfn []*ast.FuncType
+
+ before := func(n interface{}) {
+ // push function type on stack
+ switch n := n.(type) {
+ case *ast.FuncDecl:
+ curfn = append(curfn, n.Type)
+ case *ast.FuncLit:
+ curfn = append(curfn, n.Type)
+ }
+ }
+
+ // After is the real type checker.
+ after := func(n interface{}) {
+ if n == nil {
+ return
+ }
+ if false && reflect.TypeOf(n).Kind() == reflect.Ptr { // debugging trace
+ defer func() {
+ if t := typeof[n]; t != "" {
+ pos := fset.Position(n.(ast.Node).Pos())
+ fmt.Fprintf(os.Stderr, "%s: typeof[%s] = %s\n", pos, gofmt(n), t)
+ }
+ }()
+ }
+
+ switch n := n.(type) {
+ case *ast.FuncDecl, *ast.FuncLit:
+ // pop function type off stack
+ curfn = curfn[:len(curfn)-1]
+
+ case *ast.FuncType:
+ typeof[n] = mkType(joinFunc(split(typeof[n.Params]), split(typeof[n.Results])))
+
+ case *ast.FieldList:
+ // Field list is concatenation of sub-lists.
+ t := ""
+ for _, field := range n.List {
+ if t != "" {
+ t += ", "
+ }
+ t += typeof[field]
+ }
+ typeof[n] = t
+
+ case *ast.Field:
+ // Field is one instance of the type per name.
+ all := ""
+ t := typeof[n.Type]
+ if !isType(t) {
+ // Create a type, because it is typically *T or *p.T
+ // and we might care about that type.
+ t = mkType(gofmt(n.Type))
+ typeof[n.Type] = t
+ }
+ t = getType(t)
+ if len(n.Names) == 0 {
+ all = t
+ } else {
+ for _, id := range n.Names {
+ if all != "" {
+ all += ", "
+ }
+ all += t
+ typeof[id.Obj] = t
+ typeof[id] = t
+ }
+ }
+ typeof[n] = all
+
+ case *ast.ValueSpec:
+ // var declaration. Use type if present.
+ if n.Type != nil {
+ t := typeof[n.Type]
+ if !isType(t) {
+ t = mkType(gofmt(n.Type))
+ typeof[n.Type] = t
+ }
+ t = getType(t)
+ for _, id := range n.Names {
+ set(id, t, true)
+ }
+ }
+ // Now treat same as assignment.
+ typecheckAssign(makeExprList(n.Names), n.Values, true)
+
+ case *ast.AssignStmt:
+ typecheckAssign(n.Lhs, n.Rhs, n.Tok == token.DEFINE)
+
+ case *ast.Ident:
+ // Identifier can take its type from underlying object.
+ if t := typeof[n.Obj]; t != "" {
+ typeof[n] = t
+ }
+
+ case *ast.SelectorExpr:
+ // Field or method.
+ name := n.Sel.Name
+ if t := typeof[n.X]; t != "" {
+ if strings.HasPrefix(t, "*") {
+ t = t[1:] // implicit *
+ }
+ if typ := cfg.Type[t]; typ != nil {
+ if t := typ.dot(cfg, name); t != "" {
+ typeof[n] = t
+ return
+ }
+ }
+ tt := typeof[t+"."+name]
+ if isType(tt) {
+ typeof[n] = getType(tt)
+ return
+ }
+ }
+ // Package selector.
+ if x, ok := n.X.(*ast.Ident); ok && x.Obj == nil {
+ str := x.Name + "." + name
+ if cfg.Type[str] != nil {
+ typeof[n] = mkType(str)
+ return
+ }
+ if t := cfg.typeof(x.Name + "." + name); t != "" {
+ typeof[n] = t
+ return
+ }
+ }
+
+ case *ast.CallExpr:
+ // make(T) has type T.
+ if isTopName(n.Fun, "make") && len(n.Args) >= 1 {
+ typeof[n] = gofmt(n.Args[0])
+ return
+ }
+ // new(T) has type *T.
+ if isTopName(n.Fun, "new") && len(n.Args) == 1 {
+ typeof[n] = "*" + gofmt(n.Args[0])
+ return
+ }
+ // Otherwise, use type of function to determine arguments.
+ t := typeof[n.Fun]
+ in, out := splitFunc(t)
+ if in == nil && out == nil {
+ return
+ }
+ typeof[n] = join(out)
+ for i, arg := range n.Args {
+ if i >= len(in) {
+ break
+ }
+ if typeof[arg] == "" {
+ typeof[arg] = in[i]
+ }
+ }
+
+ case *ast.TypeAssertExpr:
+ // x.(type) has type of x.
+ if n.Type == nil {
+ typeof[n] = typeof[n.X]
+ return
+ }
+ // x.(T) has type T.
+ if t := typeof[n.Type]; isType(t) {
+ typeof[n] = getType(t)
+ } else {
+ typeof[n] = gofmt(n.Type)
+ }
+
+ case *ast.SliceExpr:
+ // x[i:j] has type of x.
+ typeof[n] = typeof[n.X]
+
+ case *ast.IndexExpr:
+ // x[i] has key type of x's type.
+ t := expand(typeof[n.X])
+ if strings.HasPrefix(t, "[") || strings.HasPrefix(t, "map[") {
+ // Lazy: assume there are no nested [] in the array
+ // length or map key type.
+ if i := strings.Index(t, "]"); i >= 0 {
+ typeof[n] = t[i+1:]
+ }
+ }
+
+ case *ast.StarExpr:
+ // *x for x of type *T has type T when x is an expr.
+ // We don't use the result when *x is a type, but
+ // compute it anyway.
+ t := expand(typeof[n.X])
+ if isType(t) {
+ typeof[n] = "type *" + getType(t)
+ } else if strings.HasPrefix(t, "*") {
+ typeof[n] = t[len("*"):]
+ }
+
+ case *ast.UnaryExpr:
+ // &x for x of type T has type *T.
+ t := typeof[n.X]
+ if t != "" && n.Op == token.AND {
+ typeof[n] = "*" + t
+ }
+
+ case *ast.CompositeLit:
+ // T{...} has type T.
+ typeof[n] = gofmt(n.Type)
+
+ case *ast.ParenExpr:
+ // (x) has type of x.
+ typeof[n] = typeof[n.X]
+
+ case *ast.RangeStmt:
+ t := expand(typeof[n.X])
+ if t == "" {
+ return
+ }
+ var key, value string
+ if t == "string" {
+ key, value = "int", "rune"
+ } else if strings.HasPrefix(t, "[") {
+ key = "int"
+ if i := strings.Index(t, "]"); i >= 0 {
+ value = t[i+1:]
+ }
+ } else if strings.HasPrefix(t, "map[") {
+ if i := strings.Index(t, "]"); i >= 0 {
+ key, value = t[4:i], t[i+1:]
+ }
+ }
+ changed := false
+ if n.Key != nil && key != "" {
+ changed = true
+ set(n.Key, key, n.Tok == token.DEFINE)
+ }
+ if n.Value != nil && value != "" {
+ changed = true
+ set(n.Value, value, n.Tok == token.DEFINE)
+ }
+ // Unfortunately, the body was type-checked before the key and value
+ // types were known; do it again now that we have that type info.
+ if changed {
+ typecheck1(cfg, n.Body, typeof, assign)
+ }
+
+ case *ast.TypeSwitchStmt:
+ // Type of variable changes for each case in type switch,
+ // but go/parser generates just one variable.
+ // Repeat type check for each case with more precise
+ // type information.
+ as, ok := n.Assign.(*ast.AssignStmt)
+ if !ok {
+ return
+ }
+ varx, ok := as.Lhs[0].(*ast.Ident)
+ if !ok {
+ return
+ }
+ t := typeof[varx]
+ for _, cas := range n.Body.List {
+ cas := cas.(*ast.CaseClause)
+ if len(cas.List) == 1 {
+ // Variable has specific type only when there is
+ // exactly one type in the case list.
+ if tt := typeof[cas.List[0]]; isType(tt) {
+ tt = getType(tt)
+ typeof[varx] = tt
+ typeof[varx.Obj] = tt
+ typecheck1(cfg, cas.Body, typeof, assign)
+ }
+ }
+ }
+ // Restore t.
+ typeof[varx] = t
+ typeof[varx.Obj] = t
+
+ case *ast.ReturnStmt:
+ if len(curfn) == 0 {
+ // Probably can't happen.
+ return
+ }
+ f := curfn[len(curfn)-1]
+ res := n.Results
+ if f.Results != nil {
+ t := split(typeof[f.Results])
+ for i := 0; i < len(res) && i < len(t); i++ {
+ set(res[i], t[i], false)
+ }
+ }
+ }
+ }
+ walkBeforeAfter(f, before, after)
+}
+
+// Convert between function type strings and lists of types.
+// Using strings makes this a little harder, but it makes
+// a lot of the rest of the code easier. This will all go away
+// when we can use go/typechecker directly.
+
+// splitFunc splits "func(x,y,z) (a,b,c)" into ["x", "y", "z"] and ["a", "b", "c"].
+func splitFunc(s string) (in, out []string) {
+ if !strings.HasPrefix(s, "func(") {
+ return nil, nil
+ }
+
+ i := len("func(") // index of beginning of 'in' arguments
+ nparen := 0
+ for j := i; j < len(s); j++ {
+ switch s[j] {
+ case '(':
+ nparen++
+ case ')':
+ nparen--
+ if nparen < 0 {
+ // found end of parameter list
+ out := strings.TrimSpace(s[j+1:])
+ if len(out) >= 2 && out[0] == '(' && out[len(out)-1] == ')' {
+ out = out[1 : len(out)-1]
+ }
+ return split(s[i:j]), split(out)
+ }
+ }
+ }
+ return nil, nil
+}
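+
+// A quick illustration (sketch, not in the original source):
+//
+// splitFunc("func(x, y) (a, b)") // in = ["x", "y"], out = ["a", "b"]
+// splitFunc("func(int) error")   // in = ["int"], out = ["error"]
+// splitFunc("notAFunc")          // nil, nil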
+
+// joinFunc is the inverse of splitFunc.
+func joinFunc(in, out []string) string {
+ outs := ""
+ if len(out) == 1 {
+ outs = " " + out[0]
+ } else if len(out) > 1 {
+ outs = " (" + join(out) + ")"
+ }
+ return "func(" + join(in) + ")" + outs
+}
+
+// split splits "int, float" into ["int", "float"] and splits "" into [].
+func split(s string) []string {
+ out := []string{}
+ i := 0 // current type being scanned is s[i:j].
+ nparen := 0
+ for j := 0; j < len(s); j++ {
+ switch s[j] {
+ case ' ':
+ if i == j {
+ i++
+ }
+ case '(':
+ nparen++
+ case ')':
+ nparen--
+ if nparen < 0 {
+ // probably can't happen
+ return nil
+ }
+ case ',':
+ if nparen == 0 {
+ if i < j {
+ out = append(out, s[i:j])
+ }
+ i = j + 1
+ }
+ }
+ }
+ if nparen != 0 {
+ // probably can't happen
+ return nil
+ }
+ if i < len(s) {
+ out = append(out, s[i:])
+ }
+ return out
+}
+
+// join is the inverse of split.
+func join(x []string) string {
+ return strings.Join(x, ", ")
+}
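+
+// A brief illustration of split and join (a sketch); note that commas
+// nested inside parentheses do not split:
+//
+// split("int, func(a, b) c")     // ["int", "func(a, b) c"]
+// join([]string{"int", "error"}) // "int, error"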
diff --git a/vendor/google.golang.org/appengine/datastore/datastore.go b/vendor/google.golang.org/appengine/datastore/datastore.go
new file mode 100644
index 000000000..576bc5013
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/datastore.go
@@ -0,0 +1,407 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ // ErrInvalidEntityType is returned when functions like Get or Next are
+ // passed a dst or src argument of invalid type.
+ ErrInvalidEntityType = errors.New("datastore: invalid entity type")
+ // ErrInvalidKey is returned when an invalid key is presented.
+ ErrInvalidKey = errors.New("datastore: invalid key")
+ // ErrNoSuchEntity is returned when no entity was found for a given key.
+ ErrNoSuchEntity = errors.New("datastore: no such entity")
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument
+// passed to Get or to Iterator.Next.
+type ErrFieldMismatch struct {
+ StructType reflect.Type
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
+ e.FieldName, e.StructType, e.Reason)
+}
+
+// protoToKey converts a Reference proto to a *Key. If the key is invalid,
+// protoToKey will return the invalid key along with ErrInvalidKey.
+func protoToKey(r *pb.Reference) (k *Key, err error) {
+ appID := r.GetApp()
+ namespace := r.GetNameSpace()
+ for _, e := range r.Path.Element {
+ k = &Key{
+ kind: e.GetType(),
+ stringID: e.GetName(),
+ intID: e.GetId(),
+ parent: k,
+ appID: appID,
+ namespace: namespace,
+ }
+ if !k.valid() {
+ return k, ErrInvalidKey
+ }
+ }
+ return
+}
+
+// keyToProto converts a *Key to a Reference proto.
+func keyToProto(defaultAppID string, k *Key) *pb.Reference {
+ appID := k.appID
+ if appID == "" {
+ appID = defaultAppID
+ }
+ n := 0
+ for i := k; i != nil; i = i.parent {
+ n++
+ }
+ e := make([]*pb.Path_Element, n)
+ for i := k; i != nil; i = i.parent {
+ n--
+ e[n] = &pb.Path_Element{
+ Type: &i.kind,
+ }
+ // At most one of {Name,Id} should be set.
+ // Neither will be set for incomplete keys.
+ if i.stringID != "" {
+ e[n].Name = &i.stringID
+ } else if i.intID != 0 {
+ e[n].Id = &i.intID
+ }
+ }
+ var namespace *string
+ if k.namespace != "" {
+ namespace = proto.String(k.namespace)
+ }
+ return &pb.Reference{
+ App: proto.String(appID),
+ NameSpace: namespace,
+ Path: &pb.Path{
+ Element: e,
+ },
+ }
+}
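+
+// For example (a sketch using this package's unexported fields): encoding
+// a Parent -> Child chain stores the path root-first, so for
+//
+// child := &Key{kind: "Child", stringID: "c", appID: "app",
+// 	parent: &Key{kind: "Parent", intID: 7, appID: "app"}}
+//
+// keyToProto("app", child) yields Path.Element[0] = {Type: "Parent", Id: 7}
+// and Path.Element[1] = {Type: "Child", Name: "c"}.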
+
+// multiKeyToProto is a batch version of keyToProto.
+func multiKeyToProto(appID string, key []*Key) []*pb.Reference {
+ ret := make([]*pb.Reference, len(key))
+ for i, k := range key {
+ ret[i] = keyToProto(appID, k)
+ }
+ return ret
+}
+
+// multiValid is a batch version of Key.valid. It returns an error, not a
+// []bool.
+func multiValid(key []*Key) error {
+ invalid := false
+ for _, k := range key {
+ if !k.valid() {
+ invalid = true
+ break
+ }
+ }
+ if !invalid {
+ return nil
+ }
+ err := make(appengine.MultiError, len(key))
+ for i, k := range key {
+ if !k.valid() {
+ err[i] = ErrInvalidKey
+ }
+ }
+ return err
+}
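+
+// Illustrative sketch (good and bad are hypothetical keys): for
+// multiValid([]*Key{good, bad}) the result is an appengine.MultiError
+// whose entries are nil and ErrInvalidKey respectively; for all-valid
+// input the function returns nil rather than an all-nil MultiError.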
+
+// It's unfortunate that the two semantically equivalent concepts pb.Reference
+// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the
+// two have different protobuf field numbers.
+
+// referenceValueToKey is the same as protoToKey except the input is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) {
+ appID := r.GetApp()
+ namespace := r.GetNameSpace()
+ for _, e := range r.Pathelement {
+ k = &Key{
+ kind: e.GetType(),
+ stringID: e.GetName(),
+ intID: e.GetId(),
+ parent: k,
+ appID: appID,
+ namespace: namespace,
+ }
+ if !k.valid() {
+ return nil, ErrInvalidKey
+ }
+ }
+ return
+}
+
+// keyToReferenceValue is the same as keyToProto except the output is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue {
+ ref := keyToProto(defaultAppID, k)
+ pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element))
+ for i, e := range ref.Path.Element {
+ pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{
+ Type: e.Type,
+ Id: e.Id,
+ Name: e.Name,
+ }
+ }
+ return &pb.PropertyValue_ReferenceValue{
+ App: ref.App,
+ NameSpace: ref.NameSpace,
+ Pathelement: pe,
+ }
+}
+
+type multiArgType int
+
+const (
+ multiArgTypeInvalid multiArgType = iota
+ multiArgTypePropertyLoadSaver
+ multiArgTypeStruct
+ multiArgTypeStructPtr
+ multiArgTypeInterface
+)
+
+// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
+// type S, for some interface type I, or for some non-interface non-pointer type P
+// such that P or *P implements PropertyLoadSaver.
+//
+// It returns what category the slice's elements are, and the reflect.Type
+// that represents S, I or P.
+//
+// As a special case, PropertyList is an invalid type for v.
+func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
+ if v.Kind() != reflect.Slice {
+ return multiArgTypeInvalid, nil
+ }
+ if v.Type() == typeOfPropertyList {
+ return multiArgTypeInvalid, nil
+ }
+ elemType = v.Type().Elem()
+ if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
+ return multiArgTypePropertyLoadSaver, elemType
+ }
+ switch elemType.Kind() {
+ case reflect.Struct:
+ return multiArgTypeStruct, elemType
+ case reflect.Interface:
+ return multiArgTypeInterface, elemType
+ case reflect.Ptr:
+ elemType = elemType.Elem()
+ if elemType.Kind() == reflect.Struct {
+ return multiArgTypeStructPtr, elemType
+ }
+ }
+ return multiArgTypeInvalid, nil
+}
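+
+// For example (a sketch, with some struct type S):
+//
+// checkMultiArg(reflect.ValueOf([]S{}))   // multiArgTypeStruct, S
+// checkMultiArg(reflect.ValueOf([]*S{}))  // multiArgTypeStructPtr, S
+// checkMultiArg(reflect.ValueOf([]int{})) // multiArgTypeInvalid, nil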
+
+// Get loads the entity stored for k into dst, which must be a struct pointer
+// or implement PropertyLoadSaver. If there is no such entity for the key, Get
+// returns ErrNoSuchEntity.
+//
+// The values of dst's unmatched struct fields are not modified, and matching
+// slice-typed fields are not reset before appending to them. In particular, it
+// is recommended to pass a pointer to a zero valued struct on each Get call.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer.
+func Get(c context.Context, key *Key, dst interface{}) error {
+ if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here
+ return ErrInvalidEntityType
+ }
+ err := GetMulti(c, []*Key{key}, []interface{}{dst})
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
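+
+// A minimal usage sketch (Employee is a hypothetical caller-side type and
+// c an App Engine request context; neither is part of this package):
+//
+// var e Employee
+// k := datastore.NewKey(c, "Employee", "lastname.firstname", 0, nil)
+// if err := datastore.Get(c, k, &e); err != nil {
+// 	// err may be ErrNoSuchEntity, an *ErrFieldMismatch, etc.
+// }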
+
+// GetMulti is a batch version of Get.
+//
+// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
+// type I, or some non-interface non-pointer type P such that P or *P
+// implements PropertyLoadSaver. If an []I, each element must be a valid dst
+// for Get: it must be a struct pointer or implement PropertyLoadSaver.
+//
+// As a special case, PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when []PropertyList was intended.
+func GetMulti(c context.Context, key []*Key, dst interface{}) error {
+ v := reflect.ValueOf(dst)
+ multiArgType, _ := checkMultiArg(v)
+ if multiArgType == multiArgTypeInvalid {
+ return errors.New("datastore: dst has invalid type")
+ }
+ if len(key) != v.Len() {
+ return errors.New("datastore: key and dst slices have different length")
+ }
+ if len(key) == 0 {
+ return nil
+ }
+ if err := multiValid(key); err != nil {
+ return err
+ }
+ req := &pb.GetRequest{
+ Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
+ }
+ res := &pb.GetResponse{}
+ if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil {
+ return err
+ }
+ if len(key) != len(res.Entity) {
+ return errors.New("datastore: internal error: server returned the wrong number of entities")
+ }
+ multiErr, any := make(appengine.MultiError, len(key)), false
+ for i, e := range res.Entity {
+ if e.Entity == nil {
+ multiErr[i] = ErrNoSuchEntity
+ } else {
+ elem := v.Index(i)
+ if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+ elem = elem.Addr()
+ }
+ if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
+ elem.Set(reflect.New(elem.Type().Elem()))
+ }
+ multiErr[i] = loadEntity(elem.Interface(), e.Entity)
+ }
+ if multiErr[i] != nil {
+ any = true
+ }
+ }
+ if any {
+ return multiErr
+ }
+ return nil
+}
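+
+// A hedged usage sketch (Employee and keys as in the Get example above):
+//
+// es := make([]Employee, len(keys))
+// if err := datastore.GetMulti(c, keys, es); err != nil {
+// 	if me, ok := err.(appengine.MultiError); ok {
+// 		// me[i] is the error, possibly nil, for keys[i].
+// 	}
+// }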
+
+// Put saves the entity src into the datastore with key k. src must be a struct
+// pointer or implement PropertyLoadSaver; if a struct pointer then any
+// unexported fields of that struct will be skipped. If k is an incomplete key,
+// the returned key will be a unique key generated by the datastore.
+func Put(c context.Context, key *Key, src interface{}) (*Key, error) {
+ k, err := PutMulti(c, []*Key{key}, []interface{}{src})
+ if err != nil {
+ if me, ok := err.(appengine.MultiError); ok {
+ return nil, me[0]
+ }
+ return nil, err
+ }
+ return k[0], nil
+}
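+
+// A hedged usage sketch, mirroring the Get example above:
+//
+// k := datastore.NewIncompleteKey(c, "Employee", nil)
+// k, err := datastore.Put(c, k, &e)
+// // On success, k is a complete key generated by the datastore.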
+
+// PutMulti is a batch version of Put.
+//
+// src must satisfy the same conditions as the dst argument to GetMulti.
+func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) {
+ v := reflect.ValueOf(src)
+ multiArgType, _ := checkMultiArg(v)
+ if multiArgType == multiArgTypeInvalid {
+ return nil, errors.New("datastore: src has invalid type")
+ }
+ if len(key) != v.Len() {
+ return nil, errors.New("datastore: key and src slices have different length")
+ }
+ if len(key) == 0 {
+ return nil, nil
+ }
+ appID := internal.FullyQualifiedAppID(c)
+ if err := multiValid(key); err != nil {
+ return nil, err
+ }
+ req := &pb.PutRequest{}
+ for i := range key {
+ elem := v.Index(i)
+ if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+ elem = elem.Addr()
+ }
+ sProto, err := saveEntity(appID, key[i], elem.Interface())
+ if err != nil {
+ return nil, err
+ }
+ req.Entity = append(req.Entity, sProto)
+ }
+ res := &pb.PutResponse{}
+ if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil {
+ return nil, err
+ }
+ if len(key) != len(res.Key) {
+ return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
+ }
+ ret := make([]*Key, len(key))
+ for i := range ret {
+ var err error
+ ret[i], err = protoToKey(res.Key[i])
+ if err != nil || ret[i].Incomplete() {
+ return nil, errors.New("datastore: internal error: server returned an invalid key")
+ }
+ }
+ return ret, nil
+}
+
+// Delete deletes the entity for the given key.
+func Delete(c context.Context, key *Key) error {
+ err := DeleteMulti(c, []*Key{key})
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// DeleteMulti is a batch version of Delete.
+func DeleteMulti(c context.Context, key []*Key) error {
+ if len(key) == 0 {
+ return nil
+ }
+ if err := multiValid(key); err != nil {
+ return err
+ }
+ req := &pb.DeleteRequest{
+ Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
+ }
+ res := &pb.DeleteResponse{}
+ return internal.Call(c, "datastore_v3", "Delete", req, res)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ // pb.Query is the only type that has a name_space field.
+ // All other namespace support in datastore is in the keys.
+ switch m := m.(type) {
+ case *pb.Query:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ }
+}
+
+func init() {
+ internal.NamespaceMods["datastore_v3"] = namespaceMod
+ internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT))
+}
diff --git a/vendor/google.golang.org/appengine/datastore/datastore_test.go b/vendor/google.golang.org/appengine/datastore/datastore_test.go
new file mode 100644
index 000000000..b3888e9d1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/datastore_test.go
@@ -0,0 +1,1744 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+const testAppID = "testApp"
+
+type (
+ myBlob []byte
+ myByte byte
+ myString string
+)
+
+func makeMyByteSlice(n int) []myByte {
+ b := make([]myByte, n)
+ for i := range b {
+ b[i] = myByte(i)
+ }
+ return b
+}
+
+func makeInt8Slice(n int) []int8 {
+ b := make([]int8, n)
+ for i := range b {
+ b[i] = int8(i)
+ }
+ return b
+}
+
+func makeUint8Slice(n int) []uint8 {
+ b := make([]uint8, n)
+ for i := range b {
+ b[i] = uint8(i)
+ }
+ return b
+}
+
+func newKey(stringID string, parent *Key) *Key {
+ return &Key{
+ kind: "kind",
+ stringID: stringID,
+ intID: 0,
+ parent: parent,
+ appID: testAppID,
+ }
+}
+
+var (
+ testKey0 = newKey("name0", nil)
+ testKey1a = newKey("name1", nil)
+ testKey1b = newKey("name1", nil)
+ testKey2a = newKey("name2", testKey0)
+ testKey2b = newKey("name2", testKey0)
+ testGeoPt0 = appengine.GeoPoint{Lat: 1.2, Lng: 3.4}
+ testGeoPt1 = appengine.GeoPoint{Lat: 5, Lng: 10}
+ testBadGeoPt = appengine.GeoPoint{Lat: 1000, Lng: 34}
+
+ now = time.Unix(1e9, 0).UTC()
+)
+
+type B0 struct {
+ B []byte
+}
+
+type B1 struct {
+ B []int8
+}
+
+type B2 struct {
+ B myBlob
+}
+
+type B3 struct {
+ B []myByte
+}
+
+type B4 struct {
+ B [][]byte
+}
+
+type B5 struct {
+ B ByteString
+}
+
+type C0 struct {
+ I int
+ C chan int
+}
+
+type C1 struct {
+ I int
+ C *chan int
+}
+
+type C2 struct {
+ I int
+ C []chan int
+}
+
+type C3 struct {
+ C string
+}
+
+type E struct{}
+
+type G0 struct {
+ G appengine.GeoPoint
+}
+
+type G1 struct {
+ G []appengine.GeoPoint
+}
+
+type K0 struct {
+ K *Key
+}
+
+type K1 struct {
+ K []*Key
+}
+
+type S struct {
+ St string
+}
+
+type NoOmit struct {
+ A string
+ B int `datastore:"Bb"`
+ C bool `datastore:",noindex"`
+}
+
+type OmitAll struct {
+ A string `datastore:",omitempty"`
+ B int `datastore:"Bb,omitempty"`
+ C bool `datastore:",omitempty,noindex"`
+ F []int `datastore:",omitempty"`
+}
+
+type Omit struct {
+ A string `datastore:",omitempty"`
+ B int `datastore:"Bb,omitempty"`
+ C bool `datastore:",omitempty,noindex"`
+ F []int `datastore:",omitempty"`
+ S `datastore:",omitempty"`
+}
+
+type NoOmits struct {
+ No []NoOmit `datastore:",omitempty"`
+ S `datastore:",omitempty"`
+ Ss S `datastore:",omitempty"`
+}
+
+type N0 struct {
+ X0
+ Nonymous X0
+ Ignore string `datastore:"-"`
+ Other string
+}
+
+type N1 struct {
+ X0
+ Nonymous []X0
+ Ignore string `datastore:"-"`
+ Other string
+}
+
+type N2 struct {
+ N1 `datastore:"red"`
+ Green N1 `datastore:"green"`
+ Blue N1
+ White N1 `datastore:"-"`
+}
+
+type O0 struct {
+ I int64
+}
+
+type O1 struct {
+ I int32
+}
+
+type U0 struct {
+ U uint
+}
+
+type U1 struct {
+ U string
+}
+
+type T struct {
+ T time.Time
+}
+
+type X0 struct {
+ S string
+ I int
+ i int
+}
+
+type X1 struct {
+ S myString
+ I int32
+ J int64
+}
+
+type X2 struct {
+ Z string
+ i int
+}
+
+type X3 struct {
+ S bool
+ I int
+}
+
+type Y0 struct {
+ B bool
+ F []float64
+ G []float64
+}
+
+type Y1 struct {
+ B bool
+ F float64
+}
+
+type Y2 struct {
+ B bool
+ F []int64
+}
+
+type Tagged struct {
+ A int `datastore:"a,noindex"`
+ B []int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ // The "flatten" option is parsed but ignored for now.
+ F int `datastore:",noindex,flatten"`
+ G int `datastore:",flatten"`
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+
+ Y0 `datastore:"-"`
+ Z chan int `datastore:"-,"`
+}
+
+type InvalidTagged1 struct {
+ I int `datastore:"\t"`
+}
+
+type InvalidTagged2 struct {
+ I int
+ J int `datastore:"I"`
+}
+
+type Inner1 struct {
+ W int32
+ X string
+}
+
+type Inner2 struct {
+ Y float64
+}
+
+type Inner3 struct {
+ Z bool
+}
+
+type Outer struct {
+ A int16
+ I []Inner1
+ J Inner2
+ Inner3
+}
+
+type OuterEquivalent struct {
+ A int16
+ IDotW []int32 `datastore:"I.W"`
+ IDotX []string `datastore:"I.X"`
+ JDotY float64 `datastore:"J.Y"`
+ Z bool
+}
+
+type Dotted struct {
+ A DottedA `datastore:"A0.A1.A2"`
+}
+
+type DottedA struct {
+ B DottedB `datastore:"B3"`
+}
+
+type DottedB struct {
+ C int `datastore:"C4.C5"`
+}
+
+type SliceOfSlices struct {
+ I int
+ S []struct {
+ J int
+ F []float64
+ }
+}
+
+type Recursive struct {
+ I int
+ R []Recursive
+}
+
+type MutuallyRecursive0 struct {
+ I int
+ R []MutuallyRecursive1
+}
+
+type MutuallyRecursive1 struct {
+ I int
+ R []MutuallyRecursive0
+}
+
+type Doubler struct {
+ S string
+ I int64
+ B bool
+}
+
+type Repeat struct {
+ Key string
+ Value []byte
+}
+
+type Repeated struct {
+ Repeats []Repeat
+}
+
+func (d *Doubler) Load(props []Property) error {
+ return LoadStruct(d, props)
+}
+
+type EmbeddedTime struct {
+ time.Time
+}
+
+type SpecialTime struct {
+ MyTime EmbeddedTime
+}
+
+func (d *Doubler) Save() ([]Property, error) {
+ // Save the default Property slice to an in-memory buffer (a PropertyList).
+ props, err := SaveStruct(d)
+ if err != nil {
+ return nil, err
+ }
+ var list PropertyList
+ if err := list.Load(props); err != nil {
+ return nil, err
+ }
+
+ // Edit that PropertyList, and send it on.
+ for i := range list {
+ switch v := list[i].Value.(type) {
+ case string:
+ // + means string concatenation.
+ list[i].Value = v + v
+ case int64:
+ // + means integer addition.
+ list[i].Value = v + v
+ }
+ }
+ return list.Save()
+}
+
+var _ PropertyLoadSaver = (*Doubler)(nil)
+
+type Deriver struct {
+ S, Derived, Ignored string
+}
+
+func (e *Deriver) Load(props []Property) error {
+ for _, p := range props {
+ if p.Name != "S" {
+ continue
+ }
+ e.S = p.Value.(string)
+ e.Derived = "derived+" + e.S
+ }
+ return nil
+}
+
+func (e *Deriver) Save() ([]Property, error) {
+ return []Property{
+ {
+ Name: "S",
+ Value: e.S,
+ },
+ }, nil
+}
+
+var _ PropertyLoadSaver = (*Deriver)(nil)
+
+type BadMultiPropEntity struct{}
+
+func (e *BadMultiPropEntity) Load(props []Property) error {
+ return errors.New("unimplemented")
+}
+
+func (e *BadMultiPropEntity) Save() ([]Property, error) {
+ // Write multiple properties with the same name "I", but Multiple is false.
+ var props []Property
+ for i := 0; i < 3; i++ {
+ props = append(props, Property{
+ Name: "I",
+ Value: int64(i),
+ })
+ }
+ return props, nil
+}
+
+var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil)
+
+type BK struct {
+ Key appengine.BlobKey
+}
+
+type testCase struct {
+ desc string
+ src interface{}
+ want interface{}
+ putErr string
+ getErr string
+}
+
+var testCases = []testCase{
+ {
+ "chan save fails",
+ &C0{I: -1},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "*chan save fails",
+ &C1{I: -1},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "[]chan save fails",
+ &C2{I: -1, C: make([]chan int, 8)},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "chan load fails",
+ &C3{C: "not a chan"},
+ &C0{},
+ "",
+ "type mismatch",
+ },
+ {
+ "*chan load fails",
+ &C3{C: "not a *chan"},
+ &C1{},
+ "",
+ "type mismatch",
+ },
+ {
+ "[]chan load fails",
+ &C3{C: "not a []chan"},
+ &C2{},
+ "",
+ "type mismatch",
+ },
+ {
+ "empty struct",
+ &E{},
+ &E{},
+ "",
+ "",
+ },
+ {
+ "geopoint",
+ &G0{G: testGeoPt0},
+ &G0{G: testGeoPt0},
+ "",
+ "",
+ },
+ {
+ "geopoint invalid",
+ &G0{G: testBadGeoPt},
+ &G0{},
+ "invalid GeoPoint value",
+ "",
+ },
+ {
+ "geopoint as props",
+ &G0{G: testGeoPt0},
+ &PropertyList{
+ Property{Name: "G", Value: testGeoPt0, NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "geopoint slice",
+ &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}},
+ &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}},
+ "",
+ "",
+ },
+ {
+ "omit empty, all",
+ &OmitAll{},
+ new(PropertyList),
+ "",
+ "",
+ },
+ {
+ "omit empty",
+ &Omit{},
+ &PropertyList{
+ Property{Name: "St", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "omit empty, fields populated",
+ &Omit{
+ A: "a",
+ B: 10,
+ C: true,
+ F: []int{11},
+ },
+ &PropertyList{
+ Property{Name: "A", Value: "a", NoIndex: false, Multiple: false},
+ Property{Name: "Bb", Value: int64(10), NoIndex: false, Multiple: false},
+ Property{Name: "C", Value: true, NoIndex: true, Multiple: false},
+ Property{Name: "F", Value: int64(11), NoIndex: false, Multiple: true},
+ Property{Name: "St", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "omit empty, fields populated",
+ &Omit{
+ A: "a",
+ B: 10,
+ C: true,
+ F: []int{11},
+ S: S{St: "string"},
+ },
+ &PropertyList{
+ Property{Name: "A", Value: "a", NoIndex: false, Multiple: false},
+ Property{Name: "Bb", Value: int64(10), NoIndex: false, Multiple: false},
+ Property{Name: "C", Value: true, NoIndex: true, Multiple: false},
+ Property{Name: "F", Value: int64(11), NoIndex: false, Multiple: true},
+ Property{Name: "St", Value: "string", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "omit empty does not propagate",
+ &NoOmits{
+ No: []NoOmit{
+ NoOmit{},
+ },
+ S: S{},
+ Ss: S{},
+ },
+ &PropertyList{
+ Property{Name: "No.A", Value: "", NoIndex: false, Multiple: true},
+ Property{Name: "No.Bb", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "No.C", Value: false, NoIndex: true, Multiple: true},
+ Property{Name: "Ss.St", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "St", Value: "", NoIndex: false, Multiple: false}},
+ "",
+ "",
+ },
+ {
+ "key",
+ &K0{K: testKey1a},
+ &K0{K: testKey1b},
+ "",
+ "",
+ },
+ {
+ "key with parent",
+ &K0{K: testKey2a},
+ &K0{K: testKey2b},
+ "",
+ "",
+ },
+ {
+ "nil key",
+ &K0{},
+ &K0{},
+ "",
+ "",
+ },
+ {
+ "all nil keys in slice",
+ &K1{[]*Key{nil, nil}},
+ &K1{[]*Key{nil, nil}},
+ "",
+ "",
+ },
+ {
+ "some nil keys in slice",
+ &K1{[]*Key{testKey1a, nil, testKey2a}},
+ &K1{[]*Key{testKey1b, nil, testKey2b}},
+ "",
+ "",
+ },
+ {
+ "overflow",
+ &O0{I: 1 << 48},
+ &O1{},
+ "",
+ "overflow",
+ },
+ {
+ "time",
+ &T{T: time.Unix(1e9, 0)},
+ &T{T: time.Unix(1e9, 0)},
+ "",
+ "",
+ },
+ {
+ "time as props",
+ &T{T: time.Unix(1e9, 0)},
+ &PropertyList{
+ Property{Name: "T", Value: time.Unix(1e9, 0).UTC(), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "uint save",
+ &U0{U: 1},
+ &U0{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "uint load",
+ &U1{U: "not a uint"},
+ &U0{},
+ "",
+ "type mismatch",
+ },
+ {
+ "zero",
+ &X0{},
+ &X0{},
+ "",
+ "",
+ },
+ {
+ "basic",
+ &X0{S: "one", I: 2, i: 3},
+ &X0{S: "one", I: 2},
+ "",
+ "",
+ },
+ {
+ "save string/int load myString/int32",
+ &X0{S: "one", I: 2, i: 3},
+ &X1{S: "one", I: 2},
+ "",
+ "",
+ },
+ {
+ "missing fields",
+ &X0{S: "one", I: 2, i: 3},
+ &X2{},
+ "",
+ "no such struct field",
+ },
+ {
+ "save string load bool",
+ &X0{S: "one", I: 2, i: 3},
+ &X3{I: 2},
+ "",
+ "type mismatch",
+ },
+ {
+ "basic slice",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ "",
+ "",
+ },
+ {
+ "save []float64 load float64",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y1{B: true},
+ "",
+ "requires a slice",
+ },
+ {
+ "save []float64 load []int64",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y2{B: true},
+ "",
+ "type mismatch",
+ },
+ {
+ "single slice is too long",
+ &Y0{F: make([]float64, maxIndexedProperties+1)},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "two slices are too long",
+ &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "one slice and one scalar are too long",
+ &Y0{F: make([]float64, maxIndexedProperties), B: true},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "slice of slices of bytes",
+ &Repeated{
+ Repeats: []Repeat{
+ {
+ Key: "key 1",
+ Value: []byte("value 1"),
+ },
+ {
+ Key: "key 2",
+ Value: []byte("value 2"),
+ },
+ },
+ },
+ &Repeated{
+ Repeats: []Repeat{
+ {
+ Key: "key 1",
+ Value: []byte("value 1"),
+ },
+ {
+ Key: "key 2",
+ Value: []byte("value 2"),
+ },
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "long blob",
+ &B0{B: makeUint8Slice(maxIndexedProperties + 1)},
+ &B0{B: makeUint8Slice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "long []int8 is too long",
+ &B1{B: makeInt8Slice(maxIndexedProperties + 1)},
+ &B1{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "short []int8",
+ &B1{B: makeInt8Slice(3)},
+ &B1{B: makeInt8Slice(3)},
+ "",
+ "",
+ },
+ {
+ "long myBlob",
+ &B2{B: makeUint8Slice(maxIndexedProperties + 1)},
+ &B2{B: makeUint8Slice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "short myBlob",
+ &B2{B: makeUint8Slice(3)},
+ &B2{B: makeUint8Slice(3)},
+ "",
+ "",
+ },
+ {
+ "long []myByte",
+ &B3{B: makeMyByteSlice(maxIndexedProperties + 1)},
+ &B3{B: makeMyByteSlice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "short []myByte",
+ &B3{B: makeMyByteSlice(3)},
+ &B3{B: makeMyByteSlice(3)},
+ "",
+ "",
+ },
+ {
+ "slice of blobs",
+ &B4{B: [][]byte{
+ makeUint8Slice(3),
+ makeUint8Slice(4),
+ makeUint8Slice(5),
+ }},
+ &B4{B: [][]byte{
+ makeUint8Slice(3),
+ makeUint8Slice(4),
+ makeUint8Slice(5),
+ }},
+ "",
+ "",
+ },
+ {
+ "short ByteString",
+ &B5{B: ByteString(makeUint8Slice(3))},
+ &B5{B: ByteString(makeUint8Slice(3))},
+ "",
+ "",
+ },
+ {
+ "short ByteString as props",
+ &B5{B: ByteString(makeUint8Slice(3))},
+ &PropertyList{
+ Property{Name: "B", Value: ByteString(makeUint8Slice(3)), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "short ByteString into string",
+ &B5{B: ByteString("legacy")},
+ &struct{ B string }{"legacy"},
+ "",
+ "",
+ },
+ {
+ "[]byte must be noindex",
+ &PropertyList{
+ Property{Name: "B", Value: makeUint8Slice(3), NoIndex: false},
+ },
+ nil,
+ "cannot index a []byte valued Property",
+ "",
+ },
+ {
+ "save tagged load props",
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, F: 6, G: 7, I: 8, J: 9},
+ &PropertyList{
+ // A and B are renamed to a and b; A, C, F, and J are noindex; I is ignored.
+ // Indexed properties are loaded before raw properties, and the test
+ // then sorts by name, so the expected result is: C, D, E, F, G, J, a, b, b, b.
+ Property{Name: "C", Value: int64(3), NoIndex: true, Multiple: false},
+ Property{Name: "D", Value: int64(4), NoIndex: false, Multiple: false},
+ Property{Name: "E", Value: int64(5), NoIndex: false, Multiple: false},
+ Property{Name: "F", Value: int64(6), NoIndex: true, Multiple: false},
+ Property{Name: "G", Value: int64(7), NoIndex: false, Multiple: false},
+ Property{Name: "J", Value: int64(9), NoIndex: true, Multiple: false},
+ Property{Name: "a", Value: int64(1), NoIndex: true, Multiple: false},
+ Property{Name: "b", Value: int64(21), NoIndex: false, Multiple: true},
+ Property{Name: "b", Value: int64(22), NoIndex: false, Multiple: true},
+ Property{Name: "b", Value: int64(23), NoIndex: false, Multiple: true},
+ },
+ "",
+ "",
+ },
+ {
+ "save tagged load tagged",
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7},
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7},
+ "",
+ "",
+ },
+ {
+ "save props load tagged",
+ &PropertyList{
+ Property{Name: "A", Value: int64(11), NoIndex: true, Multiple: false},
+ Property{Name: "a", Value: int64(12), NoIndex: true, Multiple: false},
+ },
+ &Tagged{A: 12},
+ "",
+ `cannot load field "A"`,
+ },
+ {
+ "invalid tagged1",
+ &InvalidTagged1{I: 1},
+ &InvalidTagged1{},
+ "struct tag has invalid property name",
+ "",
+ },
+ {
+ "invalid tagged2",
+ &InvalidTagged2{I: 1, J: 2},
+ &InvalidTagged2{},
+ "struct tag has repeated property name",
+ "",
+ },
+ {
+ "doubler",
+ &Doubler{S: "s", I: 1, B: true},
+ &Doubler{S: "ss", I: 2, B: true},
+ "",
+ "",
+ },
+ {
+ "save struct load props",
+ &X0{S: "s", I: 1},
+ &PropertyList{
+ Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false},
+ Property{Name: "S", Value: "s", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load struct",
+ &PropertyList{
+ Property{Name: "S", Value: "s", NoIndex: false, Multiple: false},
+ Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false},
+ },
+ &X0{S: "s", I: 1},
+ "",
+ "",
+ },
+ {
+ "nil-value props",
+ &PropertyList{
+ Property{Name: "I", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "B", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "S", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "F", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "K", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "T", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "J", Value: nil, NoIndex: false, Multiple: true},
+ Property{Name: "J", Value: int64(7), NoIndex: false, Multiple: true},
+ Property{Name: "J", Value: nil, NoIndex: false, Multiple: true},
+ },
+ &struct {
+ I int64
+ B bool
+ S string
+ F float64
+ K *Key
+ T time.Time
+ J []int64
+ }{
+ J: []int64{0, 7, 0},
+ },
+ "",
+ "",
+ },
+ {
+ "save outer load props",
+ &Outer{
+ A: 1,
+ I: []Inner1{
+ {10, "ten"},
+ {20, "twenty"},
+ {30, "thirty"},
+ },
+ J: Inner2{
+ Y: 3.14,
+ },
+ Inner3: Inner3{
+ Z: true,
+ },
+ },
+ &PropertyList{
+ Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false},
+ Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true},
+ Property{Name: "J.Y", Value: float64(3.14), NoIndex: false, Multiple: false},
+ Property{Name: "Z", Value: true, NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load outer-equivalent",
+ &PropertyList{
+ Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false},
+ Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true},
+ Property{Name: "J.Y", Value: float64(3.14), NoIndex: false, Multiple: false},
+ Property{Name: "Z", Value: true, NoIndex: false, Multiple: false},
+ },
+ &OuterEquivalent{
+ A: 1,
+ IDotW: []int32{10, 20, 30},
+ IDotX: []string{"ten", "twenty", "thirty"},
+ JDotY: 3.14,
+ Z: true,
+ },
+ "",
+ "",
+ },
+ {
+ "save outer-equivalent load outer",
+ &OuterEquivalent{
+ A: 1,
+ IDotW: []int32{10, 20, 30},
+ IDotX: []string{"ten", "twenty", "thirty"},
+ JDotY: 3.14,
+ Z: true,
+ },
+ &Outer{
+ A: 1,
+ I: []Inner1{
+ {10, "ten"},
+ {20, "twenty"},
+ {30, "thirty"},
+ },
+ J: Inner2{
+ Y: 3.14,
+ },
+ Inner3: Inner3{
+ Z: true,
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "dotted names save",
+ &Dotted{A: DottedA{B: DottedB{C: 88}}},
+ &PropertyList{
+ Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(88), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "dotted names load",
+ &PropertyList{
+ Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(99), NoIndex: false, Multiple: false},
+ },
+ &Dotted{A: DottedA{B: DottedB{C: 99}}},
+ "",
+ "",
+ },
+ {
+ "save struct load deriver",
+ &X0{S: "s", I: 1},
+ &Deriver{S: "s", Derived: "derived+s"},
+ "",
+ "",
+ },
+ {
+ "save deriver load struct",
+ &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"},
+ &X0{S: "s"},
+ "",
+ "",
+ },
+ {
+ "bad multi-prop entity",
+ &BadMultiPropEntity{},
+ &BadMultiPropEntity{},
+ "Multiple is false",
+ "",
+ },
+ // Regression: CL 25062824 broke handling of appengine.BlobKey fields.
+ {
+ "appengine.BlobKey",
+ &BK{Key: "blah"},
+ &BK{Key: "blah"},
+ "",
+ "",
+ },
+ {
+ "zero time.Time",
+ &T{T: time.Time{}},
+ &T{T: time.Time{}},
+ "",
+ "",
+ },
+ {
+ "time.Time near Unix zero time",
+ &T{T: time.Unix(0, 4e3)},
+ &T{T: time.Unix(0, 4e3)},
+ "",
+ "",
+ },
+ {
+ "time.Time, far in the future",
+ &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)},
+ "",
+ "",
+ },
+ {
+ "time.Time, very far in the past",
+ &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{},
+ "time value out of range",
+ "",
+ },
+ {
+ "time.Time, very far in the future",
+ &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{},
+ "time value out of range",
+ "",
+ },
+ {
+ "structs",
+ &N0{
+ X0: X0{S: "one", I: 2, i: 3},
+ Nonymous: X0{S: "four", I: 5, i: 6},
+ Ignore: "ignore",
+ Other: "other",
+ },
+ &N0{
+ X0: X0{S: "one", I: 2},
+ Nonymous: X0{S: "four", I: 5},
+ Other: "other",
+ },
+ "",
+ "",
+ },
+ {
+ "slice of structs",
+ &N1{
+ X0: X0{S: "one", I: 2, i: 3},
+ Nonymous: []X0{
+ {S: "four", I: 5, i: 6},
+ {S: "seven", I: 8, i: 9},
+ {S: "ten", I: 11, i: 12},
+ {S: "thirteen", I: 14, i: 15},
+ },
+ Ignore: "ignore",
+ Other: "other",
+ },
+ &N1{
+ X0: X0{S: "one", I: 2},
+ Nonymous: []X0{
+ {S: "four", I: 5},
+ {S: "seven", I: 8},
+ {S: "ten", I: 11},
+ {S: "thirteen", I: 14},
+ },
+ Other: "other",
+ },
+ "",
+ "",
+ },
+ {
+ "structs with slices of structs",
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "save structs load props",
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ &PropertyList{
+ Property{Name: "Blue.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu0", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu1", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu2", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu3", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Other", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "Blue.S", Value: "bleu", NoIndex: false, Multiple: false},
+ Property{Name: "green.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.S", Value: "verde0", NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.S", Value: "verde1", NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.S", Value: "verde2", NoIndex: false, Multiple: true},
+ Property{Name: "green.Other", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "green.S", Value: "vert", NoIndex: false, Multiple: false},
+ Property{Name: "red.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.S", Value: "rosso0", NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.S", Value: "rosso1", NoIndex: false, Multiple: true},
+ Property{Name: "red.Other", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "red.S", Value: "rouge", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load structs with ragged fields",
+ &PropertyList{
+ Property{Name: "red.S", Value: "rot", NoIndex: false, Multiple: false},
+ Property{Name: "green.Nonymous.I", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(11), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(12), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(13), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau0", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau1", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(21), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau2", NoIndex: false, Multiple: true},
+ },
+ &N2{
+ N1: N1{
+ X0: X0{S: "rot"},
+ },
+ Green: N1{
+ Nonymous: []X0{
+ {I: 10},
+ {I: 11},
+ {I: 12},
+ {I: 13},
+ },
+ },
+ Blue: N1{
+ Nonymous: []X0{
+ {S: "blau0", I: 20},
+ {S: "blau1", I: 21},
+ {S: "blau2"},
+ },
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "save structs with noindex tags",
+ &struct {
+ A struct {
+ X string `datastore:",noindex"`
+ Y string
+ } `datastore:",noindex"`
+ B struct {
+ X string `datastore:",noindex"`
+ Y string
+ }
+ }{},
+ &PropertyList{
+ Property{Name: "A.X", Value: "", NoIndex: true, Multiple: false},
+ Property{Name: "A.Y", Value: "", NoIndex: true, Multiple: false},
+ Property{Name: "B.X", Value: "", NoIndex: true, Multiple: false},
+ Property{Name: "B.Y", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "embedded struct with name override",
+ &struct {
+ Inner1 `datastore:"foo"`
+ }{},
+ &PropertyList{
+ Property{Name: "foo.W", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "foo.X", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "slice of slices",
+ &SliceOfSlices{},
+ nil,
+ "flattening nested structs leads to a slice of slices",
+ "",
+ },
+ {
+ "recursive struct",
+ &Recursive{},
+ nil,
+ "recursive struct",
+ "",
+ },
+ {
+ "mutually recursive struct",
+ &MutuallyRecursive0{},
+ nil,
+ "recursive struct",
+ "",
+ },
+ {
+ "non-exported struct fields",
+ &struct {
+ i, J int64
+ }{i: 1, J: 2},
+ &PropertyList{
+ Property{Name: "J", Value: int64(2), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "json.RawMessage",
+ &struct {
+ J json.RawMessage
+ }{
+ J: json.RawMessage("rawr"),
+ },
+ &PropertyList{
+ Property{Name: "J", Value: []byte("rawr"), NoIndex: true, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "json.RawMessage to myBlob",
+ &struct {
+ B json.RawMessage
+ }{
+ B: json.RawMessage("rawr"),
+ },
+ &B2{B: myBlob("rawr")},
+ "",
+ "",
+ },
+ {
+ "embedded time field",
+ &SpecialTime{MyTime: EmbeddedTime{now}},
+ &SpecialTime{MyTime: EmbeddedTime{now}},
+ "",
+ "",
+ },
+ {
+ "embedded time load",
+ &PropertyList{
+ Property{Name: "MyTime.", Value: now, NoIndex: false, Multiple: false},
+ },
+ &SpecialTime{MyTime: EmbeddedTime{now}},
+ "",
+ "",
+ },
+}
+
+// checkErr returns the empty string if want and err are both zero, or if
+// want is a non-empty substring of err's string representation; otherwise
+// it returns a non-empty description of the mismatch.
+func checkErr(want string, err error) string {
+ if err != nil {
+ got := err.Error()
+ if want == "" || strings.Index(got, want) == -1 {
+ return got
+ }
+ } else if want != "" {
+ return fmt.Sprintf("want error %q", want)
+ }
+ return ""
+}
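+
+// For example (a sketch):
+//
+// checkErr("type mismatch", errors.New("datastore: type mismatch")) // ""
+// checkErr("", nil)         // ""
+// checkErr("overflow", nil) // `want error "overflow"`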
+
+func TestRoundTrip(t *testing.T) {
+ for _, tc := range testCases {
+ p, err := saveEntity(testAppID, testKey0, tc.src)
+ if s := checkErr(tc.putErr, err); s != "" {
+ t.Errorf("%s: save: %s", tc.desc, s)
+ continue
+ }
+ if p == nil {
+ continue
+ }
+ var got interface{}
+ if _, ok := tc.want.(*PropertyList); ok {
+ got = new(PropertyList)
+ } else {
+ got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
+ }
+ err = loadEntity(got, p)
+ if s := checkErr(tc.getErr, err); s != "" {
+ t.Errorf("%s: load: %s", tc.desc, s)
+ continue
+ }
+ if pl, ok := got.(*PropertyList); ok {
+ // Sort by name to make sure we have a deterministic order.
+ sort.Stable(byName(*pl))
+ }
+ equal := false
+ if gotT, ok := got.(*T); ok {
+ // Round tripping a time.Time can result in a different time.Location: Local instead of UTC.
+ // We therefore test equality explicitly, instead of relying on reflect.DeepEqual.
+ equal = gotT.T.Equal(tc.want.(*T).T)
+ } else {
+ equal = reflect.DeepEqual(got, tc.want)
+ }
+ if !equal {
+ t.Errorf("%s: compare: got %v want %v", tc.desc, got, tc.want)
+ continue
+ }
+ }
+}
+
+type byName PropertyList
+
+func (s byName) Len() int { return len(s) }
+func (s byName) Less(i, j int) bool { return s[i].Name < s[j].Name }
+func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func TestQueryConstruction(t *testing.T) {
+ tests := []struct {
+ q, exp *Query
+ err string
+ }{
+ {
+ q: NewQuery("Foo"),
+ exp: &Query{
+ kind: "Foo",
+ limit: -1,
+ },
+ },
+ {
+ // Regular filtered query with standard spacing.
+ q: NewQuery("Foo").Filter("foo >", 7),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: greaterThan,
+ Value: 7,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Filtered query with no spacing.
+ q: NewQuery("Foo").Filter("foo=", 6),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: equal,
+ Value: 6,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Filtered query with funky spacing.
+ q: NewQuery("Foo").Filter(" foo< ", 8),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: lessThan,
+ Value: 8,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Filtered query with multicharacter op.
+ q: NewQuery("Foo").Filter("foo >=", 9),
+ exp: &Query{
+ kind: "Foo",
+ filter: []filter{
+ {
+ FieldName: "foo",
+ Op: greaterEq,
+ Value: 9,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Query with ordering.
+ q: NewQuery("Foo").Order("bar"),
+ exp: &Query{
+ kind: "Foo",
+ order: []order{
+ {
+ FieldName: "bar",
+ Direction: ascending,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Query with reverse ordering, and funky spacing.
+ q: NewQuery("Foo").Order(" - bar"),
+ exp: &Query{
+ kind: "Foo",
+ order: []order{
+ {
+ FieldName: "bar",
+ Direction: descending,
+ },
+ },
+ limit: -1,
+ },
+ },
+ {
+ // Query with an empty ordering.
+ q: NewQuery("Foo").Order(""),
+ err: "empty order",
+ },
+ {
+ // Query with a + ordering.
+ q: NewQuery("Foo").Order("+bar"),
+ err: "invalid order",
+ },
+ }
+ for i, test := range tests {
+ if test.q.err != nil {
+ got := test.q.err.Error()
+ if !strings.Contains(got, test.err) {
+ t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err)
+ }
+ continue
+ }
+ if !reflect.DeepEqual(test.q, test.exp) {
+ t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp)
+ }
+ }
+}
+
+func TestStringMeaning(t *testing.T) {
+ var xx [4]interface{}
+ xx[0] = &struct {
+ X string
+ }{"xx0"}
+ xx[1] = &struct {
+ X string `datastore:",noindex"`
+ }{"xx1"}
+ xx[2] = &struct {
+ X []byte
+ }{[]byte("xx2")}
+ xx[3] = &struct {
+ X []byte `datastore:",noindex"`
+ }{[]byte("xx3")}
+
+ indexed := [4]bool{
+ true,
+ false,
+ false, // A []byte is always no-index.
+ false,
+ }
+ want := [4]pb.Property_Meaning{
+ pb.Property_NO_MEANING,
+ pb.Property_TEXT,
+ pb.Property_BLOB,
+ pb.Property_BLOB,
+ }
+
+ for i, x := range xx {
+ props, err := SaveStruct(x)
+ if err != nil {
+ t.Errorf("i=%d: SaveStruct: %v", i, err)
+ continue
+ }
+ e, err := propertiesToProto("appID", testKey0, props)
+ if err != nil {
+ t.Errorf("i=%d: propertiesToProto: %v", i, err)
+ continue
+ }
+ var p *pb.Property
+ switch {
+ case indexed[i] && len(e.Property) == 1:
+ p = e.Property[0]
+ case !indexed[i] && len(e.RawProperty) == 1:
+ p = e.RawProperty[0]
+ default:
+ t.Errorf("i=%d: EntityProto did not have expected property slice", i)
+ continue
+ }
+ if got := p.GetMeaning(); got != want[i] {
+ t.Errorf("i=%d: meaning: got %v, want %v", i, got, want[i])
+ continue
+ }
+ }
+}
+
+func TestNamespaceResetting(t *testing.T) {
+	// These environment variables are necessary because *Query.Run will
+	// call internal.FullyQualifiedAppID, which checks these variables and
+	// otherwise falls back to the Metadata service, which is not available
+	// in tests.
+ environ := []struct {
+ key, value string
+ }{
+ {"GAE_LONG_APP_ID", "my-app-id"},
+ {"GAE_PARTITION", "1"},
+ }
+	for i, v := range environ {
+		old := os.Getenv(v.key)
+		os.Setenv(v.key, v.value)
+		// Store the old value back into the slice itself; the loop
+		// variable v is a copy, so assigning to v.value would be lost
+		// and the deferred restore below would see the test values.
+		environ[i].value = old
+	}
+ defer func() { // Restore old environment after the test completes.
+ for _, v := range environ {
+ if v.value == "" {
+ os.Unsetenv(v.key)
+ continue
+ }
+ os.Setenv(v.key, v.value)
+ }
+ }()
+
+ namec := make(chan *string, 1)
+ c0 := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(req *pb.Query, res *pb.QueryResult) error {
+ namec <- req.NameSpace
+ return fmt.Errorf("RPC error")
+ })
+
+ // Check that wrapping c0 in a namespace twice works correctly.
+ c1, err := appengine.Namespace(c0, "A")
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+ c2, err := appengine.Namespace(c1, "") // should act as the original context
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+
+ q := NewQuery("SomeKind")
+
+ q.Run(c0)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`RunQuery with c0: ns = %q, want nil`, *ns)
+ }
+
+ q.Run(c1)
+ if ns := <-namec; ns == nil {
+ t.Error(`RunQuery with c1: ns = nil, want "A"`)
+ } else if *ns != "A" {
+ t.Errorf(`RunQuery with c1: ns = %q, want "A"`, *ns)
+ }
+
+ q.Run(c2)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`RunQuery with c2: ns = %q, want nil`, *ns)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go
new file mode 100644
index 000000000..85616cf27
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/doc.go
@@ -0,0 +1,361 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package datastore provides a client for App Engine's datastore service.
+
+
+Basic Operations
+
+Entities are the unit of storage and are associated with a key. A key
+consists of an optional parent key, a string application ID, a string kind
+(also known as an entity type), and either a StringID or an IntID. A
+StringID is also known as an entity name or key name.
+
+It is valid to create a key with a zero StringID and a zero IntID; this is
+called an incomplete key, and does not refer to any saved entity. Putting an
+entity into the datastore under an incomplete key will cause a unique key
+to be generated for that entity, with a non-zero IntID.
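+
+For example, a minimal sketch (assuming ctx comes from appengine.NewContext
+and Entity is the struct type shown in the example below):
+
+	k := datastore.NewIncompleteKey(ctx, "Entity", nil)
+	key, err := datastore.Put(ctx, k, &Entity{Value: "hello"})
+	if err != nil {
+		// handle err
+	}
+	// key.Incomplete() is now false; key.IntID() is a datastore-chosen ID.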
+
+An entity's contents are a mapping from case-sensitive field names to values.
+Valid value types are:
+ - signed integers (int, int8, int16, int32 and int64),
+ - bool,
+ - string,
+ - float32 and float64,
+ - []byte (up to 1 megabyte in length),
+ - any type whose underlying type is one of the above predeclared types,
+ - ByteString,
+ - *Key,
+ - time.Time (stored with microsecond precision),
+ - appengine.BlobKey,
+ - appengine.GeoPoint,
+ - structs whose fields are all valid value types,
+ - slices of any of the above.
+
+Slices of structs are valid, as are structs that contain slices. However, if
+one struct contains another, then at most one of those can be repeated. This
+disqualifies recursively defined struct types: any struct T that (directly or
+indirectly) contains a []T.
+
+The Get and Put functions load and save an entity's contents. An entity's
+contents are typically represented by a struct pointer.
+
+Example code:
+
+ type Entity struct {
+ Value string
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ ctx := appengine.NewContext(r)
+
+ k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil)
+ e := new(Entity)
+ if err := datastore.Get(ctx, k, e); err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ old := e.Value
+ e.Value = r.URL.Path
+
+ if _, err := datastore.Put(ctx, k, e); err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value)
+ }
+
+GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
+Delete functions. They take a []*Key instead of a *Key, and may return an
+appengine.MultiError when encountering partial failure.
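+
+A sketch of batch loading with partial-failure handling (keys is an assumed
+[]*Key; Entity is the struct type from the example above):
+
+	entities := make([]Entity, len(keys))
+	if err := datastore.GetMulti(ctx, keys, entities); err != nil {
+		if me, ok := err.(appengine.MultiError); ok {
+			for i, e := range me {
+				if e != nil {
+					// entities[i] failed to load; e may be
+					// datastore.ErrNoSuchEntity.
+				}
+			}
+		}
+	}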
+
+
+Properties
+
+An entity's contents can be represented by a variety of types. These are
+typically struct pointers, but can also be any type that implements the
+PropertyLoadSaver interface. If using a struct pointer, you do not have to
+explicitly implement the PropertyLoadSaver interface; the datastore will
+automatically convert via reflection. If a struct pointer does implement that
+interface then those methods will be used in preference to the default
+behavior for struct pointers. Struct pointers are more strongly typed and are
+easier to use; PropertyLoadSavers are more flexible.
+
+The actual types passed do not have to match between Get and Put calls or even
+across different calls to datastore. It is valid to put a *PropertyList and
+get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
+Conceptually, any entity is saved as a sequence of properties, and is loaded
+into the destination value on a property-by-property basis. When loading into
+a struct pointer, an entity that cannot be completely represented (such as a
+missing field) will result in an ErrFieldMismatch error but it is up to the
+caller whether this error is fatal, recoverable or ignorable.
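+
+For instance, a caller that considers field mismatches ignorable might do
+the following (a sketch; k and dst are assumed):
+
+	if err := datastore.Get(ctx, k, &dst); err != nil {
+		if _, ok := err.(*datastore.ErrFieldMismatch); !ok {
+			return err // a genuine failure
+		}
+		// Only a field mismatch: dst is otherwise populated.
+	}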
+
+By default, for struct pointers, all properties are potentially indexed, and
+the property name is the same as the field name (and hence must start with an
+upper case letter).
+
+Fields may have a `datastore:"name,options"` tag. The tag name is the
+property name, which must be one or more valid Go identifiers joined by ".",
+but may start with a lower case letter. An empty tag name means to just use the
+field name. A "-" tag name means that the datastore will ignore that field.
+
+The only valid options are "omitempty" and "noindex".
+
+If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save.
+The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero.
+Struct field values will never be empty.
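+
+For example (a sketch; the type name is illustrative):
+
+	type Draft struct {
+		Note string `datastore:",omitempty"` // omitted on Save when Note == ""
+	}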
+
+If options include "noindex" then the field will not be indexed. All fields are indexed
+by default. Strings or byte slices longer than 1500 bytes cannot be indexed;
+fields used to store long strings and byte slices must be tagged with "noindex"
+or they will cause Put operations to fail.
+
+To use multiple options together, separate them by a comma.
+The order does not matter.
+
+If options is "" then the comma may be omitted.
+
+Example code:
+
+ // A and B are renamed to a and b.
+ // A, C and J are not indexed.
+ // D's tag is equivalent to having no tag at all (E).
+ // I is ignored entirely by the datastore.
+ // J has tag information for both the datastore and json packages.
+ type TaggedStruct struct {
+ A int `datastore:"a,noindex"`
+ B int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+ }
+
+
+Structured Properties
+
+If the struct pointed to contains other structs, then the nested or embedded
+structs are flattened. For example, given these definitions:
+
+ type Inner1 struct {
+ W int32
+ X string
+ }
+
+ type Inner2 struct {
+ Y float64
+ }
+
+ type Inner3 struct {
+ Z bool
+ }
+
+ type Outer struct {
+ A int16
+ I []Inner1
+ J Inner2
+ Inner3
+ }
+
+then an Outer's properties would be equivalent to those of:
+
+ type OuterEquivalent struct {
+ A int16
+ IDotW []int32 `datastore:"I.W"`
+ IDotX []string `datastore:"I.X"`
+ JDotY float64 `datastore:"J.Y"`
+ Z bool
+ }
+
+If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
+equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
+
+If an outer struct is tagged "noindex" then all of its implicit flattened
+fields are effectively "noindex".
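+
+A sketch of the latter, reusing Inner1 from above:
+
+	type NoIndexOuter struct {
+		N Inner1 `datastore:",noindex"` // N.W and N.X are both unindexed
+	}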
+
+
+The PropertyLoadSaver Interface
+
+An entity's contents can also be represented by any type that implements the
+PropertyLoadSaver interface. This type may be a struct pointer, but it does
+not have to be. The datastore package will call Load when getting the entity's
+contents, and Save when putting the entity's contents.
+Possible uses include deriving non-stored fields, verifying fields, or indexing
+a field only if its value is positive.
+
+Example code:
+
+ type CustomPropsExample struct {
+ I, J int
+ // Sum is not stored, but should always be equal to I + J.
+ Sum int `datastore:"-"`
+ }
+
+ func (x *CustomPropsExample) Load(ps []datastore.Property) error {
+ // Load I and J as usual.
+ if err := datastore.LoadStruct(x, ps); err != nil {
+ return err
+ }
+ // Derive the Sum field.
+ x.Sum = x.I + x.J
+ return nil
+ }
+
+ func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
+ // Validate the Sum field.
+ if x.Sum != x.I + x.J {
+ return nil, errors.New("CustomPropsExample has inconsistent sum")
+ }
+ // Save I and J as usual. The code below is equivalent to calling
+ // "return datastore.SaveStruct(x)", but is done manually for
+ // demonstration purposes.
+ return []datastore.Property{
+ {
+ Name: "I",
+ Value: int64(x.I),
+ },
+ {
+ Name: "J",
+ Value: int64(x.J),
+ },
+ }, nil
+ }
+
+The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
+arbitrary entity's contents.
+
+
+Queries
+
+Queries retrieve entities based on their properties or key's ancestry. Running
+a query yields an iterator of results: either keys or (key, entity) pairs.
+Queries are re-usable and it is safe to call Query.Run from concurrent
+goroutines. Iterators are not safe for concurrent use.
+
+Queries are immutable, and are either created by calling NewQuery, or derived
+from an existing query by calling a method like Filter or Order that returns a
+new query value. A query is typically constructed by calling NewQuery followed
+by a chain of zero or more such methods. These methods are:
+ - Ancestor and Filter constrain the entities returned by running a query.
+ - Order affects the order in which they are returned.
+ - Project constrains the fields returned.
+ - Distinct de-duplicates projected entities.
+ - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
+ - Start, End, Offset and Limit define which sub-sequence of matching entities
+ to return. Start and End take cursors, Offset and Limit take integers. Start
+ and Offset affect the first result, End and Limit affect the last result.
+ If both Start and Offset are set, then the offset is relative to Start.
+ If both End and Limit are set, then the earliest constraint wins. Limit is
+ relative to Start+Offset, not relative to End. As a special case, a
+ negative limit means unlimited.
+
+Example code:
+
+ type Widget struct {
+ Description string
+ Price int
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ ctx := appengine.NewContext(r)
+ q := datastore.NewQuery("Widget").
+ Filter("Price <", 1000).
+ Order("-Price")
+ b := new(bytes.Buffer)
+ for t := q.Run(ctx); ; {
+ var x Widget
+ key, err := t.Next(&x)
+ if err == datastore.Done {
+ break
+ }
+ if err != nil {
+ serveError(ctx, w, err)
+ return
+ }
+ fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ io.Copy(w, b)
+ }
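+
+A sketch of resuming iteration with a cursor (savedCursor is assumed to hold
+a string previously obtained from Iterator.Cursor):
+
+	cur, err := datastore.DecodeCursor(savedCursor)
+	if err != nil {
+		serveError(ctx, w, err)
+		return
+	}
+	t := datastore.NewQuery("Widget").Start(cur).Run(ctx)
+	// Iterate with t.Next as above.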
+
+
+Transactions
+
+RunInTransaction runs a function in a transaction.
+
+Example code:
+
+ type Counter struct {
+ Count int
+ }
+
+ func inc(ctx context.Context, key *datastore.Key) (int, error) {
+ var x Counter
+ if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
+ return 0, err
+ }
+ x.Count++
+ if _, err := datastore.Put(ctx, key, &x); err != nil {
+ return 0, err
+ }
+ return x.Count, nil
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ ctx := appengine.NewContext(r)
+ var count int
+ err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
+ var err1 error
+ count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil))
+ return err1
+ }, nil)
+ if err != nil {
+ serveError(ctx, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprintf(w, "Count=%d", count)
+ }
+
+
+Metadata
+
+The datastore package provides access to some of App Engine's datastore
+metadata. This metadata includes information about the entity groups,
+namespaces, entity kinds, and properties in the datastore, as well as the
+property representations for each property.
+
+Example code:
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ // Print all the kinds in the datastore, with all the indexed
+ // properties (and their representations) for each.
+ ctx := appengine.NewContext(r)
+
+ kinds, err := datastore.Kinds(ctx)
+ if err != nil {
+ serveError(ctx, w, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ for _, kind := range kinds {
+ fmt.Fprintf(w, "%s:\n", kind)
+ props, err := datastore.KindProperties(ctx, kind)
+ if err != nil {
+ fmt.Fprintln(w, "\t(unable to retrieve properties)")
+ continue
+ }
+ for p, rep := range props {
+ fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(rep, ", "))
+ }
+ }
+ }
+*/
+package datastore // import "google.golang.org/appengine/datastore"
diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go
new file mode 100644
index 000000000..ac1f00250
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/key.go
@@ -0,0 +1,309 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+// Key represents the datastore key for a stored entity, and is immutable.
+type Key struct {
+ kind string
+ stringID string
+ intID int64
+ parent *Key
+ appID string
+ namespace string
+}
+
+// Kind returns the key's kind (also known as entity type).
+func (k *Key) Kind() string {
+ return k.kind
+}
+
+// StringID returns the key's string ID (also known as an entity name or key
+// name), which may be "".
+func (k *Key) StringID() string {
+ return k.stringID
+}
+
+// IntID returns the key's integer ID, which may be 0.
+func (k *Key) IntID() int64 {
+ return k.intID
+}
+
+// Parent returns the key's parent key, which may be nil.
+func (k *Key) Parent() *Key {
+ return k.parent
+}
+
+// AppID returns the key's application ID.
+func (k *Key) AppID() string {
+ return k.appID
+}
+
+// Namespace returns the key's namespace.
+func (k *Key) Namespace() string {
+ return k.namespace
+}
+
+// Incomplete returns whether the key does not refer to a stored entity.
+// In particular, whether the key has a zero StringID and a zero IntID.
+func (k *Key) Incomplete() bool {
+ return k.stringID == "" && k.intID == 0
+}
+
+// valid returns whether the key is valid.
+func (k *Key) valid() bool {
+ if k == nil {
+ return false
+ }
+ for ; k != nil; k = k.parent {
+ if k.kind == "" || k.appID == "" {
+ return false
+ }
+ if k.stringID != "" && k.intID != 0 {
+ return false
+ }
+ if k.parent != nil {
+ if k.parent.Incomplete() {
+ return false
+ }
+ if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Equal returns whether two keys are equal.
+func (k *Key) Equal(o *Key) bool {
+ for k != nil && o != nil {
+ if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
+ return false
+ }
+ k, o = k.parent, o.parent
+ }
+ return k == o
+}
+
+// root returns the furthest ancestor of a key, which may be itself.
+func (k *Key) root() *Key {
+ for k.parent != nil {
+ k = k.parent
+ }
+ return k
+}
+
+// marshal marshals the key's string representation to the buffer.
+func (k *Key) marshal(b *bytes.Buffer) {
+ if k.parent != nil {
+ k.parent.marshal(b)
+ }
+ b.WriteByte('/')
+ b.WriteString(k.kind)
+ b.WriteByte(',')
+ if k.stringID != "" {
+ b.WriteString(k.stringID)
+ } else {
+ b.WriteString(strconv.FormatInt(k.intID, 10))
+ }
+}
+
+// String returns a string representation of the key.
+func (k *Key) String() string {
+ if k == nil {
+ return ""
+ }
+ b := bytes.NewBuffer(make([]byte, 0, 512))
+ k.marshal(b)
+ return b.String()
+}
+
+type gobKey struct {
+ Kind string
+ StringID string
+ IntID int64
+ Parent *gobKey
+ AppID string
+ Namespace string
+}
+
+func keyToGobKey(k *Key) *gobKey {
+ if k == nil {
+ return nil
+ }
+ return &gobKey{
+ Kind: k.kind,
+ StringID: k.stringID,
+ IntID: k.intID,
+ Parent: keyToGobKey(k.parent),
+ AppID: k.appID,
+ Namespace: k.namespace,
+ }
+}
+
+func gobKeyToKey(gk *gobKey) *Key {
+ if gk == nil {
+ return nil
+ }
+ return &Key{
+ kind: gk.Kind,
+ stringID: gk.StringID,
+ intID: gk.IntID,
+ parent: gobKeyToKey(gk.Parent),
+ appID: gk.AppID,
+ namespace: gk.Namespace,
+ }
+}
+
+func (k *Key) GobEncode() ([]byte, error) {
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func (k *Key) GobDecode(buf []byte) error {
+ gk := new(gobKey)
+ if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
+ return err
+ }
+ *k = *gobKeyToKey(gk)
+ return nil
+}
+
+func (k *Key) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + k.Encode() + `"`), nil
+}
+
+func (k *Key) UnmarshalJSON(buf []byte) error {
+ if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
+ return errors.New("datastore: bad JSON key")
+ }
+ k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
+ if err != nil {
+ return err
+ }
+ *k = *k2
+ return nil
+}
+
+// Encode returns an opaque representation of the key
+// suitable for use in HTML and URLs.
+// This is compatible with the Python and Java runtimes.
+func (k *Key) Encode() string {
+ ref := keyToProto("", k)
+
+ b, err := proto.Marshal(ref)
+ if err != nil {
+ panic(err)
+ }
+
+ // Trailing padding is stripped.
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeKey decodes a key from the opaque representation returned by Encode.
+func DecodeKey(encoded string) (*Key, error) {
+ // Re-add padding.
+ if m := len(encoded) % 4; m != 0 {
+ encoded += strings.Repeat("=", 4-m)
+ }
+
+ b, err := base64.URLEncoding.DecodeString(encoded)
+ if err != nil {
+ return nil, err
+ }
+
+ ref := new(pb.Reference)
+ if err := proto.Unmarshal(b, ref); err != nil {
+ return nil, err
+ }
+
+ return protoToKey(ref)
+}
+
+// NewIncompleteKey creates a new incomplete key.
+// kind cannot be empty.
+func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key {
+ return NewKey(c, kind, "", 0, parent)
+}
+
+// NewKey creates a new key.
+// kind cannot be empty.
+// Either one or both of stringID and intID must be zero. If both are zero,
+// the key returned is incomplete.
+// parent must either be a complete key or nil.
+func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key {
+ // If there's a parent key, use its namespace.
+ // Otherwise, use any namespace attached to the context.
+ var namespace string
+ if parent != nil {
+ namespace = parent.namespace
+ } else {
+ namespace = internal.NamespaceFromContext(c)
+ }
+
+ return &Key{
+ kind: kind,
+ stringID: stringID,
+ intID: intID,
+ parent: parent,
+ appID: internal.FullyQualifiedAppID(c),
+ namespace: namespace,
+ }
+}
+
+// AllocateIDs returns a range of n integer IDs with the given kind and parent
+// combination. kind cannot be empty; parent may be nil. The IDs in the range
+// returned will not be used by the datastore's automatic ID sequence generator
+// and may be used with NewKey without conflict.
+//
+// The range is inclusive at the low end and exclusive at the high end. In
+// other words, valid intIDs x satisfy low <= x && x < high.
+//
+// If no error is returned, low + n == high.
+func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) {
+ if kind == "" {
+ return 0, 0, errors.New("datastore: AllocateIDs given an empty kind")
+ }
+ if n < 0 {
+ return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n)
+ }
+ if n == 0 {
+ return 0, 0, nil
+ }
+ req := &pb.AllocateIdsRequest{
+ ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
+ Size: proto.Int64(int64(n)),
+ }
+ res := &pb.AllocateIdsResponse{}
+ if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil {
+ return 0, 0, err
+ }
+ // The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops)
+ // is inclusive at the low end and exclusive at the high end, so we add 1.
+ low = res.GetStart()
+ high = res.GetEnd() + 1
+ if low+int64(n) != high {
+ return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n)
+ }
+ return low, high, nil
+}
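+
+// Example (sketch, with a hypothetical Widget kind): a caller that reserves
+// ten IDs up front can build complete keys from them without colliding with
+// automatic ID allocation:
+//
+//	low, _, err := AllocateIDs(ctx, "Widget", nil, 10)
+//	if err == nil {
+//		k := NewKey(ctx, "Widget", "", low, nil)
+//		_ = k // keys low, low+1, ..., low+9 are all reserved
+//	}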
diff --git a/vendor/google.golang.org/appengine/datastore/key_test.go b/vendor/google.golang.org/appengine/datastore/key_test.go
new file mode 100644
index 000000000..1fb3e9752
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/key_test.go
@@ -0,0 +1,204 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "testing"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+func TestKeyEncoding(t *testing.T) {
+ testCases := []struct {
+ desc string
+ key *Key
+ exp string
+ }{
+ {
+ desc: "A simple key with an int ID",
+ key: &Key{
+ kind: "Person",
+ intID: 1,
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIMCxIGUGVyc29uGAEM",
+ },
+ {
+ desc: "A simple key with a string ID",
+ key: &Key{
+ kind: "Graph",
+ stringID: "graph:7-day-active",
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIdCxIFR3JhcGgiEmdyYXBoOjctZGF5LWFjdGl2ZQw",
+ },
+ {
+ desc: "A key with a parent",
+ key: &Key{
+ kind: "WordIndex",
+ intID: 1033,
+ parent: &Key{
+ kind: "WordIndex",
+ intID: 1020032,
+ appID: "glibrary",
+ },
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIhCxIJV29yZEluZGV4GIChPgwLEglXb3JkSW5kZXgYiQgM",
+ },
+ }
+ for _, tc := range testCases {
+ enc := tc.key.Encode()
+ if enc != tc.exp {
+ t.Errorf("%s: got %q, want %q", tc.desc, enc, tc.exp)
+ }
+
+ key, err := DecodeKey(tc.exp)
+ if err != nil {
+ t.Errorf("%s: failed decoding key: %v", tc.desc, err)
+ continue
+ }
+ if !key.Equal(tc.key) {
+ t.Errorf("%s: decoded key %v, want %v", tc.desc, key, tc.key)
+ }
+ }
+}
+
+func TestKeyGob(t *testing.T) {
+ k := &Key{
+ kind: "Gopher",
+ intID: 3,
+ parent: &Key{
+ kind: "Mom",
+ stringID: "narwhal",
+ appID: "gopher-con",
+ },
+ appID: "gopher-con",
+ }
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(k); err != nil {
+ t.Fatalf("gob encode failed: %v", err)
+ }
+
+ k2 := new(Key)
+ if err := gob.NewDecoder(buf).Decode(k2); err != nil {
+ t.Fatalf("gob decode failed: %v", err)
+ }
+ if !k2.Equal(k) {
+ t.Errorf("gob round trip of %v produced %v", k, k2)
+ }
+}
+
+func TestNilKeyGob(t *testing.T) {
+ type S struct {
+ Key *Key
+ }
+ s1 := new(S)
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(s1); err != nil {
+ t.Fatalf("gob encode failed: %v", err)
+ }
+
+ s2 := new(S)
+ if err := gob.NewDecoder(buf).Decode(s2); err != nil {
+ t.Fatalf("gob decode failed: %v", err)
+ }
+ if s2.Key != nil {
+ t.Errorf("gob round trip of nil key produced %v", s2.Key)
+ }
+}
+
+func TestKeyJSON(t *testing.T) {
+ k := &Key{
+ kind: "Gopher",
+ intID: 2,
+ parent: &Key{
+ kind: "Mom",
+ stringID: "narwhal",
+ appID: "gopher-con",
+ },
+ appID: "gopher-con",
+ }
+ exp := `"` + k.Encode() + `"`
+
+ buf, err := json.Marshal(k)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+ if s := string(buf); s != exp {
+ t.Errorf("JSON encoding of key %v: got %q, want %q", k, s, exp)
+ }
+
+ k2 := new(Key)
+ if err := json.Unmarshal(buf, k2); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !k2.Equal(k) {
+ t.Errorf("JSON round trip of %v produced %v", k, k2)
+ }
+}
+
+func TestNilKeyJSON(t *testing.T) {
+ type S struct {
+ Key *Key
+ }
+ s1 := new(S)
+
+ buf, err := json.Marshal(s1)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+
+ s2 := new(S)
+ if err := json.Unmarshal(buf, s2); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if s2.Key != nil {
+ t.Errorf("JSON round trip of nil key produced %v", s2.Key)
+ }
+}
+
+func TestIncompleteKeyWithParent(t *testing.T) {
+ c := internal.WithAppIDOverride(context.Background(), "s~some-app")
+
+ // fadduh is a complete key.
+ fadduh := NewKey(c, "Person", "", 1, nil)
+ if fadduh.Incomplete() {
+ t.Fatalf("fadduh is incomplete")
+ }
+
+ // robert is an incomplete key with fadduh as a parent.
+ robert := NewIncompleteKey(c, "Person", fadduh)
+ if !robert.Incomplete() {
+ t.Fatalf("robert is complete")
+ }
+
+ // Both should be valid keys.
+ if !fadduh.valid() {
+ t.Errorf("fadduh is invalid: %v", fadduh)
+ }
+ if !robert.valid() {
+ t.Errorf("robert is invalid: %v", robert)
+ }
+}
+
+func TestNamespace(t *testing.T) {
+ key := &Key{
+ kind: "Person",
+ intID: 1,
+ appID: "s~some-app",
+ namespace: "mynamespace",
+ }
+ if g, w := key.Namespace(), "mynamespace"; g != w {
+ t.Errorf("key.Namespace() = %q, want %q", g, w)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/load.go b/vendor/google.golang.org/appengine/datastore/load.go
new file mode 100644
index 000000000..38a636539
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/load.go
@@ -0,0 +1,429 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ typeOfBlobKey = reflect.TypeOf(appengine.BlobKey(""))
+ typeOfByteSlice = reflect.TypeOf([]byte(nil))
+ typeOfByteString = reflect.TypeOf(ByteString(nil))
+ typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{})
+ typeOfTime = reflect.TypeOf(time.Time{})
+ typeOfKeyPtr = reflect.TypeOf(&Key{})
+ typeOfEntityPtr = reflect.TypeOf(&Entity{})
+)
+
+// typeMismatchReason returns a string explaining why the property p could not
+// be stored in an entity field of type v.Type().
+func typeMismatchReason(pValue interface{}, v reflect.Value) string {
+ entityType := "empty"
+ switch pValue.(type) {
+ case int64:
+ entityType = "int"
+ case bool:
+ entityType = "bool"
+ case string:
+ entityType = "string"
+ case float64:
+ entityType = "float"
+ case *Key:
+ entityType = "*datastore.Key"
+ case time.Time:
+ entityType = "time.Time"
+ case appengine.BlobKey:
+ entityType = "appengine.BlobKey"
+ case appengine.GeoPoint:
+ entityType = "appengine.GeoPoint"
+ case ByteString:
+ entityType = "datastore.ByteString"
+ case []byte:
+ entityType = "[]byte"
+ }
+ return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
+}
+
+type propertyLoader struct {
+ // m holds the number of times a substruct field like "Foo.Bar.Baz" has
+ // been seen so far. The map is constructed lazily.
+ m map[string]int
+}
+
+func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string {
+ var v reflect.Value
+ var sliceIndex int
+
+ name := p.Name
+
+ // If name ends with a '.', the last field is anonymous.
+ // In this case, strings.Split will give us "" as the
+ // last element of our fields slice, which will match the ""
+ // field name in the substruct codec.
+ fields := strings.Split(name, ".")
+
+ for len(fields) > 0 {
+ var decoder fieldCodec
+ var ok bool
+
+ // Cut off the last field (delimited by ".") and find its parent
+ // in the codec.
+ // eg. for name "A.B.C.D", split off "A.B.C" and try to
+ // find a field in the codec with this name.
+ // Loop again with "A.B", etc.
+ for i := len(fields); i > 0; i-- {
+ parent := strings.Join(fields[:i], ".")
+ decoder, ok = codec.fields[parent]
+ if ok {
+ fields = fields[i:]
+ break
+ }
+ }
+
+		// If we never found a matching field in the codec, return
+		// an error message.
+ if !ok {
+ return "no such struct field"
+ }
+
+ v = initField(structValue, decoder.path)
+ if !v.IsValid() {
+ return "no such struct field"
+ }
+ if !v.CanSet() {
+ return "cannot set struct field"
+ }
+
+ if decoder.structCodec != nil {
+ codec = decoder.structCodec
+ structValue = v
+ }
+
+ if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice {
+ if l.m == nil {
+ l.m = make(map[string]int)
+ }
+ sliceIndex = l.m[p.Name]
+ l.m[p.Name] = sliceIndex + 1
+ for v.Len() <= sliceIndex {
+ v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
+ }
+ structValue = v.Index(sliceIndex)
+ requireSlice = false
+ }
+ }
+
+ var slice reflect.Value
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ slice = v
+ v = reflect.New(v.Type().Elem()).Elem()
+ } else if requireSlice {
+ return "multiple-valued property requires a slice field type"
+ }
+
+ // Convert indexValues to a Go value with a meaning derived from the
+ // destination type.
+ pValue := p.Value
+ if iv, ok := pValue.(indexValue); ok {
+ meaning := pb.Property_NO_MEANING
+ switch v.Type() {
+ case typeOfBlobKey:
+ meaning = pb.Property_BLOBKEY
+ case typeOfByteSlice:
+ meaning = pb.Property_BLOB
+ case typeOfByteString:
+ meaning = pb.Property_BYTESTRING
+ case typeOfGeoPoint:
+ meaning = pb.Property_GEORSS_POINT
+ case typeOfTime:
+ meaning = pb.Property_GD_WHEN
+ case typeOfEntityPtr:
+ meaning = pb.Property_ENTITY_PROTO
+ }
+ var err error
+ pValue, err = propValue(iv.value, meaning)
+ if err != nil {
+ return err.Error()
+ }
+ }
+
+ if errReason := setVal(v, pValue); errReason != "" {
+ // Set the slice back to its zero value.
+ if slice.IsValid() {
+ slice.Set(reflect.Zero(slice.Type()))
+ }
+ return errReason
+ }
+
+ if slice.IsValid() {
+ slice.Index(sliceIndex).Set(v)
+ }
+
+ return ""
+}
+
+// setVal sets v to the value pValue.
+func setVal(v reflect.Value, pValue interface{}) string {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x, ok := pValue.(int64)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ if v.OverflowInt(x) {
+ return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+ }
+ v.SetInt(x)
+ case reflect.Bool:
+ x, ok := pValue.(bool)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ v.SetBool(x)
+ case reflect.String:
+ switch x := pValue.(type) {
+ case appengine.BlobKey:
+ v.SetString(string(x))
+ case ByteString:
+ v.SetString(string(x))
+ case string:
+ v.SetString(x)
+ default:
+ if pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ x, ok := pValue.(float64)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ if v.OverflowFloat(x) {
+ return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+ }
+ v.SetFloat(x)
+ case reflect.Ptr:
+ x, ok := pValue.(*Key)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ if _, ok := v.Interface().(*Key); !ok {
+ return typeMismatchReason(pValue, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ case reflect.Struct:
+ switch v.Type() {
+ case typeOfTime:
+ x, ok := pValue.(time.Time)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ case typeOfGeoPoint:
+ x, ok := pValue.(appengine.GeoPoint)
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ default:
+ ent, ok := pValue.(*Entity)
+ if !ok {
+ return typeMismatchReason(pValue, v)
+ }
+
+ // Recursively load nested struct
+ pls, err := newStructPLS(v.Addr().Interface())
+ if err != nil {
+ return err.Error()
+ }
+
+ // if ent has a Key value and our struct has a Key field,
+ // load the Entity's Key value into the Key field on the struct.
+ if ent.Key != nil && pls.codec.keyField != -1 {
+ pls.v.Field(pls.codec.keyField).Set(reflect.ValueOf(ent.Key))
+ }
+
+ err = pls.Load(ent.Properties)
+ if err != nil {
+ return err.Error()
+ }
+ }
+ case reflect.Slice:
+ x, ok := pValue.([]byte)
+ if !ok {
+ if y, yok := pValue.(ByteString); yok {
+ x, ok = []byte(y), true
+ }
+ }
+ if !ok && pValue != nil {
+ return typeMismatchReason(pValue, v)
+ }
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ return typeMismatchReason(pValue, v)
+ }
+ v.SetBytes(x)
+ default:
+ return typeMismatchReason(pValue, v)
+ }
+ return ""
+}
+
+// initField is similar to reflect's Value.FieldByIndex, in that it
+// returns the nested struct field corresponding to index, but it
+// initialises any nil pointers encountered when traversing the structure.
+func initField(val reflect.Value, index []int) reflect.Value {
+ for _, i := range index[:len(index)-1] {
+ val = val.Field(i)
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+ val = val.Elem()
+ }
+ }
+ return val.Field(index[len(index)-1])
+}
+
+// loadEntity loads an EntityProto into a PropertyLoadSaver or struct pointer.
+func loadEntity(dst interface{}, src *pb.EntityProto) (err error) {
+ ent, err := protoToEntity(src)
+ if err != nil {
+ return err
+ }
+ if e, ok := dst.(PropertyLoadSaver); ok {
+ return e.Load(ent.Properties)
+ }
+ return LoadStruct(dst, ent.Properties)
+}
+
+func (s structPLS) Load(props []Property) error {
+ var fieldName, reason string
+ var l propertyLoader
+ for _, p := range props {
+ if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" {
+ // We don't return early, as we try to load as many properties as possible.
+ // It is valid to load an entity into a struct that cannot fully represent it.
+ // That case returns an error, but the caller is free to ignore it.
+ fieldName, reason = p.Name, errStr
+ }
+ }
+ if reason != "" {
+ return &ErrFieldMismatch{
+ StructType: s.v.Type(),
+ FieldName: fieldName,
+ Reason: reason,
+ }
+ }
+ return nil
+}
+
+func protoToEntity(src *pb.EntityProto) (*Entity, error) {
+ props, rawProps := src.Property, src.RawProperty
+ outProps := make([]Property, 0, len(props)+len(rawProps))
+ for {
+ var (
+ x *pb.Property
+ noIndex bool
+ )
+ if len(props) > 0 {
+ x, props = props[0], props[1:]
+ } else if len(rawProps) > 0 {
+ x, rawProps = rawProps[0], rawProps[1:]
+ noIndex = true
+ } else {
+ break
+ }
+
+ var value interface{}
+ if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE {
+ value = indexValue{x.Value}
+ } else {
+ var err error
+ value, err = propValue(x.Value, x.GetMeaning())
+ if err != nil {
+ return nil, err
+ }
+ }
+ outProps = append(outProps, Property{
+ Name: x.GetName(),
+ Value: value,
+ NoIndex: noIndex,
+ Multiple: x.GetMultiple(),
+ })
+ }
+
+ var key *Key
+ if src.Key != nil {
+ // Ignore any error, since nested entity values
+ // are allowed to have an invalid key.
+ key, _ = protoToKey(src.Key)
+ }
+ return &Entity{key, outProps}, nil
+}
+
+// propValue returns a Go value that combines the raw PropertyValue with a
+// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time.
+func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) {
+ switch {
+ case v.Int64Value != nil:
+ if m == pb.Property_GD_WHEN {
+ return fromUnixMicro(*v.Int64Value), nil
+ } else {
+ return *v.Int64Value, nil
+ }
+ case v.BooleanValue != nil:
+ return *v.BooleanValue, nil
+ case v.StringValue != nil:
+ if m == pb.Property_BLOB {
+ return []byte(*v.StringValue), nil
+ } else if m == pb.Property_BLOBKEY {
+ return appengine.BlobKey(*v.StringValue), nil
+ } else if m == pb.Property_BYTESTRING {
+ return ByteString(*v.StringValue), nil
+ } else if m == pb.Property_ENTITY_PROTO {
+ var ent pb.EntityProto
+ err := proto.Unmarshal([]byte(*v.StringValue), &ent)
+ if err != nil {
+ return nil, err
+ }
+ return protoToEntity(&ent)
+ } else {
+ return *v.StringValue, nil
+ }
+ case v.DoubleValue != nil:
+ return *v.DoubleValue, nil
+ case v.Referencevalue != nil:
+ key, err := referenceValueToKey(v.Referencevalue)
+ if err != nil {
+ return nil, err
+ }
+ return key, nil
+ case v.Pointvalue != nil:
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil
+ }
+ return nil, nil
+}
+
+// indexValue is a Property value that is created when entities are loaded from
+// an index, such as from a projection query.
+//
+// Such Property values do not contain all of the metadata required to be
+// faithfully represented as a Go value, and are instead represented as an
+// opaque indexValue. Load the properties into a concrete struct type (e.g. by
+// passing a struct pointer to Iterator.Next) to reconstruct actual Go values
+// of type int, string, time.Time, etc.
+type indexValue struct {
+ value *pb.PropertyValue
+}
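+
+// For example (sketch, with a hypothetical Widget kind): projected values
+// load cleanly into typed struct fields,
+//
+//	var ws []Widget
+//	_, err := NewQuery("Widget").Project("Price").GetAll(ctx, &ws)
+//
+// whereas loading the same results into a PropertyList would surface them
+// only as opaque index values.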
diff --git a/vendor/google.golang.org/appengine/datastore/load_test.go b/vendor/google.golang.org/appengine/datastore/load_test.go
new file mode 100644
index 000000000..46029bba5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/load_test.go
@@ -0,0 +1,656 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "reflect"
+ "testing"
+
+ proto "github.com/golang/protobuf/proto"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+type Simple struct {
+ I int64
+}
+
+type SimpleWithTag struct {
+ I int64 `datastore:"II"`
+}
+
+type NestedSimpleWithTag struct {
+ A SimpleWithTag `datastore:"AA"`
+}
+
+type NestedSliceOfSimple struct {
+ A []Simple
+}
+
+type SimpleTwoFields struct {
+ S string
+ SS string
+}
+
+type NestedSimpleAnonymous struct {
+ Simple
+ X string
+}
+
+type NestedSimple struct {
+ A Simple
+ I int64
+}
+
+type NestedSimple1 struct {
+ A Simple
+ X string
+}
+
+type NestedSimple2X struct {
+ AA NestedSimple
+ A SimpleTwoFields
+ S string
+}
+
+type BDotB struct {
+ B string `datastore:"B.B"`
+}
+
+type ABDotB struct {
+ A BDotB
+}
+
+type MultiAnonymous struct {
+ Simple
+ SimpleTwoFields
+ X string
+}
+
+var (
+ // these values need to be addressable
+ testString2 = "two"
+ testString3 = "three"
+ testInt64 = int64(2)
+
+ fieldNameI = "I"
+ fieldNameX = "X"
+ fieldNameS = "S"
+ fieldNameSS = "SS"
+ fieldNameADotI = "A.I"
+ fieldNameAADotII = "AA.II"
+ fieldNameADotBDotB = "A.B.B"
+)
+
+func TestLoadEntityNestedLegacy(t *testing.T) {
+ testCases := []struct {
+ desc string
+ src *pb.EntityProto
+ want interface{}
+ }{
+ {
+ "nested",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameADotI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ },
+ },
+ &NestedSimple1{
+ A: Simple{I: testInt64},
+ X: testString2,
+ },
+ },
+ {
+ "nested with tag",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameAADotII,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ },
+ },
+ &NestedSimpleWithTag{
+ A: SimpleWithTag{I: testInt64},
+ },
+ },
+ {
+ "nested with anonymous struct field",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ },
+ },
+ &NestedSimpleAnonymous{
+ Simple: Simple{I: testInt64},
+ X: testString2,
+ },
+ },
+ {
+ "nested with dotted field tag",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameADotBDotB,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ },
+ },
+ &ABDotB{
+ A: BDotB{
+ B: testString2,
+ },
+ },
+ },
+ {
+ "nested with dotted field tag",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameSS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ },
+ },
+ },
+ &MultiAnonymous{
+ Simple: Simple{I: testInt64},
+ SimpleTwoFields: SimpleTwoFields{S: "two", SS: "three"},
+ X: "three",
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
+ err := loadEntity(dst, tc.src)
+ if err != nil {
+ t.Errorf("loadEntity: %s: %v", tc.desc, err)
+ continue
+ }
+
+ if !reflect.DeepEqual(tc.want, dst) {
+ t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
+ }
+ }
+}
+
+type WithKey struct {
+ X string
+ I int64
+ K *Key `datastore:"__key__"`
+}
+
+type NestedWithKey struct {
+ N WithKey
+ Y string
+}
+
+var (
+ incompleteKey = newKey("", nil)
+ invalidKey = newKey("s", incompleteKey)
+
+ // these values need to be addressable
+ fieldNameA = "A"
+ fieldNameK = "K"
+ fieldNameN = "N"
+ fieldNameY = "Y"
+ fieldNameAA = "AA"
+ fieldNameII = "II"
+ fieldNameBDotB = "B.B"
+
+ entityProtoMeaning = pb.Property_ENTITY_PROTO
+
+ TRUE = true
+ FALSE = false
+)
+
+var (
+ simpleEntityProto, nestedSimpleEntityProto,
+ simpleTwoFieldsEntityProto, simpleWithTagEntityProto,
+ bDotBEntityProto, withKeyEntityProto string
+)
+
+func init() {
+ // simpleEntityProto corresponds to:
+ // Simple{I: testInt64}
+ simpleEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", incompleteKey),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ simpleEntityProto = string(simpleEntityProtob)
+
+ // nestedSimpleEntityProto corresponds to:
+ // NestedSimple{
+ // A: Simple{I: testInt64},
+ // I: testInt64,
+ // }
+ nestedSimpleEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", incompleteKey),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleEntityProto,
+ },
+ Multiple: &FALSE,
+ },
+ &pb.Property{
+ Name: &fieldNameI,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ nestedSimpleEntityProto = string(nestedSimpleEntityProtob)
+
+ // simpleTwoFieldsEntityProto corresponds to:
+ // SimpleTwoFields{S: testString2, SS: testString3}
+ simpleTwoFieldsEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", incompleteKey),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ Multiple: &FALSE,
+ },
+ &pb.Property{
+ Name: &fieldNameSS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ simpleTwoFieldsEntityProto = string(simpleTwoFieldsEntityProtob)
+
+ // simpleWithTagEntityProto corresponds to:
+ // SimpleWithTag{I: testInt64}
+ simpleWithTagEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", incompleteKey),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameII,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ simpleWithTagEntityProto = string(simpleWithTagEntityProtob)
+
+ // bDotBEntityProto corresponds to:
+ // BDotB{
+ // B: testString2,
+ // }
+ bDotBEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", incompleteKey),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameBDotB,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ bDotBEntityProto = string(bDotBEntityProtob)
+
+ // withKeyEntityProto corresponds to:
+ // WithKey{
+ // X: testString3,
+ // I: testInt64,
+ // K: testKey1a,
+ // }
+ withKeyEntityProtob, err := proto.Marshal(&pb.EntityProto{
+ Key: keyToProto("", testKey1a),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ Multiple: &FALSE,
+ },
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ Multiple: &FALSE,
+ },
+ },
+ EntityGroup: &pb.Path{},
+ })
+ if err != nil {
+ panic(err)
+ }
+ withKeyEntityProto = string(withKeyEntityProtob)
+
+}
+
+func TestLoadEntityNested(t *testing.T) {
+ testCases := []struct {
+ desc string
+ src *pb.EntityProto
+ want interface{}
+ }{
+ {
+ "nested basic",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleEntityProto,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ },
+ },
+ &NestedSimple{
+ A: Simple{I: 2},
+ I: 2,
+ },
+ },
+ {
+ "nested with struct tags",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameAA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleWithTagEntityProto,
+ },
+ },
+ },
+ },
+ &NestedSimpleWithTag{
+ A: SimpleWithTag{I: testInt64},
+ },
+ },
+ {
+ "nested 2x",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameAA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &nestedSimpleEntityProto,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleTwoFieldsEntityProto,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ },
+ },
+ },
+ &NestedSimple2X{
+ AA: NestedSimple{
+ A: Simple{I: testInt64},
+ I: testInt64,
+ },
+ A: SimpleTwoFields{S: testString2, SS: testString3},
+ S: testString3,
+ },
+ },
+ {
+ "nested anonymous",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ },
+ },
+ &NestedSimpleAnonymous{
+ Simple: Simple{I: testInt64},
+ X: testString2,
+ },
+ },
+ {
+ "nested simple with slice",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Multiple: &TRUE,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleEntityProto,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Multiple: &TRUE,
+ Value: &pb.PropertyValue{
+ StringValue: &simpleEntityProto,
+ },
+ },
+ },
+ },
+ &NestedSliceOfSimple{
+ A: []Simple{Simple{I: testInt64}, Simple{I: testInt64}},
+ },
+ },
+ {
+ "nested with multiple anonymous fields",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameI,
+ Value: &pb.PropertyValue{
+ Int64Value: &testInt64,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameSS,
+ Value: &pb.PropertyValue{
+ StringValue: &testString3,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameX,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ },
+ },
+ &MultiAnonymous{
+ Simple: Simple{I: testInt64},
+ SimpleTwoFields: SimpleTwoFields{S: testString2, SS: testString3},
+ X: testString2,
+ },
+ },
+ {
+ "nested with dotted field tag",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameA,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &bDotBEntityProto,
+ },
+ },
+ },
+ },
+ &ABDotB{
+ A: BDotB{
+ B: testString2,
+ },
+ },
+ },
+ {
+ "nested entity with key",
+ &pb.EntityProto{
+ Key: keyToProto("some-app-id", testKey0),
+ Property: []*pb.Property{
+ &pb.Property{
+ Name: &fieldNameY,
+ Value: &pb.PropertyValue{
+ StringValue: &testString2,
+ },
+ },
+ &pb.Property{
+ Name: &fieldNameN,
+ Meaning: &entityProtoMeaning,
+ Value: &pb.PropertyValue{
+ StringValue: &withKeyEntityProto,
+ },
+ },
+ },
+ },
+ &NestedWithKey{
+ Y: testString2,
+ N: WithKey{
+ X: testString3,
+ I: testInt64,
+ K: testKey1a,
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
+ err := loadEntity(dst, tc.src)
+ if err != nil {
+ t.Errorf("loadEntity: %s: %v", tc.desc, err)
+ continue
+ }
+
+ if !reflect.DeepEqual(tc.want, dst) {
+ t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/metadata.go b/vendor/google.golang.org/appengine/datastore/metadata.go
new file mode 100644
index 000000000..6acacc3db
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/metadata.go
@@ -0,0 +1,78 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import "golang.org/x/net/context"
+
+// Datastore kinds for the metadata entities.
+const (
+ namespaceKind = "__namespace__"
+ kindKind = "__kind__"
+ propertyKind = "__property__"
+)
+
+// Namespaces returns all the datastore namespaces.
+func Namespaces(ctx context.Context) ([]string, error) {
+ // TODO(djd): Support range queries.
+ q := NewQuery(namespaceKind).KeysOnly()
+ keys, err := q.GetAll(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ // The empty namespace key uses a numeric ID (==1), but luckily
+ // the string ID defaults to "" for numeric IDs anyway.
+ return keyNames(keys), nil
+}
+
+// Kinds returns the names of all the kinds in the current namespace.
+func Kinds(ctx context.Context) ([]string, error) {
+ // TODO(djd): Support range queries.
+ q := NewQuery(kindKind).KeysOnly()
+ keys, err := q.GetAll(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+ return keyNames(keys), nil
+}
+
+// keyNames returns a slice of the provided keys' names (string IDs).
+func keyNames(keys []*Key) []string {
+ n := make([]string, 0, len(keys))
+ for _, k := range keys {
+ n = append(n, k.StringID())
+ }
+ return n
+}
+
+// KindProperties returns all the indexed properties for the given kind.
+// The properties are returned as a map of property names to a slice of the
+// representation types. The representation types for the supported Go property
+// types are:
+// "INT64": signed integers and time.Time
+// "DOUBLE": float32 and float64
+// "BOOLEAN": bool
+// "STRING": string, []byte and ByteString
+// "POINT": appengine.GeoPoint
+// "REFERENCE": *Key
+// "USER": (not used in the Go runtime)
+func KindProperties(ctx context.Context, kind string) (map[string][]string, error) {
+ // TODO(djd): Support range queries.
+ kindKey := NewKey(ctx, kindKind, kind, 0, nil)
+ q := NewQuery(propertyKind).Ancestor(kindKey)
+
+ propMap := map[string][]string{}
+ props := []struct {
+ Repr []string `datastore:"property_representation"`
+ }{}
+
+ keys, err := q.GetAll(ctx, &props)
+ if err != nil {
+ return nil, err
+ }
+ for i, p := range props {
+ propMap[keys[i].StringID()] = p.Repr
+ }
+ return propMap, nil
+}
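+
+// Example (sketch, for a hypothetical "Widget" kind):
+//
+//	props, err := KindProperties(ctx, "Widget")
+//	// On success, props might map e.g. "Price" to []string{"INT64"}.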
diff --git a/vendor/google.golang.org/appengine/datastore/prop.go b/vendor/google.golang.org/appengine/datastore/prop.go
new file mode 100644
index 000000000..5cb2079d8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/prop.go
@@ -0,0 +1,330 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode"
+)
+
+// Entities with more than this many indexed properties will not be saved.
+const maxIndexedProperties = 20000
+
+// []byte fields more than 1 megabyte long will not be loaded or saved.
+const maxBlobLen = 1 << 20
+
+// Property is a name/value pair plus some metadata. A datastore entity's
+// contents are loaded and saved as a sequence of Properties. An entity can
+// have multiple Properties with the same name, provided that p.Multiple is
+// true on all of that entity's Properties with that name.
+type Property struct {
+ // Name is the property name.
+ Name string
+ // Value is the property value. The valid types are:
+ // - int64
+ // - bool
+ // - string
+ // - float64
+ // - ByteString
+ // - *Key
+ // - time.Time
+ // - appengine.BlobKey
+ // - appengine.GeoPoint
+ // - []byte (up to 1 megabyte in length)
+ // - *Entity (representing a nested struct)
+ // This set is smaller than the set of valid struct field types that the
+ // datastore can load and save. A Property Value cannot be a slice (apart
+ // from []byte); use multiple Properties instead. Also, a Value's type
+ // must be explicitly on the list above; it is not sufficient for the
+ // underlying type to be on that list. For example, a Value of "type
+ // myInt64 int64" is invalid. Smaller-width integers and floats are also
+ // invalid. Again, this is more restrictive than the set of valid struct
+ // field types.
+ //
+ // A Value will have an opaque type when loading entities from an index,
+ // such as via a projection query. Load entities into a struct instead
+ // of a PropertyLoadSaver when using a projection query.
+ //
+ // A Value may also be the nil interface value; this is equivalent to
+ // Python's None but not directly representable by a Go struct. Loading
+ // a nil-valued property into a struct will set that field to the zero
+ // value.
+ Value interface{}
+ // NoIndex is whether the datastore must not index this property.
+ NoIndex bool
+ // Multiple is whether the entity can have multiple properties with
+ // the same name. Even if a particular instance only has one property with
+ // a certain name, Multiple should be true if a struct would best represent
+ // it as a field of type []T instead of type T.
+ Multiple bool
+}
+
+// An Entity is the value type for a nested struct.
+// This type is only used for a Property's Value.
+type Entity struct {
+ Key *Key
+ Properties []Property
+}
+
+// ByteString is a short byte slice (up to 1500 bytes) that can be indexed.
+type ByteString []byte
+
+// PropertyLoadSaver can be converted from and to a slice of Properties.
+type PropertyLoadSaver interface {
+ Load([]Property) error
+ Save() ([]Property, error)
+}
+
+// PropertyList converts a []Property to implement PropertyLoadSaver.
+type PropertyList []Property
+
+var (
+ typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
+ typeOfPropertyList = reflect.TypeOf(PropertyList(nil))
+)
+
+// Load loads all of the provided properties into l.
+// It does not first reset *l to an empty slice.
+func (l *PropertyList) Load(p []Property) error {
+ *l = append(*l, p...)
+ return nil
+}
+
+// Save saves all of l's properties as a slice of Properties.
+func (l *PropertyList) Save() ([]Property, error) {
+ return *l, nil
+}
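
As a sketch of how the interface above is typically satisfied: any type with these two methods controls its own (de)serialization. The AuditedNote type and its load counter are purely illustrative:

import "google.golang.org/appengine/datastore"

// AuditedNote persists a single "Text" property and counts how many
// times it has been loaded; the counter itself is never saved.
type AuditedNote struct {
	Text  string
	Loads int
}

func (n *AuditedNote) Load(ps []datastore.Property) error {
	for _, p := range ps {
		if p.Name == "Text" {
			n.Text, _ = p.Value.(string)
		}
	}
	n.Loads++
	return nil
}

func (n *AuditedNote) Save() ([]datastore.Property, error) {
	return []datastore.Property{{Name: "Text", Value: n.Text}}, nil
}

var _ datastore.PropertyLoadSaver = (*AuditedNote)(nil)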
+
+// validPropertyName returns whether name consists of one or more valid Go
+// identifiers joined by ".".
+func validPropertyName(name string) bool {
+ if name == "" {
+ return false
+ }
+ for _, s := range strings.Split(name, ".") {
+ if s == "" {
+ return false
+ }
+ first := true
+ for _, c := range s {
+ if first {
+ first = false
+ if c != '_' && !unicode.IsLetter(c) {
+ return false
+ }
+ } else {
+ if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ }
+ return true
+}
+
+// structCodec describes how to convert a struct to and from a sequence of
+// properties.
+type structCodec struct {
+ // fields gives the field codec for the structTag with the given name.
+ fields map[string]fieldCodec
+ // hasSlice is whether a struct or any of its nested or embedded structs
+ // has a slice-typed field (other than []byte).
+ hasSlice bool
+ // keyField is the index of a *Key field with structTag __key__.
+ // This field is not relevant for the top level struct, only for
+ // nested structs.
+ keyField int
+ // complete is whether the structCodec is complete. An incomplete
+ // structCodec may be encountered when walking a recursive struct.
+ complete bool
+}
+
+// fieldCodec is a struct field's index and, if that struct field's type is
+// itself a struct, that substruct's structCodec.
+type fieldCodec struct {
+ // path is the index path to the field
+ path []int
+ noIndex bool
+ // omitEmpty indicates that the field should be omitted on save
+ // if empty.
+ omitEmpty bool
+ // structCodec is the codec for the struct field at index 'path',
+ // or nil if the field is not a struct.
+ structCodec *structCodec
+}
+
+// structCodecs collects the structCodecs that have already been calculated.
+var (
+ structCodecsMutex sync.Mutex
+ structCodecs = make(map[reflect.Type]*structCodec)
+)
+
+// getStructCodec returns the structCodec for the given struct type.
+func getStructCodec(t reflect.Type) (*structCodec, error) {
+ structCodecsMutex.Lock()
+ defer structCodecsMutex.Unlock()
+ return getStructCodecLocked(t)
+}
+
+// getStructCodecLocked implements getStructCodec. The structCodecsMutex must
+// be held when calling this function.
+func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {
+ c, ok := structCodecs[t]
+ if ok {
+ return c, nil
+ }
+ c = &structCodec{
+ fields: make(map[string]fieldCodec),
+ // We initialize keyField to -1 so that the zero-value is not
+ // misinterpreted as index 0.
+ keyField: -1,
+ }
+
+ // Add c to the structCodecs map before we are sure it is good. If t is
+ // a recursive type, it needs to find the incomplete entry for itself in
+ // the map.
+ structCodecs[t] = c
+ defer func() {
+ if retErr != nil {
+ delete(structCodecs, t)
+ }
+ }()
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ // Skip unexported fields.
+ // Note that if f is an anonymous, unexported struct field,
+ // we will promote its fields.
+ if f.PkgPath != "" && !f.Anonymous {
+ continue
+ }
+
+ tags := strings.Split(f.Tag.Get("datastore"), ",")
+ name := tags[0]
+ opts := make(map[string]bool)
+ for _, t := range tags[1:] {
+ opts[t] = true
+ }
+ switch {
+ case name == "":
+ if !f.Anonymous {
+ name = f.Name
+ }
+ case name == "-":
+ continue
+ case name == "__key__":
+ if f.Type != typeOfKeyPtr {
+ return nil, fmt.Errorf("datastore: __key__ field on struct %v is not a *datastore.Key", t)
+ }
+ c.keyField = i
+ case !validPropertyName(name):
+ return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name)
+ }
+
+ substructType, fIsSlice := reflect.Type(nil), false
+ switch f.Type.Kind() {
+ case reflect.Struct:
+ substructType = f.Type
+ case reflect.Slice:
+ if f.Type.Elem().Kind() == reflect.Struct {
+ substructType = f.Type.Elem()
+ }
+ fIsSlice = f.Type != typeOfByteSlice
+ c.hasSlice = c.hasSlice || fIsSlice
+ }
+
+ var sub *structCodec
+ if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {
+ var err error
+ sub, err = getStructCodecLocked(substructType)
+ if err != nil {
+ return nil, err
+ }
+ if !sub.complete {
+ return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name)
+ }
+ if fIsSlice && sub.hasSlice {
+ return nil, fmt.Errorf(
+ "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name)
+ }
+ c.hasSlice = c.hasSlice || sub.hasSlice
+ // If f is an anonymous struct field, we promote the substruct's fields up to this level
+ // in the linked list of struct codecs.
+ if f.Anonymous {
+ for subname, subfield := range sub.fields {
+ if name != "" {
+ subname = name + "." + subname
+ }
+ if _, ok := c.fields[subname]; ok {
+ return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", subname)
+ }
+ c.fields[subname] = fieldCodec{
+ path: append([]int{i}, subfield.path...),
+ noIndex: subfield.noIndex || opts["noindex"],
+ omitEmpty: subfield.omitEmpty,
+ structCodec: subfield.structCodec,
+ }
+ }
+ continue
+ }
+ }
+
+ if _, ok := c.fields[name]; ok {
+ return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name)
+ }
+ c.fields[name] = fieldCodec{
+ path: []int{i},
+ noIndex: opts["noindex"],
+ omitEmpty: opts["omitempty"],
+ structCodec: sub,
+ }
+ }
+ c.complete = true
+ return c, nil
+}
+
+// structPLS adapts a struct to be a PropertyLoadSaver.
+type structPLS struct {
+ v reflect.Value
+ codec *structCodec
+}
+
+// newStructPLS returns a structPLS, which implements the
+// PropertyLoadSaver interface, for the struct pointer p.
+func newStructPLS(p interface{}) (*structPLS, error) {
+ v := reflect.ValueOf(p)
+ if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
+ return nil, ErrInvalidEntityType
+ }
+ v = v.Elem()
+ codec, err := getStructCodec(v.Type())
+ if err != nil {
+ return nil, err
+ }
+ return &structPLS{v, codec}, nil
+}
+
+// LoadStruct loads the properties from p to dst.
+// dst must be a struct pointer.
+func LoadStruct(dst interface{}, p []Property) error {
+ x, err := newStructPLS(dst)
+ if err != nil {
+ return err
+ }
+ return x.Load(p)
+}
+
+// SaveStruct returns the properties from src as a slice of Properties.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Property, error) {
+ x, err := newStructPLS(src)
+ if err != nil {
+ return nil, err
+ }
+ return x.Save()
+}
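
A minimal round-trip sketch of the two helpers above; the Widget type is an illustrative assumption:

import "google.golang.org/appengine/datastore"

type Widget struct {
	Label string
	Count int
}

func widgetRoundTrip() error {
	src := Widget{Label: "sprocket", Count: 3}
	props, err := datastore.SaveStruct(&src) // flatten src into []Property
	if err != nil {
		return err
	}
	var dst Widget
	return datastore.LoadStruct(&dst, props) // dst now mirrors src
}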
diff --git a/vendor/google.golang.org/appengine/datastore/prop_test.go b/vendor/google.golang.org/appengine/datastore/prop_test.go
new file mode 100644
index 000000000..1b42249df
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/prop_test.go
@@ -0,0 +1,547 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "google.golang.org/appengine"
+)
+
+func TestValidPropertyName(t *testing.T) {
+ testCases := []struct {
+ name string
+ want bool
+ }{
+ // Invalid names.
+ {"", false},
+ {"'", false},
+ {".", false},
+ {"..", false},
+ {".foo", false},
+ {"0", false},
+ {"00", false},
+ {"X.X.4.X.X", false},
+ {"\n", false},
+ {"\x00", false},
+ {"abc\xffz", false},
+ {"foo.", false},
+ {"foo..", false},
+ {"foo..bar", false},
+ {"☃", false},
+ {`"`, false},
+ // Valid names.
+ {"AB", true},
+ {"Abc", true},
+ {"X.X.X.X.X", true},
+ {"_", true},
+ {"_0", true},
+ {"a", true},
+ {"a_B", true},
+ {"f00", true},
+ {"f0o", true},
+ {"fo0", true},
+ {"foo", true},
+ {"foo.bar", true},
+ {"foo.bar.baz", true},
+ {"世界", true},
+ }
+ for _, tc := range testCases {
+ got := validPropertyName(tc.name)
+ if got != tc.want {
+ t.Errorf("%q: got %v, want %v", tc.name, got, tc.want)
+ }
+ }
+}
+
+func TestStructCodec(t *testing.T) {
+ type oStruct struct {
+ O int
+ }
+ type pStruct struct {
+ P int
+ Q int
+ }
+ type rStruct struct {
+ R int
+ S pStruct
+ T oStruct
+ oStruct
+ }
+ type uStruct struct {
+ U int
+ v int
+ }
+ type vStruct struct {
+ V string `datastore:",noindex"`
+ }
+ oStructCodec := &structCodec{
+ fields: map[string]fieldCodec{
+ "O": {path: []int{0}},
+ },
+ complete: true,
+ }
+ pStructCodec := &structCodec{
+ fields: map[string]fieldCodec{
+ "P": {path: []int{0}},
+ "Q": {path: []int{1}},
+ },
+ complete: true,
+ }
+ rStructCodec := &structCodec{
+ fields: map[string]fieldCodec{
+ "R": {path: []int{0}},
+ "S": {path: []int{1}, structCodec: pStructCodec},
+ "T": {path: []int{2}, structCodec: oStructCodec},
+ "O": {path: []int{3, 0}},
+ },
+ complete: true,
+ }
+ uStructCodec := &structCodec{
+ fields: map[string]fieldCodec{
+ "U": {path: []int{0}},
+ },
+ complete: true,
+ }
+ vStructCodec := &structCodec{
+ fields: map[string]fieldCodec{
+ "V": {path: []int{0}, noIndex: true},
+ },
+ complete: true,
+ }
+
+ testCases := []struct {
+ desc string
+ structValue interface{}
+ want *structCodec
+ }{
+ {
+ "oStruct",
+ oStruct{},
+ oStructCodec,
+ },
+ {
+ "pStruct",
+ pStruct{},
+ pStructCodec,
+ },
+ {
+ "rStruct",
+ rStruct{},
+ rStructCodec,
+ },
+ {
+ "uStruct",
+ uStruct{},
+ uStructCodec,
+ },
+ {
+ "non-basic fields",
+ struct {
+ B appengine.BlobKey
+ K *Key
+ T time.Time
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "B": {path: []int{0}},
+ "K": {path: []int{1}},
+ "T": {path: []int{2}},
+ },
+ complete: true,
+ },
+ },
+ {
+ "struct tags with ignored embed",
+ struct {
+ A int `datastore:"a,noindex"`
+ B int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+ oStruct `datastore:"-"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "a": {path: []int{0}, noIndex: true},
+ "b": {path: []int{1}},
+ "C": {path: []int{2}, noIndex: true},
+ "D": {path: []int{3}},
+ "E": {path: []int{4}},
+ "J": {path: []int{6}, noIndex: true},
+ },
+ complete: true,
+ },
+ },
+ {
+ "unexported fields",
+ struct {
+ A int
+ b int
+ C int `datastore:"x"`
+ d int `datastore:"Y"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "A": {path: []int{0}},
+ "x": {path: []int{2}},
+ },
+ complete: true,
+ },
+ },
+ {
+ "nested and embedded structs",
+ struct {
+ A int
+ B int
+ CC oStruct
+ DDD rStruct
+ oStruct
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "A": {path: []int{0}},
+ "B": {path: []int{1}},
+ "CC": {path: []int{2}, structCodec: oStructCodec},
+ "DDD": {path: []int{3}, structCodec: rStructCodec},
+ "O": {path: []int{4, 0}},
+ },
+ complete: true,
+ },
+ },
+ {
+ "struct tags with nested and embedded structs",
+ struct {
+ A int `datastore:"-"`
+ B int `datastore:"w"`
+ C oStruct `datastore:"xx"`
+ D rStruct `datastore:"y"`
+ oStruct `datastore:"z"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "w": {path: []int{1}},
+ "xx": {path: []int{2}, structCodec: oStructCodec},
+ "y": {path: []int{3}, structCodec: rStructCodec},
+ "z.O": {path: []int{4, 0}},
+ },
+ complete: true,
+ },
+ },
+ {
+ "unexported nested and embedded structs",
+ struct {
+ a int
+ B int
+ c uStruct
+ D uStruct
+ uStruct
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "B": {path: []int{1}},
+ "D": {path: []int{3}, structCodec: uStructCodec},
+ "U": {path: []int{4, 0}},
+ },
+ complete: true,
+ },
+ },
+ {
+ "noindex nested struct",
+ struct {
+ A oStruct `datastore:",noindex"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "A": {path: []int{0}, structCodec: oStructCodec, noIndex: true},
+ },
+ complete: true,
+ },
+ },
+ {
+ "noindex slice",
+ struct {
+ A []string `datastore:",noindex"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "A": {path: []int{0}, noIndex: true},
+ },
+ hasSlice: true,
+ complete: true,
+ },
+ },
+ {
+ "noindex embedded struct slice",
+ struct {
+ // vStruct has a single field, V, also with noindex.
+ A []vStruct `datastore:",noindex"`
+ }{},
+ &structCodec{
+ fields: map[string]fieldCodec{
+ "A": {path: []int{0}, structCodec: vStructCodec, noIndex: true},
+ },
+ hasSlice: true,
+ complete: true,
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ got, err := getStructCodec(reflect.TypeOf(tc.structValue))
+ if err != nil {
+ t.Errorf("%s: getStructCodec: %v", tc.desc, err)
+ continue
+ }
+ // can't reflect.DeepEqual b/c element order in fields map may differ
+ if !isEqualStructCodec(got, tc.want) {
+ t.Errorf("%s\ngot %+v\nwant %+v\n", tc.desc, got, tc.want)
+ }
+ }
+}
+
+func isEqualStructCodec(got, want *structCodec) bool {
+ if got.complete != want.complete {
+ return false
+ }
+ if got.hasSlice != want.hasSlice {
+ return false
+ }
+ if len(got.fields) != len(want.fields) {
+ return false
+ }
+ for name, wantF := range want.fields {
+ gotF := got.fields[name]
+ if !reflect.DeepEqual(wantF.path, gotF.path) {
+ return false
+ }
+ if wantF.noIndex != gotF.noIndex {
+ return false
+ }
+ if wantF.structCodec != nil {
+ if gotF.structCodec == nil {
+ return false
+ }
+ if !isEqualStructCodec(gotF.structCodec, wantF.structCodec) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+func TestRepeatedPropertyName(t *testing.T) {
+ good := []interface{}{
+ struct {
+ A int `datastore:"-"`
+ }{},
+ struct {
+ A int `datastore:"b"`
+ B int
+ }{},
+ struct {
+ A int
+ B int `datastore:"B"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"-"`
+ }{},
+ struct {
+ A int `datastore:"-"`
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"D"`
+ }{},
+ }
+ bad := []interface{}{
+ struct {
+ A int `datastore:"B"`
+ B int
+ }{},
+ struct {
+ A int
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"C"`
+ B int `datastore:"C"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"B"`
+ }{},
+ }
+ testGetStructCodec(t, good, bad)
+}
+
+func TestFlatteningNestedStructs(t *testing.T) {
+ type DeepGood struct {
+ A struct {
+ B []struct {
+ C struct {
+ D int
+ }
+ }
+ }
+ }
+ type DeepBad struct {
+ A struct {
+ B []struct {
+ C struct {
+ D []int
+ }
+ }
+ }
+ }
+ type ISay struct {
+ Tomato int
+ }
+ type YouSay struct {
+ Tomato int
+ }
+ type Tweedledee struct {
+ Dee int `datastore:"D"`
+ }
+ type Tweedledum struct {
+ Dum int `datastore:"D"`
+ }
+
+ good := []interface{}{
+ struct {
+ X []struct {
+ Y string
+ }
+ }{},
+ struct {
+ X []struct {
+ Y []byte
+ }
+ }{},
+ struct {
+ P []int
+ X struct {
+ Y []int
+ }
+ }{},
+ struct {
+ X struct {
+ Y []int
+ }
+ Q []int
+ }{},
+ struct {
+ P []int
+ X struct {
+ Y []int
+ }
+ Q []int
+ }{},
+ struct {
+ DeepGood
+ }{},
+ struct {
+ DG DeepGood
+ }{},
+ struct {
+ Foo struct {
+ Z int
+ } `datastore:"A"`
+ Bar struct {
+ Z int
+ } `datastore:"B"`
+ }{},
+ }
+ bad := []interface{}{
+ struct {
+ X []struct {
+ Y []string
+ }
+ }{},
+ struct {
+ X []struct {
+ Y []int
+ }
+ }{},
+ struct {
+ DeepBad
+ }{},
+ struct {
+ DB DeepBad
+ }{},
+ struct {
+ ISay
+ YouSay
+ }{},
+ struct {
+ Tweedledee
+ Tweedledum
+ }{},
+ struct {
+ Foo struct {
+ Z int
+ } `datastore:"A"`
+ Bar struct {
+ Z int
+ } `datastore:"A"`
+ }{},
+ }
+ testGetStructCodec(t, good, bad)
+}
+
+func testGetStructCodec(t *testing.T, good []interface{}, bad []interface{}) {
+ for _, x := range good {
+ if _, err := getStructCodec(reflect.TypeOf(x)); err != nil {
+ t.Errorf("type %T: got non-nil error (%s), want nil", x, err)
+ }
+ }
+ for _, x := range bad {
+ if _, err := getStructCodec(reflect.TypeOf(x)); err == nil {
+ t.Errorf("type %T: got nil error, want non-nil", x)
+ }
+ }
+}
+
+func TestNilKeyIsStored(t *testing.T) {
+ x := struct {
+ K *Key
+ I int
+ }{}
+ p := PropertyList{}
+ // Save x as properties.
+ p1, _ := SaveStruct(&x)
+ p.Load(p1)
+ // Set x's fields to non-zero.
+ x.K = &Key{}
+ x.I = 2
+ // Load x from properties.
+ p2, _ := p.Save()
+ LoadStruct(&x, p2)
+ // Check that x's fields were set to zero.
+ if x.K != nil {
+ t.Errorf("K field was not zero")
+ }
+ if x.I != 0 {
+ t.Errorf("I field was not zero")
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/query.go b/vendor/google.golang.org/appengine/datastore/query.go
new file mode 100644
index 000000000..3847b0fa6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/query.go
@@ -0,0 +1,724 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+type operator int
+
+const (
+ lessThan operator = iota
+ lessEq
+ equal
+ greaterEq
+ greaterThan
+)
+
+var operatorToProto = map[operator]*pb.Query_Filter_Operator{
+ lessThan: pb.Query_Filter_LESS_THAN.Enum(),
+ lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(),
+ equal: pb.Query_Filter_EQUAL.Enum(),
+ greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),
+ greaterThan: pb.Query_Filter_GREATER_THAN.Enum(),
+}
+
+// filter is a conditional filter on query results.
+type filter struct {
+ FieldName string
+ Op operator
+ Value interface{}
+}
+
+type sortDirection int
+
+const (
+ ascending sortDirection = iota
+ descending
+)
+
+var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{
+ ascending: pb.Query_Order_ASCENDING.Enum(),
+ descending: pb.Query_Order_DESCENDING.Enum(),
+}
+
+// order is a sort order on query results.
+type order struct {
+ FieldName string
+ Direction sortDirection
+}
+
+// NewQuery creates a new Query for a specific entity kind.
+//
+// An empty kind means to return all entities, including entities created and
+// managed by other App Engine features, and is called a kindless query.
+// Kindless queries cannot include filters or sort orders on property values.
+func NewQuery(kind string) *Query {
+ return &Query{
+ kind: kind,
+ limit: -1,
+ }
+}
+
+// Query represents a datastore query.
+type Query struct {
+ kind string
+ ancestor *Key
+ filter []filter
+ order []order
+ projection []string
+
+ distinct bool
+ keysOnly bool
+ eventual bool
+ limit int32
+ offset int32
+ start *pb.CompiledCursor
+ end *pb.CompiledCursor
+
+ err error
+}
+
+func (q *Query) clone() *Query {
+ x := *q
+ // Copy the contents of the slice-typed fields to a new backing store.
+ if len(q.filter) > 0 {
+ x.filter = make([]filter, len(q.filter))
+ copy(x.filter, q.filter)
+ }
+ if len(q.order) > 0 {
+ x.order = make([]order, len(q.order))
+ copy(x.order, q.order)
+ }
+ return &x
+}
+
+// Ancestor returns a derivative query with an ancestor filter.
+// The ancestor should not be nil.
+func (q *Query) Ancestor(ancestor *Key) *Query {
+ q = q.clone()
+ if ancestor == nil {
+ q.err = errors.New("datastore: nil query ancestor")
+ return q
+ }
+ q.ancestor = ancestor
+ return q
+}
+
+// EventualConsistency returns a derivative query that returns eventually
+// consistent results.
+// It only has an effect on ancestor queries.
+func (q *Query) EventualConsistency() *Query {
+ q = q.clone()
+ q.eventual = true
+ return q
+}
+
+// Filter returns a derivative query with a field-based filter.
+// The filterStr argument must be a field name followed by optional space,
+// followed by an operator, one of ">", "<", ">=", "<=", or "=".
+// Fields are compared against the provided value using the operator.
+// Multiple filters are AND'ed together.
+func (q *Query) Filter(filterStr string, value interface{}) *Query {
+ q = q.clone()
+ filterStr = strings.TrimSpace(filterStr)
+ if len(filterStr) < 1 {
+ q.err = errors.New("datastore: invalid filter: " + filterStr)
+ return q
+ }
+ f := filter{
+ FieldName: strings.TrimRight(filterStr, " ><=!"),
+ Value: value,
+ }
+ switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
+ case "<=":
+ f.Op = lessEq
+ case ">=":
+ f.Op = greaterEq
+ case "<":
+ f.Op = lessThan
+ case ">":
+ f.Op = greaterThan
+ case "=":
+ f.Op = equal
+ default:
+ q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
+ return q
+ }
+ q.filter = append(q.filter, f)
+ return q
+}
+
+// Order returns a derivative query with a field-based sort order. Orders are
+// applied in the order they are added. The default order is ascending; to sort
+// in descending order prefix the fieldName with a minus sign (-).
+func (q *Query) Order(fieldName string) *Query {
+ q = q.clone()
+ fieldName = strings.TrimSpace(fieldName)
+ o := order{
+ Direction: ascending,
+ FieldName: fieldName,
+ }
+ if strings.HasPrefix(fieldName, "-") {
+ o.Direction = descending
+ o.FieldName = strings.TrimSpace(fieldName[1:])
+ } else if strings.HasPrefix(fieldName, "+") {
+ q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
+ return q
+ }
+ if len(o.FieldName) == 0 {
+ q.err = errors.New("datastore: empty order")
+ return q
+ }
+ q.order = append(q.order, o)
+ return q
+}
+
+// Project returns a derivative query that yields only the given fields. It
+// cannot be used with KeysOnly.
+func (q *Query) Project(fieldNames ...string) *Query {
+ q = q.clone()
+ q.projection = append([]string(nil), fieldNames...)
+ return q
+}
+
+// Distinct returns a derivative query that yields de-duplicated entities with
+// respect to the set of projected fields. It is only used for projection
+// queries.
+func (q *Query) Distinct() *Query {
+ q = q.clone()
+ q.distinct = true
+ return q
+}
+
+// KeysOnly returns a derivative query that yields only keys, not keys and
+// entities. It cannot be used with projection queries.
+func (q *Query) KeysOnly() *Query {
+ q = q.clone()
+ q.keysOnly = true
+ return q
+}
+
+// Limit returns a derivative query that has a limit on the number of results
+// returned. A negative value means unlimited.
+func (q *Query) Limit(limit int) *Query {
+ q = q.clone()
+ if limit < math.MinInt32 || limit > math.MaxInt32 {
+ q.err = errors.New("datastore: query limit overflow")
+ return q
+ }
+ q.limit = int32(limit)
+ return q
+}
+
+// Offset returns a derivative query that has an offset of how many keys to
+// skip over before returning results. A negative value is invalid.
+func (q *Query) Offset(offset int) *Query {
+ q = q.clone()
+ if offset < 0 {
+ q.err = errors.New("datastore: negative query offset")
+ return q
+ }
+ if offset > math.MaxInt32 {
+ q.err = errors.New("datastore: query offset overflow")
+ return q
+ }
+ q.offset = int32(offset)
+ return q
+}
+
+// Start returns a derivative query with the given start point.
+func (q *Query) Start(c Cursor) *Query {
+ q = q.clone()
+ if c.cc == nil {
+ q.err = errors.New("datastore: invalid cursor")
+ return q
+ }
+ q.start = c.cc
+ return q
+}
+
+// End returns a derivative query with the given end point.
+func (q *Query) End(c Cursor) *Query {
+ q = q.clone()
+ if c.cc == nil {
+ q.err = errors.New("datastore: invalid cursor")
+ return q
+ }
+ q.end = c.cc
+ return q
+}
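
A hedged sketch of the derivative style these methods implement via clone: each call returns a fresh *Query, so earlier values remain valid and reusable. The kind and field names are illustrative:

import "google.golang.org/appengine/datastore"

func buildQuery() *datastore.Query {
	base := datastore.NewQuery("Gopher") // never mutated by the calls below
	return base.Filter("Height >=", 10).
		Order("-Height").
		Limit(5)
}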
+
+// toProto converts the query to a protocol buffer.
+func (q *Query) toProto(dst *pb.Query, appID string) error {
+ if len(q.projection) != 0 && q.keysOnly {
+ return errors.New("datastore: query cannot both project and be keys-only")
+ }
+ dst.Reset()
+ dst.App = proto.String(appID)
+ if q.kind != "" {
+ dst.Kind = proto.String(q.kind)
+ }
+ if q.ancestor != nil {
+ dst.Ancestor = keyToProto(appID, q.ancestor)
+ if q.eventual {
+ dst.Strong = proto.Bool(false)
+ }
+ }
+ if q.projection != nil {
+ dst.PropertyName = q.projection
+ if q.distinct {
+ dst.GroupByPropertyName = q.projection
+ }
+ }
+ if q.keysOnly {
+ dst.KeysOnly = proto.Bool(true)
+ dst.RequirePerfectPlan = proto.Bool(true)
+ }
+ for _, qf := range q.filter {
+ if qf.FieldName == "" {
+ return errors.New("datastore: empty query filter field name")
+ }
+ p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false)
+ if errStr != "" {
+ return errors.New("datastore: bad query filter value type: " + errStr)
+ }
+ xf := &pb.Query_Filter{
+ Op: operatorToProto[qf.Op],
+ Property: []*pb.Property{p},
+ }
+ if xf.Op == nil {
+ return errors.New("datastore: unknown query filter operator")
+ }
+ dst.Filter = append(dst.Filter, xf)
+ }
+ for _, qo := range q.order {
+ if qo.FieldName == "" {
+ return errors.New("datastore: empty query order field name")
+ }
+ xo := &pb.Query_Order{
+ Property: proto.String(qo.FieldName),
+ Direction: sortDirectionToProto[qo.Direction],
+ }
+ if xo.Direction == nil {
+ return errors.New("datastore: unknown query order direction")
+ }
+ dst.Order = append(dst.Order, xo)
+ }
+ if q.limit >= 0 {
+ dst.Limit = proto.Int32(q.limit)
+ }
+ if q.offset != 0 {
+ dst.Offset = proto.Int32(q.offset)
+ }
+ dst.CompiledCursor = q.start
+ dst.EndCompiledCursor = q.end
+ dst.Compile = proto.Bool(true)
+ return nil
+}
+
+// Count returns the number of results for the query.
+//
+// The running time and number of API calls made by Count scale linearly with
+// the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise Count will
+// continue until it finishes counting or the provided context expires.
+func (q *Query) Count(c context.Context) (int, error) {
+ // Check that the query is well-formed.
+ if q.err != nil {
+ return 0, q.err
+ }
+
+ // Run a copy of the query, with keysOnly true (if we're not a projection,
+ // since the two are incompatible), and an adjusted offset. We also set the
+ // limit to zero, as we don't want any actual entity data, just the number
+ // of skipped results.
+ newQ := q.clone()
+ newQ.keysOnly = len(newQ.projection) == 0
+ newQ.limit = 0
+ if q.limit < 0 {
+ // If the original query was unlimited, set the new query's offset to maximum.
+ newQ.offset = math.MaxInt32
+ } else {
+ newQ.offset = q.offset + q.limit
+ if newQ.offset < 0 {
+ // Do the best we can, in the presence of overflow.
+ newQ.offset = math.MaxInt32
+ }
+ }
+ req := &pb.Query{}
+ if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil {
+ return 0, err
+ }
+ res := &pb.QueryResult{}
+ if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil {
+ return 0, err
+ }
+
+ // n is the count we will return. For example, suppose that our original
+ // query had an offset of 4 and a limit of 2008: the count will be 2008,
+ // provided that there are at least 2012 matching entities. However, the
+ // RPCs will only skip 1000 results at a time. The RPC sequence is:
+ // call RunQuery with (offset, limit) = (2012, 0) // 2012 == newQ.offset
+ // response has (skippedResults, moreResults) = (1000, true)
+ // n += 1000 // n == 1000
+ // call Next with (offset, limit) = (1012, 0) // 1012 == newQ.offset - n
+ // response has (skippedResults, moreResults) = (1000, true)
+ // n += 1000 // n == 2000
+ // call Next with (offset, limit) = (12, 0) // 12 == newQ.offset - n
+ // response has (skippedResults, moreResults) = (12, false)
+ // n += 12 // n == 2012
+ // // exit the loop
+ // n -= 4 // n == 2008
+ var n int32
+ for {
+ // The QueryResult should have no actual entity data, just skipped results.
+ if len(res.Result) != 0 {
+ return 0, errors.New("datastore: internal error: Count request returned too much data")
+ }
+ n += res.GetSkippedResults()
+ if !res.GetMoreResults() {
+ break
+ }
+ if err := callNext(c, res, newQ.offset-n, 0); err != nil {
+ return 0, err
+ }
+ }
+ n -= q.offset
+ if n < 0 {
+ // If the offset was greater than the number of matching entities,
+ // return 0 instead of negative.
+ n = 0
+ }
+ return int(n), nil
+}
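
Because of that linear cost, callers that only need a bounded answer can cap the query first; a hedged sketch (names illustrative):

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine/datastore"
)

// atLeast reports whether the kind has at least max entities, without
// ever counting past max.
func atLeast(ctx context.Context, kind string, max int) (bool, error) {
	n, err := datastore.NewQuery(kind).Limit(max).Count(ctx)
	if err != nil {
		return false, err
	}
	return n >= max, nil
}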
+
+// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
+// returned by a query with more results.
+func callNext(c context.Context, res *pb.QueryResult, offset, limit int32) error {
+ if res.Cursor == nil {
+ return errors.New("datastore: internal error: server did not return a cursor")
+ }
+ req := &pb.NextRequest{
+ Cursor: res.Cursor,
+ }
+ if limit >= 0 {
+ req.Count = proto.Int32(limit)
+ }
+ if offset != 0 {
+ req.Offset = proto.Int32(offset)
+ }
+ if res.CompiledCursor != nil {
+ req.Compile = proto.Bool(true)
+ }
+ res.Reset()
+ return internal.Call(c, "datastore_v3", "Next", req, res)
+}
+
+// GetAll runs the query in the given context and returns all keys that match
+// that query, as well as appending the values to dst.
+//
+// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
+// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
+//
+// As a special case, *PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when *[]PropertyList was intended.
+//
+// The keys returned by GetAll will be in a 1-1 correspondence with the entities
+// added to dst.
+//
+// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
+//
+// The running time and number of API calls made by GetAll scale linearly
+// with the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise GetAll will
+// continue until it finishes collecting results or the provided context
+// expires.
+func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) {
+ var (
+ dv reflect.Value
+ mat multiArgType
+ elemType reflect.Type
+ errFieldMismatch error
+ )
+ if !q.keysOnly {
+ dv = reflect.ValueOf(dst)
+ if dv.Kind() != reflect.Ptr || dv.IsNil() {
+ return nil, ErrInvalidEntityType
+ }
+ dv = dv.Elem()
+ mat, elemType = checkMultiArg(dv)
+ if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
+ return nil, ErrInvalidEntityType
+ }
+ }
+
+ var keys []*Key
+ for t := q.Run(c); ; {
+ k, e, err := t.next()
+ if err == Done {
+ break
+ }
+ if err != nil {
+ return keys, err
+ }
+ if !q.keysOnly {
+ ev := reflect.New(elemType)
+ if elemType.Kind() == reflect.Map {
+ // This is a special case. The zero values of a map type are
+ // not immediately useful; they have to be make'd.
+ //
+ // Funcs and channels are similar, in that a zero value is not useful,
+ // but even a freshly make'd channel isn't useful: there's no fixed
+ // channel buffer size that is always going to be large enough, and
+ // there's no goroutine to drain the other end. Theoretically, these
+ // types could be supported, for example by sniffing for a constructor
+ // method or requiring prior registration, but for now it's not a
+ // frequent enough concern to be worth it. Programmers can work around
+ // it by explicitly using Iterator.Next instead of the Query.GetAll
+ // convenience method.
+ x := reflect.MakeMap(elemType)
+ ev.Elem().Set(x)
+ }
+ if err = loadEntity(ev.Interface(), e); err != nil {
+ if _, ok := err.(*ErrFieldMismatch); ok {
+ // We continue loading entities even in the face of field mismatch errors.
+ // If we encounter any other error, that other error is returned. Otherwise,
+ // an ErrFieldMismatch is returned.
+ errFieldMismatch = err
+ } else {
+ return keys, err
+ }
+ }
+ if mat != multiArgTypeStructPtr {
+ ev = ev.Elem()
+ }
+ dv.Set(reflect.Append(dv, ev))
+ }
+ keys = append(keys, k)
+ }
+ return keys, errFieldMismatch
+}
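
A hedged GetAll sketch; the Gopher struct and filter values are illustrative. Per the doc comment above, keys[i] corresponds to gophers[i]:

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine/datastore"
)

type Gopher struct {
	Name   string
	Height int
}

func tallGophers(ctx context.Context) ([]*datastore.Key, []Gopher, error) {
	var gophers []Gopher
	keys, err := datastore.NewQuery("Gopher").
		Filter("Height >", 30).
		Limit(20).
		GetAll(ctx, &gophers)
	return keys, gophers, err
}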
+
+// Run runs the query in the given context.
+func (q *Query) Run(c context.Context) *Iterator {
+ if q.err != nil {
+ return &Iterator{err: q.err}
+ }
+ t := &Iterator{
+ c: c,
+ limit: q.limit,
+ q: q,
+ prevCC: q.start,
+ }
+ var req pb.Query
+ if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil {
+ t.err = err
+ return t
+ }
+ if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil {
+ t.err = err
+ return t
+ }
+ offset := q.offset - t.res.GetSkippedResults()
+ for offset > 0 && t.res.GetMoreResults() {
+ t.prevCC = t.res.CompiledCursor
+ if err := callNext(t.c, &t.res, offset, t.limit); err != nil {
+ t.err = err
+ break
+ }
+ skip := t.res.GetSkippedResults()
+ if skip < 0 {
+ t.err = errors.New("datastore: internal error: negative number of skipped_results")
+ break
+ }
+ offset -= skip
+ }
+ if offset < 0 {
+ t.err = errors.New("datastore: internal error: query offset was overshot")
+ }
+ return t
+}
+
+// Iterator is the result of running a query.
+type Iterator struct {
+ c context.Context
+ err error
+ // res is the result of the most recent RunQuery or Next API call.
+ res pb.QueryResult
+ // i is how many elements of res.Result we have iterated over.
+ i int
+ // limit is the limit on the number of results this iterator should return.
+ // A negative value means unlimited.
+ limit int32
+ // q is the original query which yielded this iterator.
+ q *Query
+ // prevCC is the compiled cursor that marks the end of the previous batch
+ // of results.
+ prevCC *pb.CompiledCursor
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("datastore: query has no more results")
+
+// Next returns the key of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// If the query is not keys only and dst is non-nil, it also loads the entity
+// stored for that key into the struct pointer or PropertyLoadSaver dst, with
+// the same semantics and possible errors as for the Get function.
+func (t *Iterator) Next(dst interface{}) (*Key, error) {
+ k, e, err := t.next()
+ if err != nil {
+ return nil, err
+ }
+ if dst != nil && !t.q.keysOnly {
+ err = loadEntity(dst, e)
+ }
+ return k, err
+}
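
The canonical consumption loop for an iterator, as a hedged sketch reusing the illustrative Gopher struct and imports from the GetAll sketch above:

func streamGophers(ctx context.Context) error {
	for t := datastore.NewQuery("Gopher").Run(ctx); ; {
		var g Gopher
		key, err := t.Next(&g)
		if err == datastore.Done {
			return nil // iteration finished cleanly
		}
		if err != nil {
			return err
		}
		log.Infof(ctx, "%v: %s is %d tall", key, g.Name, g.Height)
	}
}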
+
+func (t *Iterator) next() (*Key, *pb.EntityProto, error) {
+ if t.err != nil {
+ return nil, nil, t.err
+ }
+
+ // Issue datastore_v3/Next RPCs as necessary.
+ for t.i == len(t.res.Result) {
+ if !t.res.GetMoreResults() {
+ t.err = Done
+ return nil, nil, t.err
+ }
+ t.prevCC = t.res.CompiledCursor
+ if err := callNext(t.c, &t.res, 0, t.limit); err != nil {
+ t.err = err
+ return nil, nil, t.err
+ }
+ if t.res.GetSkippedResults() != 0 {
+ t.err = errors.New("datastore: internal error: iterator has skipped results")
+ return nil, nil, t.err
+ }
+ t.i = 0
+ if t.limit >= 0 {
+ t.limit -= int32(len(t.res.Result))
+ if t.limit < 0 {
+ t.err = errors.New("datastore: internal error: query returned more results than the limit")
+ return nil, nil, t.err
+ }
+ }
+ }
+
+ // Extract the key from the t.i'th element of t.res.Result.
+ e := t.res.Result[t.i]
+ t.i++
+ if e.Key == nil {
+ return nil, nil, errors.New("datastore: internal error: server did not return a key")
+ }
+ k, err := protoToKey(e.Key)
+ if err != nil || k.Incomplete() {
+ return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
+ }
+ return k, e, nil
+}
+
+// Cursor returns a cursor for the iterator's current location.
+func (t *Iterator) Cursor() (Cursor, error) {
+ if t.err != nil && t.err != Done {
+ return Cursor{}, t.err
+ }
+ // If we are at either end of the current batch of results,
+ // return the compiled cursor at that end.
+ skipped := t.res.GetSkippedResults()
+ if t.i == 0 && skipped == 0 {
+ if t.prevCC == nil {
+ // A nil pointer (of type *pb.CompiledCursor) means no constraint:
+ // passing it as the end cursor of a new query means unlimited results
+ // (glossing over the integer limit parameter for now).
+ // A non-nil pointer to an empty pb.CompiledCursor means the start:
+ // passing it as the end cursor of a new query means 0 results.
+ // If prevCC was nil, then the original query had no start cursor, but
+ // Iterator.Cursor should return "the start" instead of unlimited.
+ return Cursor{&zeroCC}, nil
+ }
+ return Cursor{t.prevCC}, nil
+ }
+ if t.i == len(t.res.Result) {
+ return Cursor{t.res.CompiledCursor}, nil
+ }
+// Otherwise, re-run the query, offset to this iterator's position, starting from
+ // the most recent compiled cursor. This is done on a best-effort basis, as it
+ // is racy; if a concurrent process has added or removed entities, then the
+ // cursor returned may be inconsistent.
+ q := t.q.clone()
+ q.start = t.prevCC
+ q.offset = skipped + int32(t.i)
+ q.limit = 0
+ q.keysOnly = len(q.projection) == 0
+ t1 := q.Run(t.c)
+ _, _, err := t1.next()
+ if err != Done {
+ if err == nil {
+ err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
+ }
+ return Cursor{}, err
+ }
+ return Cursor{t1.res.CompiledCursor}, nil
+}
+
+var zeroCC pb.CompiledCursor
+
+// Cursor is an iterator's position. It can be converted to and from an opaque
+// string. A cursor can be used from different HTTP requests, but only with a
+// query with the same kind, ancestor, filter and order constraints.
+type Cursor struct {
+ cc *pb.CompiledCursor
+}
+
+// String returns a base-64 string representation of a cursor.
+func (c Cursor) String() string {
+ if c.cc == nil {
+ return ""
+ }
+ b, err := proto.Marshal(c.cc)
+ if err != nil {
+ // The only way to construct a Cursor with a non-nil cc field is to
+ // unmarshal from the byte representation. We panic if the unmarshal
+ // succeeds but the marshaling of the unchanged protobuf value fails.
+ panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err))
+ }
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeCursor decodes a cursor from its base-64 string representation.
+func DecodeCursor(s string) (Cursor, error) {
+ if s == "" {
+ return Cursor{&zeroCC}, nil
+ }
+ if n := len(s) % 4; n != 0 {
+ s += strings.Repeat("=", 4-n)
+ }
+ b, err := base64.URLEncoding.DecodeString(s)
+ if err != nil {
+ return Cursor{}, err
+ }
+ cc := &pb.CompiledCursor{}
+ if err := proto.Unmarshal(b, cc); err != nil {
+ return Cursor{}, err
+ }
+ return Cursor{cc}, nil
+}
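
A hedged sketch of carrying a cursor across requests via its opaque string form; the paging helpers and the "Gopher" kind are illustrative:

// pageToken consumes up to pageSize results and returns a token from
// which a later request can resume the same query.
func pageToken(ctx context.Context, pageSize int) (string, error) {
	it := datastore.NewQuery("Gopher").Limit(pageSize).Run(ctx)
	for {
		_, err := it.Next(nil) // nil dst: advance without loading entities
		if err == datastore.Done {
			break
		}
		if err != nil {
			return "", err
		}
	}
	cur, err := it.Cursor()
	if err != nil {
		return "", err
	}
	return cur.String(), nil
}

// resume restarts the same kind/filter/order query from a token.
func resume(ctx context.Context, token string) (*datastore.Iterator, error) {
	cur, err := datastore.DecodeCursor(token)
	if err != nil {
		return nil, err
	}
	return datastore.NewQuery("Gopher").Start(cur).Run(ctx), nil
}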
diff --git a/vendor/google.golang.org/appengine/datastore/query_test.go b/vendor/google.golang.org/appengine/datastore/query_test.go
new file mode 100644
index 000000000..f1b9de87f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/query_test.go
@@ -0,0 +1,583 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ path1 = &pb.Path{
+ Element: []*pb.Path_Element{
+ {
+ Type: proto.String("Gopher"),
+ Id: proto.Int64(6),
+ },
+ },
+ }
+ path2 = &pb.Path{
+ Element: []*pb.Path_Element{
+ {
+ Type: proto.String("Gopher"),
+ Id: proto.Int64(6),
+ },
+ {
+ Type: proto.String("Gopher"),
+ Id: proto.Int64(8),
+ },
+ },
+ }
+)
+
+func fakeRunQuery(in *pb.Query, out *pb.QueryResult) error {
+ expectedIn := &pb.Query{
+ App: proto.String("dev~fake-app"),
+ Kind: proto.String("Gopher"),
+ Compile: proto.Bool(true),
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn)
+ }
+ *out = pb.QueryResult{
+ Result: []*pb.EntityProto{
+ {
+ Key: &pb.Reference{
+ App: proto.String("s~test-app"),
+ Path: path1,
+ },
+ EntityGroup: path1,
+ Property: []*pb.Property{
+ {
+ Meaning: pb.Property_TEXT.Enum(),
+ Name: proto.String("Name"),
+ Value: &pb.PropertyValue{
+ StringValue: proto.String("George"),
+ },
+ },
+ {
+ Name: proto.String("Height"),
+ Value: &pb.PropertyValue{
+ Int64Value: proto.Int64(32),
+ },
+ },
+ },
+ },
+ {
+ Key: &pb.Reference{
+ App: proto.String("s~test-app"),
+ Path: path2,
+ },
+ EntityGroup: path1, // ancestor is George
+ Property: []*pb.Property{
+ {
+ Meaning: pb.Property_TEXT.Enum(),
+ Name: proto.String("Name"),
+ Value: &pb.PropertyValue{
+ StringValue: proto.String("Rufus"),
+ },
+ },
+ // No height for Rufus.
+ },
+ },
+ },
+ MoreResults: proto.Bool(false),
+ }
+ return nil
+}
+
+type StructThatImplementsPLS struct{}
+
+func (StructThatImplementsPLS) Load(p []Property) error { return nil }
+func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
+
+var _ PropertyLoadSaver = StructThatImplementsPLS{}
+
+type StructPtrThatImplementsPLS struct{}
+
+func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil }
+func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
+
+var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{}
+
+type PropertyMap map[string]Property
+
+func (m PropertyMap) Load(props []Property) error {
+ for _, p := range props {
+ if p.Multiple {
+ return errors.New("PropertyMap does not support multiple properties")
+ }
+ m[p.Name] = p
+ }
+ return nil
+}
+
+func (m PropertyMap) Save() ([]Property, error) {
+ props := make([]Property, 0, len(m))
+ for _, p := range m {
+ if p.Multiple {
+ return nil, errors.New("PropertyMap does not support multiple properties")
+ }
+ props = append(props, p)
+ }
+ return props, nil
+}
+
+var _ PropertyLoadSaver = PropertyMap{}
+
+type Gopher struct {
+ Name string
+ Height int
+}
+
+// typeOfEmptyInterface is the type of interface{}, but we can't use
+// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an
+// interface{}.
+var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem()
+
+func TestCheckMultiArg(t *testing.T) {
+ testCases := []struct {
+ v interface{}
+ mat multiArgType
+ elemType reflect.Type
+ }{
+ // Invalid cases.
+ {nil, multiArgTypeInvalid, nil},
+ {Gopher{}, multiArgTypeInvalid, nil},
+ {&Gopher{}, multiArgTypeInvalid, nil},
+ {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case.
+ {PropertyMap{}, multiArgTypeInvalid, nil},
+ {[]*PropertyList(nil), multiArgTypeInvalid, nil},
+ {[]*PropertyMap(nil), multiArgTypeInvalid, nil},
+ {[]**Gopher(nil), multiArgTypeInvalid, nil},
+ {[]*interface{}(nil), multiArgTypeInvalid, nil},
+ // Valid cases.
+ {
+ []PropertyList(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(PropertyList{}),
+ },
+ {
+ []PropertyMap(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(PropertyMap{}),
+ },
+ {
+ []StructThatImplementsPLS(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(StructThatImplementsPLS{}),
+ },
+ {
+ []StructPtrThatImplementsPLS(nil),
+ multiArgTypePropertyLoadSaver,
+ reflect.TypeOf(StructPtrThatImplementsPLS{}),
+ },
+ {
+ []Gopher(nil),
+ multiArgTypeStruct,
+ reflect.TypeOf(Gopher{}),
+ },
+ {
+ []*Gopher(nil),
+ multiArgTypeStructPtr,
+ reflect.TypeOf(Gopher{}),
+ },
+ {
+ []interface{}(nil),
+ multiArgTypeInterface,
+ typeOfEmptyInterface,
+ },
+ }
+ for _, tc := range testCases {
+ mat, elemType := checkMultiArg(reflect.ValueOf(tc.v))
+ if mat != tc.mat || elemType != tc.elemType {
+ t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v",
+ tc.v, mat, elemType, tc.mat, tc.elemType)
+ }
+ }
+}
+
+func TestSimpleQuery(t *testing.T) {
+ struct1 := Gopher{Name: "George", Height: 32}
+ struct2 := Gopher{Name: "Rufus"}
+ pList1 := PropertyList{
+ {
+ Name: "Name",
+ Value: "George",
+ },
+ {
+ Name: "Height",
+ Value: int64(32),
+ },
+ }
+ pList2 := PropertyList{
+ {
+ Name: "Name",
+ Value: "Rufus",
+ },
+ }
+ pMap1 := PropertyMap{
+ "Name": Property{
+ Name: "Name",
+ Value: "George",
+ },
+ "Height": Property{
+ Name: "Height",
+ Value: int64(32),
+ },
+ }
+ pMap2 := PropertyMap{
+ "Name": Property{
+ Name: "Name",
+ Value: "Rufus",
+ },
+ }
+
+ testCases := []struct {
+ dst interface{}
+ want interface{}
+ }{
+ // The destination must have type *[]P, *[]S or *[]*S, for some non-interface
+ // type P such that *P implements PropertyLoadSaver, or for some struct type S.
+ {new([]Gopher), &[]Gopher{struct1, struct2}},
+ {new([]*Gopher), &[]*Gopher{&struct1, &struct2}},
+ {new([]PropertyList), &[]PropertyList{pList1, pList2}},
+ {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}},
+
+ // Any other destination type is invalid.
+ {0, nil},
+ {Gopher{}, nil},
+ {PropertyList{}, nil},
+ {PropertyMap{}, nil},
+ {[]int{}, nil},
+ {[]Gopher{}, nil},
+ {[]PropertyList{}, nil},
+ {new(int), nil},
+ {new(Gopher), nil},
+ {new(PropertyList), nil}, // This is a special case.
+ {new(PropertyMap), nil},
+ {new([]int), nil},
+ {new([]map[int]int), nil},
+ {new([]map[string]Property), nil},
+ {new([]map[string]interface{}), nil},
+ {new([]*int), nil},
+ {new([]*map[int]int), nil},
+ {new([]*map[string]Property), nil},
+ {new([]*map[string]interface{}), nil},
+ {new([]**Gopher), nil},
+ {new([]*PropertyList), nil},
+ {new([]*PropertyMap), nil},
+ }
+ for _, tc := range testCases {
+ nCall := 0
+ c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
+ nCall++
+ return fakeRunQuery(in, out)
+ })
+ c = internal.WithAppIDOverride(c, "dev~fake-app")
+
+ var (
+ expectedErr error
+ expectedNCall int
+ )
+ if tc.want == nil {
+ expectedErr = ErrInvalidEntityType
+ } else {
+ expectedNCall = 1
+ }
+ keys, err := NewQuery("Gopher").GetAll(c, tc.dst)
+ if err != expectedErr {
+ t.Errorf("dst type %T: got error [%v], want [%v]", tc.dst, err, expectedErr)
+ continue
+ }
+ if nCall != expectedNCall {
+ t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall)
+ continue
+ }
+ if err != nil {
+ continue
+ }
+
+ key1 := NewKey(c, "Gopher", "", 6, nil)
+ expectedKeys := []*Key{
+ key1,
+ NewKey(c, "Gopher", "", 8, key1),
+ }
+ if l1, l2 := len(keys), len(expectedKeys); l1 != l2 {
+ t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2)
+ continue
+ }
+ for i, key := range keys {
+ if key.AppID() != "s~test-app" {
+ t.Errorf(`dst type %T: Key #%d's AppID = %q, want "s~test-app"`, tc.dst, i, key.AppID())
+ continue
+ }
+ if !keysEqual(key, expectedKeys[i]) {
+ t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i])
+ continue
+ }
+ }
+
+ if !reflect.DeepEqual(tc.dst, tc.want) {
+ t.Errorf("dst type %T: Entities got %+v, want %+v", tc.dst, tc.dst, tc.want)
+ continue
+ }
+ }
+}
+
+// keysEqual is like (*Key).Equal, but ignores the App ID.
+func keysEqual(a, b *Key) bool {
+ for a != nil && b != nil {
+ if a.Kind() != b.Kind() || a.StringID() != b.StringID() || a.IntID() != b.IntID() {
+ return false
+ }
+ a, b = a.Parent(), b.Parent()
+ }
+ return a == b
+}
+
+func TestQueriesAreImmutable(t *testing.T) {
+ // Test that deriving q2 from q1 does not modify q1.
+ q0 := NewQuery("foo")
+ q1 := NewQuery("foo")
+ q2 := q1.Offset(2)
+ if !reflect.DeepEqual(q0, q1) {
+ t.Errorf("q0 and q1 were not equal")
+ }
+ if reflect.DeepEqual(q1, q2) {
+ t.Errorf("q1 and q2 were equal")
+ }
+
+ // Test that deriving from q4 twice does not conflict, even though
+ // q4 has a long list of order clauses. This tests that the arrays
+ // backed by a query's slice of orders are not shared.
+ f := func() *Query {
+ q := NewQuery("bar")
+ // 47 is an ugly number that is unlikely to be near a re-allocation
+ // point in repeated append calls. For example, it's not near a power
+ // of 2 or a multiple of 10.
+ for i := 0; i < 47; i++ {
+ q = q.Order(fmt.Sprintf("x%d", i))
+ }
+ return q
+ }
+ q3 := f().Order("y")
+ q4 := f()
+ q5 := q4.Order("y")
+ q6 := q4.Order("z")
+ if !reflect.DeepEqual(q3, q5) {
+ t.Errorf("q3 and q5 were not equal")
+ }
+ if reflect.DeepEqual(q5, q6) {
+ t.Errorf("q5 and q6 were equal")
+ }
+}
+
+func TestFilterParser(t *testing.T) {
+ testCases := []struct {
+ filterStr string
+ wantOK bool
+ wantFieldName string
+ wantOp operator
+ }{
+ // Supported ops.
+ {"x<", true, "x", lessThan},
+ {"x <", true, "x", lessThan},
+ {"x <", true, "x", lessThan},
+ {" x < ", true, "x", lessThan},
+ {"x <=", true, "x", lessEq},
+ {"x =", true, "x", equal},
+ {"x >=", true, "x", greaterEq},
+ {"x >", true, "x", greaterThan},
+ {"in >", true, "in", greaterThan},
+ {"in>", true, "in", greaterThan},
+ // Valid but (currently) unsupported ops.
+ {"x!=", false, "", 0},
+ {"x !=", false, "", 0},
+ {" x != ", false, "", 0},
+ {"x IN", false, "", 0},
+ {"x in", false, "", 0},
+ // Invalid ops.
+ {"x EQ", false, "", 0},
+ {"x lt", false, "", 0},
+ {"x <>", false, "", 0},
+ {"x >>", false, "", 0},
+ {"x ==", false, "", 0},
+ {"x =<", false, "", 0},
+ {"x =>", false, "", 0},
+ {"x !", false, "", 0},
+ {"x ", false, "", 0},
+ {"x", false, "", 0},
+ }
+ for _, tc := range testCases {
+ q := NewQuery("foo").Filter(tc.filterStr, 42)
+ if ok := q.err == nil; ok != tc.wantOK {
+ t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK)
+ continue
+ }
+ if !tc.wantOK {
+ continue
+ }
+ if len(q.filter) != 1 {
+ t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1)
+ continue
+ }
+ got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42}
+ if got != want {
+ t.Errorf("%q: got %v, want %v", tc.filterStr, got, want)
+ continue
+ }
+ }
+}
+
+func TestQueryToProto(t *testing.T) {
+ // The context is required to make Keys for the test cases.
+ var got *pb.Query
+ NoErr := errors.New("No error")
+ c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
+ got = in
+ return NoErr // return a non-nil error so Run doesn't keep going.
+ })
+ c = internal.WithAppIDOverride(c, "dev~fake-app")
+
+ testCases := []struct {
+ desc string
+ query *Query
+ want *pb.Query
+ err string
+ }{
+ {
+ desc: "empty",
+ query: NewQuery(""),
+ want: &pb.Query{},
+ },
+ {
+ desc: "standard query",
+ query: NewQuery("kind").Order("-I").Filter("I >", 17).Filter("U =", "Dave").Limit(7).Offset(42),
+ want: &pb.Query{
+ Kind: proto.String("kind"),
+ Filter: []*pb.Query_Filter{
+ {
+ Op: pb.Query_Filter_GREATER_THAN.Enum(),
+ Property: []*pb.Property{
+ {
+ Name: proto.String("I"),
+ Value: &pb.PropertyValue{Int64Value: proto.Int64(17)},
+ Multiple: proto.Bool(false),
+ },
+ },
+ },
+ {
+ Op: pb.Query_Filter_EQUAL.Enum(),
+ Property: []*pb.Property{
+ {
+ Name: proto.String("U"),
+ Value: &pb.PropertyValue{StringValue: proto.String("Dave")},
+ Multiple: proto.Bool(false),
+ },
+ },
+ },
+ },
+ Order: []*pb.Query_Order{
+ {
+ Property: proto.String("I"),
+ Direction: pb.Query_Order_DESCENDING.Enum(),
+ },
+ },
+ Limit: proto.Int32(7),
+ Offset: proto.Int32(42),
+ },
+ },
+ {
+ desc: "ancestor",
+ query: NewQuery("").Ancestor(NewKey(c, "kind", "Mummy", 0, nil)),
+ want: &pb.Query{
+ Ancestor: &pb.Reference{
+ App: proto.String("dev~fake-app"),
+ Path: &pb.Path{
+ Element: []*pb.Path_Element{{Type: proto.String("kind"), Name: proto.String("Mummy")}},
+ },
+ },
+ },
+ },
+ {
+ desc: "projection",
+ query: NewQuery("").Project("A", "B"),
+ want: &pb.Query{
+ PropertyName: []string{"A", "B"},
+ },
+ },
+ {
+ desc: "projection with distinct",
+ query: NewQuery("").Project("A", "B").Distinct(),
+ want: &pb.Query{
+ PropertyName: []string{"A", "B"},
+ GroupByPropertyName: []string{"A", "B"},
+ },
+ },
+ {
+ desc: "keys only",
+ query: NewQuery("").KeysOnly(),
+ want: &pb.Query{
+ KeysOnly: proto.Bool(true),
+ RequirePerfectPlan: proto.Bool(true),
+ },
+ },
+ {
+ desc: "empty filter",
+ query: NewQuery("kind").Filter("=", 17),
+ err: "empty query filter field nam",
+ },
+ {
+ desc: "bad filter type",
+ query: NewQuery("kind").Filter("M =", map[string]bool{}),
+ err: "bad query filter value type",
+ },
+ {
+ desc: "bad filter operator",
+ query: NewQuery("kind").Filter("I <<=", 17),
+ err: `invalid operator "<<=" in filter "I <<="`,
+ },
+ {
+ desc: "empty order",
+ query: NewQuery("kind").Order(""),
+ err: "empty order",
+ },
+ {
+ desc: "bad order direction",
+ query: NewQuery("kind").Order("+I"),
+ err: `invalid order: "+I`,
+ },
+ }
+
+ for _, tt := range testCases {
+ got = nil
+ if _, err := tt.query.Run(c).Next(nil); err != NoErr {
+ if tt.err == "" || !strings.Contains(err.Error(), tt.err) {
+ t.Errorf("%s: error %v, want %q", tt.desc, err, tt.err)
+ }
+ continue
+ }
+ if tt.err != "" {
+ t.Errorf("%s: no error, want %q", tt.desc, tt.err)
+ continue
+ }
+ // Fields that are common to all protos.
+ tt.want.App = proto.String("dev~fake-app")
+ tt.want.Compile = proto.Bool(true)
+ if !proto.Equal(got, tt.want) {
+ t.Errorf("%s:\ngot %v\nwant %v", tt.desc, got, tt.want)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go
new file mode 100644
index 000000000..728d4ca0c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/save.go
@@ -0,0 +1,327 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+func toUnixMicro(t time.Time) int64 {
+ // We cannot use t.UnixNano() / 1e3 because we want to handle times more than
+ // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
+ // be represented in the numerator of a single int64 divide.
+ return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
+}
+
+func fromUnixMicro(t int64) time.Time {
+ return time.Unix(t/1e6, (t%1e6)*1e3).UTC()
+}
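
A small worked sketch of the truncation behaviour (package-internal, since the helpers are unexported):

func microRoundTrip() {
	t := time.Date(2018, 1, 29, 14, 17, 40, 123456789, time.UTC)
	us := toUnixMicro(t)      // nanoseconds truncated to whole microseconds
	back := fromUnixMicro(us) // 2018-01-29 14:17:40.123456 +0000 UTC
	_ = back.Equal(t)         // false: the trailing 789ns were dropped
}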
+
+var (
+ minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
+ maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
+)
+
+// valueToProto converts a named value to a newly allocated Property.
+// The returned error string is empty on success.
+func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {
+ var (
+ pv pb.PropertyValue
+ unsupported bool
+ )
+ switch v.Kind() {
+ case reflect.Invalid:
+ // No-op.
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ pv.Int64Value = proto.Int64(v.Int())
+ case reflect.Bool:
+ pv.BooleanValue = proto.Bool(v.Bool())
+ case reflect.String:
+ pv.StringValue = proto.String(v.String())
+ case reflect.Float32, reflect.Float64:
+ pv.DoubleValue = proto.Float64(v.Float())
+ case reflect.Ptr:
+ if k, ok := v.Interface().(*Key); ok {
+ if k != nil {
+ pv.Referencevalue = keyToReferenceValue(defaultAppID, k)
+ }
+ } else {
+ unsupported = true
+ }
+ case reflect.Struct:
+ switch t := v.Interface().(type) {
+ case time.Time:
+ if t.Before(minTime) || t.After(maxTime) {
+ return nil, "time value out of range"
+ }
+ pv.Int64Value = proto.Int64(toUnixMicro(t))
+ case appengine.GeoPoint:
+ if !t.Valid() {
+ return nil, "invalid GeoPoint value"
+ }
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng}
+ default:
+ unsupported = true
+ }
+ case reflect.Slice:
+ if b, ok := v.Interface().([]byte); ok {
+ pv.StringValue = proto.String(string(b))
+ } else {
+ // Slice values should already have been caught by the caller;
+ // if we get here, we have a slice of slice values.
+ unsupported = true
+ }
+ default:
+ unsupported = true
+ }
+ if unsupported {
+ return nil, "unsupported datastore value type: " + v.Type().String()
+ }
+ p = &pb.Property{
+ Name: proto.String(name),
+ Value: &pv,
+ Multiple: proto.Bool(multiple),
+ }
+ if v.IsValid() {
+ switch v.Interface().(type) {
+ case []byte:
+ p.Meaning = pb.Property_BLOB.Enum()
+ case ByteString:
+ p.Meaning = pb.Property_BYTESTRING.Enum()
+ case appengine.BlobKey:
+ p.Meaning = pb.Property_BLOBKEY.Enum()
+ case time.Time:
+ p.Meaning = pb.Property_GD_WHEN.Enum()
+ case appengine.GeoPoint:
+ p.Meaning = pb.Property_GEORSS_POINT.Enum()
+ }
+ }
+ return p, ""
+}
+
+type saveOpts struct {
+ noIndex bool
+ multiple bool
+ omitEmpty bool
+}
+
+// saveEntity converts a PropertyLoadSaver or struct pointer into an EntityProto.
+func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) {
+ var err error
+ var props []Property
+ if e, ok := src.(PropertyLoadSaver); ok {
+ props, err = e.Save()
+ } else {
+ props, err = SaveStruct(src)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return propertiesToProto(defaultAppID, key, props)
+}
+
+func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
+ if opts.omitEmpty && isEmptyValue(v) {
+ return nil
+ }
+ p := Property{
+ Name: name,
+ NoIndex: opts.noIndex,
+ Multiple: opts.multiple,
+ }
+ switch x := v.Interface().(type) {
+ case *Key:
+ p.Value = x
+ case time.Time:
+ p.Value = x
+ case appengine.BlobKey:
+ p.Value = x
+ case appengine.GeoPoint:
+ p.Value = x
+ case ByteString:
+ p.Value = x
+ default:
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p.Value = v.Int()
+ case reflect.Bool:
+ p.Value = v.Bool()
+ case reflect.String:
+ p.Value = v.String()
+ case reflect.Float32, reflect.Float64:
+ p.Value = v.Float()
+ case reflect.Slice:
+ if v.Type().Elem().Kind() == reflect.Uint8 {
+ p.NoIndex = true
+ p.Value = v.Bytes()
+ }
+ case reflect.Struct:
+ if !v.CanAddr() {
+ return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
+ }
+ sub, err := newStructPLS(v.Addr().Interface())
+ if err != nil {
+ return fmt.Errorf("datastore: unsupported struct field: %v", err)
+ }
+ return sub.save(props, name+".", opts)
+ }
+ }
+ if p.Value == nil {
+ return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
+ }
+ *props = append(*props, p)
+ return nil
+}
+
+func (s structPLS) Save() ([]Property, error) {
+ var props []Property
+ if err := s.save(&props, "", saveOpts{}); err != nil {
+ return nil, err
+ }
+ return props, nil
+}
+
+func (s structPLS) save(props *[]Property, prefix string, opts saveOpts) error {
+ for name, f := range s.codec.fields {
+ name = prefix + name
+ v := s.v.FieldByIndex(f.path)
+ if !v.IsValid() || !v.CanSet() {
+ continue
+ }
+ var opts1 saveOpts
+ opts1.noIndex = opts.noIndex || f.noIndex
+ opts1.multiple = opts.multiple
+ opts1.omitEmpty = f.omitEmpty // don't propagate
+ // For slice fields that aren't []byte, save each element.
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ opts1.multiple = true
+ for j := 0; j < v.Len(); j++ {
+ if err := saveStructProperty(props, name, opts1, v.Index(j)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ // Otherwise, save the field itself.
+ if err := saveStructProperty(props, name, opts1, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) {
+ e := &pb.EntityProto{
+ Key: keyToProto(defaultAppID, key),
+ }
+ if key.parent == nil {
+ e.EntityGroup = &pb.Path{}
+ } else {
+ e.EntityGroup = keyToProto(defaultAppID, key.root()).Path
+ }
+ prevMultiple := make(map[string]bool)
+
+ for _, p := range props {
+ if pm, ok := prevMultiple[p.Name]; ok {
+ if !pm || !p.Multiple {
+ return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name)
+ }
+ } else {
+ prevMultiple[p.Name] = p.Multiple
+ }
+
+ x := &pb.Property{
+ Name: proto.String(p.Name),
+ Value: new(pb.PropertyValue),
+ Multiple: proto.Bool(p.Multiple),
+ }
+ switch v := p.Value.(type) {
+ case int64:
+ x.Value.Int64Value = proto.Int64(v)
+ case bool:
+ x.Value.BooleanValue = proto.Bool(v)
+ case string:
+ x.Value.StringValue = proto.String(v)
+ if p.NoIndex {
+ x.Meaning = pb.Property_TEXT.Enum()
+ }
+ case float64:
+ x.Value.DoubleValue = proto.Float64(v)
+ case *Key:
+ if v != nil {
+ x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v)
+ }
+ case time.Time:
+ if v.Before(minTime) || v.After(maxTime) {
+ return nil, fmt.Errorf("datastore: time value out of range")
+ }
+ x.Value.Int64Value = proto.Int64(toUnixMicro(v))
+ x.Meaning = pb.Property_GD_WHEN.Enum()
+ case appengine.BlobKey:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BLOBKEY.Enum()
+ case appengine.GeoPoint:
+ if !v.Valid() {
+ return nil, fmt.Errorf("datastore: invalid GeoPoint value")
+ }
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng}
+ x.Meaning = pb.Property_GEORSS_POINT.Enum()
+ case []byte:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BLOB.Enum()
+ if !p.NoIndex {
+ return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name)
+ }
+ case ByteString:
+ x.Value.StringValue = proto.String(string(v))
+ x.Meaning = pb.Property_BYTESTRING.Enum()
+ default:
+ if p.Value != nil {
+ return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name)
+ }
+ }
+
+ if p.NoIndex {
+ e.RawProperty = append(e.RawProperty, x)
+ } else {
+ e.Property = append(e.Property, x)
+ if len(e.Property) > maxIndexedProperties {
+ return nil, errors.New("datastore: too many indexed properties")
+ }
+ }
+ }
+ return e, nil
+}
+
+// isEmptyValue is taken from the encoding/json package in the
+// standard library.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
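
An aside for readers of this diff: the microsecond round trip above is easy to check in isolation. A minimal, self-contained sketch — the helpers are unexported, so they are reproduced here verbatim rather than imported:

package main

import (
	"fmt"
	"time"
)

// toUnixMicro and fromUnixMicro mirror the unexported helpers in save.go.
func toUnixMicro(t time.Time) int64 {
	return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
}

func fromUnixMicro(t int64) time.Time {
	return time.Unix(t/1e6, (t%1e6)*1e3).UTC()
}

func main() {
	// Microsecond-aligned times survive the round trip...
	t0 := time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC)
	fmt.Println(fromUnixMicro(toUnixMicro(t0)).Equal(t0)) // true

	// ...but sub-microsecond precision is quantized away,
	// exactly as time_test.go below verifies.
	t1 := time.Unix(0, 123)
	fmt.Println(fromUnixMicro(toUnixMicro(t1)).Equal(t1)) // false
}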
diff --git a/vendor/google.golang.org/appengine/datastore/time_test.go b/vendor/google.golang.org/appengine/datastore/time_test.go
new file mode 100644
index 000000000..ba74b449e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/time_test.go
@@ -0,0 +1,65 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "testing"
+ "time"
+)
+
+func TestUnixMicro(t *testing.T) {
+ // Test that all these time.Time values survive a round trip to unix micros.
+ testCases := []time.Time{
+ {},
+ time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Unix(-1e6, -1000),
+ time.Unix(-1e6, 0),
+ time.Unix(-1e6, +1000),
+ time.Unix(-60, -1000),
+ time.Unix(-60, 0),
+ time.Unix(-60, +1000),
+ time.Unix(-1, -1000),
+ time.Unix(-1, 0),
+ time.Unix(-1, +1000),
+ time.Unix(0, -3000),
+ time.Unix(0, -2000),
+ time.Unix(0, -1000),
+ time.Unix(0, 0),
+ time.Unix(0, +1000),
+ time.Unix(0, +2000),
+ time.Unix(+60, -1000),
+ time.Unix(+60, 0),
+ time.Unix(+60, +1000),
+ time.Unix(+1e6, -1000),
+ time.Unix(+1e6, 0),
+ time.Unix(+1e6, +1000),
+ time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC),
+ time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC),
+ time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC),
+ time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC),
+ }
+ for _, tc := range testCases {
+ got := fromUnixMicro(toUnixMicro(tc))
+ if !got.Equal(tc) {
+ t.Errorf("got %q, want %q", got, tc)
+ }
+ }
+
+ // Test that a time.Time that isn't an integral number of microseconds
+ // is not perfectly reconstructed after a round trip.
+ t0 := time.Unix(0, 123)
+ t1 := fromUnixMicro(toUnixMicro(t0))
+ if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 {
+ t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond())
+ }
+}
diff --git a/vendor/google.golang.org/appengine/datastore/transaction.go b/vendor/google.golang.org/appengine/datastore/transaction.go
new file mode 100644
index 000000000..a7f3f2b28
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/transaction.go
@@ -0,0 +1,87 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+func init() {
+ internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+}
+
+// ErrConcurrentTransaction is returned when a transaction is rolled back due
+// to a conflict with a concurrent transaction.
+var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
+
+// RunInTransaction runs f in a transaction. It calls f with a transaction
+// context tc that f should use for all App Engine operations.
+//
+// If f returns nil, RunInTransaction attempts to commit the transaction,
+// returning nil if it succeeds. If the commit fails due to a conflicting
+// transaction, RunInTransaction retries f, each time with a new transaction
+// context. It gives up and returns ErrConcurrentTransaction after three
+// failed attempts. The number of attempts can be configured by specifying
+// TransactionOptions.Attempts.
+//
+// If f returns non-nil, then any datastore changes will not be applied and
+// RunInTransaction returns that same error. The function f is not retried.
+//
+// Note that when f returns, the transaction is not yet committed. Calling code
+// must be careful not to assume that any of f's changes have been committed
+// until RunInTransaction returns nil.
+//
+// Since f may be called multiple times, f should usually be idempotent.
+// datastore.Get is not idempotent when unmarshaling slice fields.
+//
+// Nested transactions are not supported; c may not be a transaction context.
+func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error {
+ xg := false
+ if opts != nil {
+ xg = opts.XG
+ }
+ attempts := 3
+ if opts != nil && opts.Attempts > 0 {
+ attempts = opts.Attempts
+ }
+ for i := 0; i < attempts; i++ {
+ if err := internal.RunTransactionOnce(c, f, xg); err != internal.ErrConcurrentTransaction {
+ return err
+ }
+ }
+ return ErrConcurrentTransaction
+}
+
+// TransactionOptions are the options for running a transaction.
+type TransactionOptions struct {
+ // XG is whether the transaction can cross multiple entity groups. In
+ // comparison, a single group transaction is one where all datastore keys
+ // used have the same root key. Note that cross-group transactions do not
+ // behave identically to single-group transactions: in particular, global
+ // queries are much more likely to observe transactions that have been
+ // only partially applied across the different entity groups.
+ // It is valid to set XG to true even if the transaction is within a
+ // single entity group.
+ XG bool
+ // Attempts controls the number of retries to perform when commits fail
+ // due to a conflicting transaction. If omitted, it defaults to 3.
+ Attempts int
+}
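
The doc comment above captures the contract; a hedged usage sketch, assuming an illustrative Counter kind and key name that are not part of the package:

package counter

import (
	"net/http"

	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
)

// Counter is an illustrative kind, not part of the vendored package.
type Counter struct {
	Count int
}

func increment(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	key := datastore.NewKey(ctx, "Counter", "singleton", 0, nil)
	err := datastore.RunInTransaction(ctx, func(tc context.Context) error {
		var c Counter
		if err := datastore.Get(tc, key, &c); err != nil && err != datastore.ErrNoSuchEntity {
			return err
		}
		c.Count++ // f may run several times on contention, so keep it idempotent
		_, err := datastore.Put(tc, key, &c)
		return err
	}, &datastore.TransactionOptions{Attempts: 5}) // override the default 3 retries
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}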
diff --git a/vendor/google.golang.org/appengine/delay/delay.go b/vendor/google.golang.org/appengine/delay/delay.go
new file mode 100644
index 000000000..52915a422
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay.go
@@ -0,0 +1,295 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package delay provides a way to execute code outside the scope of a
+user request by using the taskqueue API.
+
+To declare a function that may be executed later, call Func
+in a top-level assignment context, passing it an arbitrary string key
+and a function whose first argument is of type context.Context.
+The key is used to look up the function so it can be called later.
+ var laterFunc = delay.Func("key", myFunc)
+It is also possible to use a function literal.
+ var laterFunc = delay.Func("key", func(c context.Context, x string) {
+ // ...
+ })
+
+To call a function, invoke its Call method.
+ laterFunc.Call(c, "something")
+A function may be called any number of times. If the function has any
+return arguments, and the last one is of type error, the function may
+return a non-nil error to signal that the function should be retried.
+
+The arguments to functions may be of any type that is encodable by the gob
+package. If an argument is of interface type, it is the client's responsibility
+to register with the gob package whatever concrete type may be passed for that
+argument; see http://golang.org/pkg/gob/#Register for details.
+
+Any errors during initialization or execution of a function will be
+logged to the application logs. Error logs that occur during initialization will
+be associated with the request that invoked the Call method.
+
+The state of a function invocation that has not yet successfully
+executed is preserved by combining the file name in which it is declared
+with the string key that was passed to the Func function. Updating an app
+with pending function invocations is safe as long as the relevant
+functions have the (filename, key) combination preserved.
+
+The delay package uses the Task Queue API to create tasks that call the
+reserved application path "/_ah/queue/go/delay".
+This path must not be marked as "login: required" in app.yaml;
+it must be marked as "login: admin" or have no access restriction.
+*/
+package delay // import "google.golang.org/appengine/delay"
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "net/http"
+ "reflect"
+ "runtime"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/taskqueue"
+)
+
+// Function represents a function that may have a delayed invocation.
+type Function struct {
+ fv reflect.Value // Kind() == reflect.Func
+ key string
+ err error // any error during initialization
+}
+
+const (
+ // The HTTP path for invocations.
+ path = "/_ah/queue/go/delay"
+ // Use the default queue.
+ queue = ""
+)
+
+type contextKey int
+
+var (
+ // registry of all delayed functions
+ funcs = make(map[string]*Function)
+
+ // precomputed types
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+ // errors
+ errFirstArg = errors.New("first argument must be context.Context")
+ errOutsideDelayFunc = errors.New("request headers are only available inside a delay.Func")
+
+ // context keys
+ headersContextKey contextKey = 0
+)
+
+// Func declares a new Function. The second argument must be a function with a
+// first argument of type context.Context.
+// This function must be called at program initialization time. That means it
+// must be called in a global variable declaration or from an init function.
+// This restriction is necessary because the instance that delays a function
+// call may not be the one that executes it. Only the code executed at program
+// initialization time is guaranteed to have been run by an instance before it
+// receives a request.
+func Func(key string, i interface{}) *Function {
+ f := &Function{fv: reflect.ValueOf(i)}
+
+ // Derive unique, somewhat stable key for this func.
+ _, file, _, _ := runtime.Caller(1)
+ f.key = file + ":" + key
+
+ t := f.fv.Type()
+ if t.Kind() != reflect.Func {
+ f.err = errors.New("not a function")
+ return f
+ }
+ if t.NumIn() == 0 || !isContext(t.In(0)) {
+ f.err = errFirstArg
+ return f
+ }
+
+ // Register the function's arguments with the gob package.
+ // This is required because they are marshaled inside a []interface{}.
+ // gob.Register only expects to be called during initialization;
+ // that's fine because this function expects the same.
+ for i := 0; i < t.NumIn(); i++ {
+ // Only concrete types may be registered. If the argument has
+ // interface type, the client is responsible for registering the
+ // concrete types it will hold.
+ if t.In(i).Kind() == reflect.Interface {
+ continue
+ }
+ gob.Register(reflect.Zero(t.In(i)).Interface())
+ }
+
+ if old := funcs[f.key]; old != nil {
+ old.err = fmt.Errorf("multiple functions registered for %s in %s", key, file)
+ }
+ funcs[f.key] = f
+ return f
+}
+
+type invocation struct {
+ Key string
+ Args []interface{}
+}
+
+// Call invokes a delayed function.
+// err := f.Call(c, ...)
+// is equivalent to
+// t, _ := f.Task(...)
+// _, err := taskqueue.Add(c, t, "")
+func (f *Function) Call(c context.Context, args ...interface{}) error {
+ t, err := f.Task(args...)
+ if err != nil {
+ return err
+ }
+ _, err = taskqueueAdder(c, t, queue)
+ return err
+}
+
+// Task creates a Task that will invoke the function.
+// Its parameters may be tweaked before adding it to a queue.
+// Users should not modify the Path or Payload fields of the returned Task.
+func (f *Function) Task(args ...interface{}) (*taskqueue.Task, error) {
+ if f.err != nil {
+ return nil, fmt.Errorf("delay: func is invalid: %v", f.err)
+ }
+
+ nArgs := len(args) + 1 // +1 for the context.Context
+ ft := f.fv.Type()
+ minArgs := ft.NumIn()
+ if ft.IsVariadic() {
+ minArgs--
+ }
+ if nArgs < minArgs {
+ return nil, fmt.Errorf("delay: too few arguments to func: %d < %d", nArgs, minArgs)
+ }
+ if !ft.IsVariadic() && nArgs > minArgs {
+ return nil, fmt.Errorf("delay: too many arguments to func: %d > %d", nArgs, minArgs)
+ }
+
+ // Check arg types.
+ for i := 1; i < nArgs; i++ {
+ at := reflect.TypeOf(args[i-1])
+ var dt reflect.Type
+ if i < minArgs {
+ // not a variadic arg
+ dt = ft.In(i)
+ } else {
+ // a variadic arg
+ dt = ft.In(minArgs).Elem()
+ }
+ // nil arguments won't have a type, so they need special handling.
+ if at == nil {
+ // nil interface
+ switch dt.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ continue // may be nil
+ }
+ return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not nilable", i, dt)
+ }
+ switch at.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ av := reflect.ValueOf(args[i-1])
+ if av.IsNil() {
+ // nil value in interface; not supported by gob, so we replace it
+ // with a nil interface value
+ args[i-1] = nil
+ }
+ }
+ if !at.AssignableTo(dt) {
+ return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not assignable to %v", i, at, dt)
+ }
+ }
+
+ inv := invocation{
+ Key: f.key,
+ Args: args,
+ }
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(inv); err != nil {
+ return nil, fmt.Errorf("delay: gob encoding failed: %v", err)
+ }
+
+ return &taskqueue.Task{
+ Path: path,
+ Payload: buf.Bytes(),
+ }, nil
+}
+
+// RequestHeaders returns the special task-queue HTTP request headers for the current
+// task queue handler. Returns an error if called from outside a delay.Func.
+func RequestHeaders(c context.Context) (*taskqueue.RequestHeaders, error) {
+ if ret, ok := c.Value(headersContextKey).(*taskqueue.RequestHeaders); ok {
+ return ret, nil
+ }
+ return nil, errOutsideDelayFunc
+}
+
+var taskqueueAdder = taskqueue.Add // for testing
+
+func init() {
+ http.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
+ runFunc(appengine.NewContext(req), w, req)
+ })
+}
+
+func runFunc(c context.Context, w http.ResponseWriter, req *http.Request) {
+ defer req.Body.Close()
+
+ c = context.WithValue(c, headersContextKey, taskqueue.ParseRequestHeaders(req.Header))
+
+ var inv invocation
+ if err := gob.NewDecoder(req.Body).Decode(&inv); err != nil {
+ log.Errorf(c, "delay: failed decoding task payload: %v", err)
+ log.Warningf(c, "delay: dropping task")
+ return
+ }
+
+ f := funcs[inv.Key]
+ if f == nil {
+ log.Errorf(c, "delay: no func with key %q found", inv.Key)
+ log.Warningf(c, "delay: dropping task")
+ return
+ }
+
+ ft := f.fv.Type()
+ in := []reflect.Value{reflect.ValueOf(c)}
+ for _, arg := range inv.Args {
+ var v reflect.Value
+ if arg != nil {
+ v = reflect.ValueOf(arg)
+ } else {
+ // Task was passed a nil argument, so we must construct
+ // the zero value for the argument here.
+ n := len(in) // we're constructing the nth argument
+ var at reflect.Type
+ if !ft.IsVariadic() || n < ft.NumIn()-1 {
+ at = ft.In(n)
+ } else {
+ at = ft.In(ft.NumIn() - 1).Elem()
+ }
+ v = reflect.Zero(at)
+ }
+ in = append(in, v)
+ }
+ out := f.fv.Call(in)
+
+ if n := ft.NumOut(); n > 0 && ft.Out(n-1) == errorType {
+ if errv := out[n-1]; !errv.IsNil() {
+ log.Errorf(c, "delay: func failed (will retry): %v", errv.Interface())
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ }
+}
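
Putting the package doc above into practice — a minimal sketch of declaring and invoking a delayed function; the key "expensive" and the handler wiring are illustrative:

package app

import (
	"net/http"

	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/delay"
)

// Declared in a top-level assignment, as Func requires, so every
// instance registers the function before serving requests.
var expensiveFunc = delay.Func("expensive", func(c context.Context, id int64) error {
	// Do the slow work here; a non-nil error asks the task queue to retry.
	return nil
})

func handler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	if err := expensiveFunc.Call(ctx, int64(42)); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}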
diff --git a/vendor/google.golang.org/appengine/delay/delay_go17.go b/vendor/google.golang.org/appengine/delay/delay_go17.go
new file mode 100644
index 000000000..9a59e8b0d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay_go17.go
@@ -0,0 +1,23 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+//+build go1.7
+
+package delay
+
+import (
+ stdctx "context"
+ "reflect"
+
+ netctx "golang.org/x/net/context"
+)
+
+var (
+ stdContextType = reflect.TypeOf((*stdctx.Context)(nil)).Elem()
+ netContextType = reflect.TypeOf((*netctx.Context)(nil)).Elem()
+)
+
+func isContext(t reflect.Type) bool {
+ return t == stdContextType || t == netContextType
+}
diff --git a/vendor/google.golang.org/appengine/delay/delay_go17_test.go b/vendor/google.golang.org/appengine/delay/delay_go17_test.go
new file mode 100644
index 000000000..0e708d005
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay_go17_test.go
@@ -0,0 +1,55 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+//+build go1.7
+
+package delay
+
+import (
+ "bytes"
+ stdctx "context"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ netctx "golang.org/x/net/context"
+ "google.golang.org/appengine/taskqueue"
+)
+
+var (
+ stdCtxRuns = 0
+ stdCtxFunc = Func("stdctx", func(c stdctx.Context) {
+ stdCtxRuns++
+ })
+)
+
+func TestStandardContext(t *testing.T) {
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ netctx.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ c := newFakeContext()
+ stdCtxRuns = 0 // reset state
+ if err := stdCtxFunc.Call(c.ctx); err != nil {
+ t.Fatal("Function.Call:", err)
+ }
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if stdCtxRuns != 1 {
+ t.Errorf("stdCtxRuns: got %d, want 1", stdCtxRuns)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/delay/delay_pre17.go b/vendor/google.golang.org/appengine/delay/delay_pre17.go
new file mode 100644
index 000000000..d30c75dfb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay_pre17.go
@@ -0,0 +1,19 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+//+build !go1.7
+
+package delay
+
+import (
+ "reflect"
+
+ "golang.org/x/net/context"
+)
+
+var contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
+
+func isContext(t reflect.Type) bool {
+ return t == contextType
+}
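
The go1.7/pre-1.7 split above exists because the standard "context" package only appeared in Go 1.7. A small sketch of why delay_go17.go must accept both types: on Go 1.9+ x/net/context.Context is a type alias for the standard interface, while on Go 1.7/1.8 they are distinct types.

package main

import (
	stdctx "context"
	"fmt"
	"reflect"

	netctx "golang.org/x/net/context"
)

func main() {
	std := reflect.TypeOf((*stdctx.Context)(nil)).Elem()
	net := reflect.TypeOf((*netctx.Context)(nil)).Elem()
	// Prints true on Go 1.9+ (alias), false on Go 1.7/1.8 (separate
	// definitions) — hence the two-way check in isContext above.
	fmt.Println(std == net)
}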
diff --git a/vendor/google.golang.org/appengine/delay/delay_test.go b/vendor/google.golang.org/appengine/delay/delay_test.go
new file mode 100644
index 000000000..3df2bf7e3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/delay/delay_test.go
@@ -0,0 +1,428 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package delay
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/taskqueue"
+)
+
+type CustomType struct {
+ N int
+}
+
+type CustomInterface interface {
+ N() int
+}
+
+type CustomImpl int
+
+func (c CustomImpl) N() int { return int(c) }
+
+// CustomImpl needs to be registered with gob.
+func init() {
+ gob.Register(CustomImpl(0))
+}
+
+var (
+ invalidFunc = Func("invalid", func() {})
+
+ regFuncRuns = 0
+ regFuncMsg = ""
+ regFunc = Func("reg", func(c context.Context, arg string) {
+ regFuncRuns++
+ regFuncMsg = arg
+ })
+
+ custFuncTally = 0
+ custFunc = Func("cust", func(c context.Context, ct *CustomType, ci CustomInterface) {
+ a, b := 2, 3
+ if ct != nil {
+ a = ct.N
+ }
+ if ci != nil {
+ b = ci.N()
+ }
+ custFuncTally += a + b
+ })
+
+ anotherCustFunc = Func("cust2", func(c context.Context, n int, ct *CustomType, ci CustomInterface) {
+ })
+
+ varFuncMsg = ""
+ varFunc = Func("variadic", func(c context.Context, format string, args ...int) {
+ // convert []int to []interface{} for fmt.Sprintf.
+ as := make([]interface{}, len(args))
+ for i, a := range args {
+ as[i] = a
+ }
+ varFuncMsg = fmt.Sprintf(format, as...)
+ })
+
+ errFuncRuns = 0
+ errFuncErr = errors.New("error!")
+ errFunc = Func("err", func(c context.Context) error {
+ errFuncRuns++
+ if errFuncRuns == 1 {
+ return nil
+ }
+ return errFuncErr
+ })
+
+ dupeWhich = 0
+ dupe1Func = Func("dupe", func(c context.Context) {
+ if dupeWhich == 0 {
+ dupeWhich = 1
+ }
+ })
+ dupe2Func = Func("dupe", func(c context.Context) {
+ if dupeWhich == 0 {
+ dupeWhich = 2
+ }
+ })
+
+ reqFuncRuns = 0
+ reqFuncHeaders *taskqueue.RequestHeaders
+ reqFuncErr error
+ reqFunc = Func("req", func(c context.Context) {
+ reqFuncRuns++
+ reqFuncHeaders, reqFuncErr = RequestHeaders(c)
+ })
+)
+
+type fakeContext struct {
+ ctx context.Context
+ logging [][]interface{}
+}
+
+func newFakeContext() *fakeContext {
+ f := new(fakeContext)
+ f.ctx = internal.WithCallOverride(context.Background(), f.call)
+ f.ctx = internal.WithLogOverride(f.ctx, f.logf)
+ return f
+}
+
+func (f *fakeContext) call(ctx context.Context, service, method string, in, out proto.Message) error {
+ panic("should never be called")
+}
+
+var logLevels = map[int64]string{1: "INFO", 3: "ERROR"}
+
+func (f *fakeContext) logf(level int64, format string, args ...interface{}) {
+ f.logging = append(f.logging, append([]interface{}{logLevels[level], format}, args...))
+}
+
+func TestInvalidFunction(t *testing.T) {
+ c := newFakeContext()
+
+ if got, want := invalidFunc.Call(c.ctx), fmt.Errorf("delay: func is invalid: %s", errFirstArg); got.Error() != want.Error() {
+ t.Errorf("Incorrect error: got %q, want %q", got, want)
+ }
+}
+
+func TestVariadicFunctionArguments(t *testing.T) {
+ // Check the argument type validation for variadic functions.
+
+ c := newFakeContext()
+
+ calls := 0
+ taskqueueAdder = func(c context.Context, t *taskqueue.Task, _ string) (*taskqueue.Task, error) {
+ calls++
+ return t, nil
+ }
+
+ varFunc.Call(c.ctx, "hi")
+ varFunc.Call(c.ctx, "%d", 12)
+ varFunc.Call(c.ctx, "%d %d %d", 3, 1, 4)
+ if calls != 3 {
+ t.Errorf("Got %d calls to taskqueueAdder, want 3", calls)
+ }
+
+ if got, want := varFunc.Call(c.ctx, "%d %s", 12, "a string is bad"), errors.New("delay: argument 3 has wrong type: string is not assignable to int"); got.Error() != want.Error() {
+ t.Errorf("Incorrect error: got %q, want %q", got, want)
+ }
+}
+
+func TestBadArguments(t *testing.T) {
+ // Try running regFunc with different sets of inappropriate arguments.
+
+ c := newFakeContext()
+
+ tests := []struct {
+ args []interface{} // all except context
+ wantErr string
+ }{
+ {
+ args: nil,
+ wantErr: "delay: too few arguments to func: 1 < 2",
+ },
+ {
+ args: []interface{}{"lala", 53},
+ wantErr: "delay: too many arguments to func: 3 > 2",
+ },
+ {
+ args: []interface{}{53},
+ wantErr: "delay: argument 1 has wrong type: int is not assignable to string",
+ },
+ }
+ for i, tc := range tests {
+ got := regFunc.Call(c.ctx, tc.args...)
+ if got.Error() != tc.wantErr {
+ t.Errorf("Call %v: got %q, want %q", i, got, tc.wantErr)
+ }
+ }
+}
+
+func TestRunningFunction(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ regFuncRuns, regFuncMsg = 0, "" // reset state
+ const msg = "Why, hello!"
+ regFunc.Call(c.ctx, msg)
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if regFuncRuns != 1 {
+ t.Errorf("regFuncRuns: got %d, want 1", regFuncRuns)
+ }
+ if regFuncMsg != msg {
+ t.Errorf("regFuncMsg: got %q, want %q", regFuncMsg, msg)
+ }
+}
+
+func TestCustomType(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ custFuncTally = 0 // reset state
+ custFunc.Call(c.ctx, &CustomType{N: 11}, CustomImpl(13))
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if custFuncTally != 24 {
+ t.Errorf("custFuncTally = %d, want 24", custFuncTally)
+ }
+
+ // Try the same, but with nil values; one is a nil pointer (and thus a non-nil interface value),
+ // and the other is a nil interface value.
+ custFuncTally = 0 // reset state
+ custFunc.Call(c.ctx, (*CustomType)(nil), nil)
+
+ // Simulate the Task Queue service.
+ req, err = http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw = httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if custFuncTally != 5 {
+ t.Errorf("custFuncTally = %d, want 5", custFuncTally)
+ }
+}
+
+func TestRunningVariadic(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ varFuncMsg = "" // reset state
+ varFunc.Call(c.ctx, "Amiga %d has %d KB RAM", 500, 512)
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ const expected = "Amiga 500 has 512 KB RAM"
+ if varFuncMsg != expected {
+ t.Errorf("varFuncMsg = %q, want %q", varFuncMsg, expected)
+ }
+}
+
+func TestErrorFunction(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ errFunc.Call(c.ctx)
+
+ // Simulate the Task Queue service.
+ // The first call should succeed; the second call should fail.
+ {
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+ }
+ {
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+ if rw.Code != http.StatusInternalServerError {
+ t.Errorf("Got status code %d, want %d", rw.Code, http.StatusInternalServerError)
+ }
+
+ wantLogging := [][]interface{}{
+ {"ERROR", "delay: func failed (will retry): %v", errFuncErr},
+ }
+ if !reflect.DeepEqual(c.logging, wantLogging) {
+ t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging)
+ }
+ }
+}
+
+func TestDuplicateFunction(t *testing.T) {
+ c := newFakeContext()
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ if err := dupe1Func.Call(c.ctx); err == nil {
+ t.Error("dupe1Func.Call did not return error")
+ }
+ if task != nil {
+ t.Error("dupe1Func.Call posted a task")
+ }
+ if err := dupe2Func.Call(c.ctx); err != nil {
+ t.Errorf("dupe2Func.Call error: %v", err)
+ }
+ if task == nil {
+ t.Fatalf("dupe2Func.Call did not post a task")
+ }
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if dupeWhich == 1 {
+ t.Error("dupe2Func.Call used old registered function")
+ } else if dupeWhich != 2 {
+ t.Errorf("dupeWhich = %d; want 2", dupeWhich)
+ }
+}
+
+func TestGetRequestHeadersFromContext(t *testing.T) {
+ c := newFakeContext()
+
+ // Outside a delay.Func should return an error.
+ headers, err := RequestHeaders(c.ctx)
+ if headers != nil {
+ t.Errorf("RequestHeaders outside Func, got %v, want nil", headers)
+ }
+ if err != errOutsideDelayFunc {
+ t.Errorf("RequestHeaders outside Func err, got %v, want %v", err, errOutsideDelayFunc)
+ }
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ reqFunc.Call(c.ctx)
+
+ reqFuncRuns, reqFuncHeaders = 0, nil // reset state
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ req.Header.Set("x-appengine-taskname", "foobar")
+ rw := httptest.NewRecorder()
+ runFunc(c.ctx, rw, req)
+
+ if reqFuncRuns != 1 {
+ t.Errorf("reqFuncRuns: got %d, want 1", reqFuncRuns)
+ }
+ if reqFuncHeaders.TaskName != "foobar" {
+ t.Errorf("reqFuncHeaders.TaskName: got %v, want 'foobar'", reqFuncHeaders.TaskName)
+ }
+ if reqFuncErr != nil {
+ t.Errorf("reqFuncErr: got %v, want nil", reqFuncErr)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/app.yaml b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml
new file mode 100644
index 000000000..334250332
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml
@@ -0,0 +1,14 @@
+# Demo application for App Engine "flexible environment".
+runtime: go
+vm: true
+api_version: go1
+
+handlers:
+# Favicon. Without this, the browser hits this once per page view.
+- url: /favicon.ico
+ static_files: favicon.ico
+ upload: favicon.ico
+
+# Main app. All the real work is here.
+- url: /.*
+ script: _go_app
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico b/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico
new file mode 100644
index 000000000..1a71ea772
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico
Binary files differ
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go b/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go
new file mode 100644
index 000000000..04a0432bb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go
@@ -0,0 +1,109 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This example only works on App Engine "flexible environment".
+// +build !appengine
+
+package main
+
+import (
+ "html/template"
+ "net/http"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/datastore"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/user"
+)
+
+var initTime time.Time
+
+type Greeting struct {
+ Author string
+ Content string
+ Date time.Time
+}
+
+func main() {
+ http.HandleFunc("/", handleMainPage)
+ http.HandleFunc("/sign", handleSign)
+ appengine.Main()
+}
+
+// guestbookKey returns the key used for all guestbook entries.
+func guestbookKey(ctx context.Context) *datastore.Key {
+ // The string "default_guestbook" here could be varied to have multiple guestbooks.
+ return datastore.NewKey(ctx, "Guestbook", "default_guestbook", 0, nil)
+}
+
+var tpl = template.Must(template.ParseGlob("templates/*.html"))
+
+func handleMainPage(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "GET" {
+ http.Error(w, "GET requests only", http.StatusMethodNotAllowed)
+ return
+ }
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+
+ ctx := appengine.NewContext(r)
+ tic := time.Now()
+ q := datastore.NewQuery("Greeting").Ancestor(guestbookKey(ctx)).Order("-Date").Limit(10)
+ var gg []*Greeting
+ if _, err := q.GetAll(ctx, &gg); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ log.Errorf(ctx, "GetAll: %v", err)
+ return
+ }
+ log.Infof(ctx, "Datastore lookup took %s", time.Since(tic).String())
+ log.Infof(ctx, "Rendering %d greetings", len(gg))
+
+ var email, logout, login string
+ if u := user.Current(ctx); u != nil {
+ logout, _ = user.LogoutURL(ctx, "/")
+ email = u.Email
+ } else {
+ login, _ = user.LoginURL(ctx, "/")
+ }
+ data := struct {
+ Greetings []*Greeting
+ Login, Logout, Email string
+ }{
+ Greetings: gg,
+ Login: login,
+ Logout: logout,
+ Email: email,
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ if err := tpl.ExecuteTemplate(w, "guestbook.html", data); err != nil {
+ log.Errorf(ctx, "%v", err)
+ }
+}
+
+func handleSign(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "POST" {
+ http.Error(w, "POST requests only", http.StatusMethodNotAllowed)
+ return
+ }
+ ctx := appengine.NewContext(r)
+ g := &Greeting{
+ Content: r.FormValue("content"),
+ Date: time.Now(),
+ }
+ if u := user.Current(ctx); u != nil {
+ g.Author = u.String()
+ }
+ key := datastore.NewIncompleteKey(ctx, "Greeting", guestbookKey(ctx))
+ if _, err := datastore.Put(ctx, key, g); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ // Redirect with 303 which causes the subsequent request to use GET.
+ http.Redirect(w, r, "/", http.StatusSeeOther)
+}
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/index.yaml b/vendor/google.golang.org/appengine/demos/guestbook/index.yaml
new file mode 100644
index 000000000..315ffeb0e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/index.yaml
@@ -0,0 +1,7 @@
+indexes:
+
+- kind: Greeting
+ ancestor: yes
+ properties:
+ - name: Date
+ direction: desc
diff --git a/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html b/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html
new file mode 100644
index 000000000..322b7cf63
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>Guestbook Demo</title>
+ </head>
+ <body>
+ <p>
+ {{with .Email}}You are currently logged in as {{.}}.{{end}}
+ {{with .Login}}<a href="{{.}}">Sign in</a>{{end}}
+ {{with .Logout}}<a href="{{.}}">Sign out</a>{{end}}
+ </p>
+
+ {{range .Greetings }}
+ <p>
+ {{with .Author}}<b>{{.}}</b>{{else}}An anonymous person{{end}}
+ on <em>{{.Date.Format "3:04pm, Mon 2 Jan"}}</em>
+ wrote <blockquote>{{.Content}}</blockquote>
+ </p>
+ {{end}}
+
+ <form action="/sign" method="post">
+ <div><textarea name="content" rows="3" cols="60"></textarea></div>
+ <div><input type="submit" value="Sign Guestbook"></div>
+ </form>
+ </body>
+</html>
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/app.yaml b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml
new file mode 100644
index 000000000..15091192f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml
@@ -0,0 +1,10 @@
+runtime: go
+api_version: go1
+vm: true
+
+handlers:
+- url: /favicon.ico
+ static_files: favicon.ico
+ upload: favicon.ico
+- url: /.*
+ script: _go_app
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico b/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico
new file mode 100644
index 000000000..f19c04d27
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico
Binary files differ
diff --git a/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go
new file mode 100644
index 000000000..fbe9f56ed
--- /dev/null
+++ b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go
@@ -0,0 +1,50 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This example only works on App Engine "flexible environment".
+// +build !appengine
+
+package main
+
+import (
+ "html/template"
+ "net/http"
+ "time"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/log"
+)
+
+var initTime = time.Now()
+
+func main() {
+ http.HandleFunc("/", handle)
+ appengine.Main()
+}
+
+func handle(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+
+ ctx := appengine.NewContext(r)
+ log.Infof(ctx, "Serving the front page.")
+
+ tmpl.Execute(w, time.Since(initTime))
+}
+
+var tmpl = template.Must(template.New("front").Parse(`
+<html><body>
+
+<p>
+Hello, World! 세상아 안녕!
+</p>
+
+<p>
+This instance has been running for <em>{{.}}</em>.
+</p>
+
+</body></html>
+`))
diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go
new file mode 100644
index 000000000..16d0772e2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/errors.go
@@ -0,0 +1,46 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This file provides error functions for common API failure modes.
+
+package appengine
+
+import (
+ "fmt"
+
+ "google.golang.org/appengine/internal"
+)
+
+// IsOverQuota reports whether err represents an API call failure
+// due to insufficient available quota.
+func IsOverQuota(err error) bool {
+ callErr, ok := err.(*internal.CallError)
+ return ok && callErr.Code == 4
+}
+
+// MultiError is returned by batch operations when there are errors with
+// particular elements. Errors will be in a one-to-one correspondence with
+// the input elements; successful elements will have a nil entry.
+type MultiError []error
+
+func (m MultiError) Error() string {
+ s, n := "", 0
+ for _, e := range m {
+ if e != nil {
+ if n == 0 {
+ s = e.Error()
+ }
+ n++
+ }
+ }
+ switch n {
+ case 0:
+ return "(0 errors)"
+ case 1:
+ return s
+ case 2:
+ return s + " (and 1 other error)"
+ }
+ return fmt.Sprintf("%s (and %d other errors)", s, n-1)
+}
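
A quick sketch of how MultiError reads to callers; the entity labels are illustrative:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/appengine"
)

func main() {
	// nil entries mark elements that succeeded, mirroring how batch
	// datastore operations populate the slice.
	me := appengine.MultiError{
		nil,
		errors.New("entity 1: timeout"),
		errors.New("entity 2: timeout"),
	}
	fmt.Println(me.Error()) // entity 1: timeout (and 1 other error)
}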
diff --git a/vendor/google.golang.org/appengine/file/file.go b/vendor/google.golang.org/appengine/file/file.go
new file mode 100644
index 000000000..c3cd58baf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/file/file.go
@@ -0,0 +1,28 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package file provides helper functions for using Google Cloud Storage.
+package file
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ aipb "google.golang.org/appengine/internal/app_identity"
+)
+
+// DefaultBucketName returns the name of this application's
+// default Google Cloud Storage bucket.
+func DefaultBucketName(c context.Context) (string, error) {
+ req := &aipb.GetDefaultGcsBucketNameRequest{}
+ res := &aipb.GetDefaultGcsBucketNameResponse{}
+
+ err := internal.Call(c, "app_identity_service", "GetDefaultGcsBucketName", req, res)
+ if err != nil {
+ return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", res)
+ }
+ return res.GetDefaultGcsBucketName(), nil
+}
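
Typical use of DefaultBucketName from a request handler — a minimal sketch, with the handler wiring assumed:

package app

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/file"
)

func bucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	name, err := file.DefaultBucketName(ctx)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, "default GCS bucket:", name)
}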
diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
new file mode 100644
index 000000000..b8dcf8f36
--- /dev/null
+++ b/vendor/google.golang.org/appengine/identity.go
@@ -0,0 +1,142 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "time"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/app_identity"
+ modpb "google.golang.org/appengine/internal/modules"
+)
+
+// AppID returns the application ID for the current application.
+// The string will be a plain application ID (e.g. "appid"), with a
+// domain prefix for custom domain deployments (e.g. "example.com:appid").
+func AppID(c context.Context) string { return internal.AppID(c) }
+
+// DefaultVersionHostname returns the standard hostname of the default version
+// of the current application (e.g. "my-app.appspot.com"). This is suitable for
+// use in constructing URLs.
+func DefaultVersionHostname(c context.Context) string {
+ return internal.DefaultVersionHostname(c)
+}
+
+// ModuleName returns the module name of the current instance.
+func ModuleName(c context.Context) string {
+ return internal.ModuleName(c)
+}
+
+// ModuleHostname returns a hostname of a module instance.
+// If module is the empty string, it refers to the module of the current instance.
+// If version is empty, it refers to the version of the current instance if valid,
+// or the default version of the module of the current instance.
+// If instance is empty, ModuleHostname returns the load-balancing hostname.
+func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
+ req := &modpb.GetHostnameRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ if instance != "" {
+ req.Instance = &instance
+ }
+ res := &modpb.GetHostnameResponse{}
+ if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
+ return "", err
+ }
+ return *res.Hostname, nil
+}
+
+// VersionID returns the version ID for the current application.
+// It will be of the form "X.Y", where X is specified in app.yaml,
+// and Y is a number generated when each version of the app is uploaded.
+// It does not include a module name.
+func VersionID(c context.Context) string { return internal.VersionID(c) }
+
+// InstanceID returns a mostly-unique identifier for this instance.
+func InstanceID() string { return internal.InstanceID() }
+
+// Datacenter returns an identifier for the datacenter that the instance is running in.
+func Datacenter(c context.Context) string { return internal.Datacenter(c) }
+
+// ServerSoftware returns the App Engine release version.
+// In production, it looks like "Google App Engine/X.Y.Z".
+// In the development appserver, it looks like "Development/X.Y".
+func ServerSoftware() string { return internal.ServerSoftware() }
+
+// RequestID returns a string that uniquely identifies the request.
+func RequestID(c context.Context) string { return internal.RequestID(c) }
+
+// AccessToken generates an OAuth2 access token for the specified scopes on
+// behalf of service account of this application. This token will expire after
+// the returned time.
+func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
+ req := &pb.GetAccessTokenRequest{Scope: scopes}
+ res := &pb.GetAccessTokenResponse{}
+
+ err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
+ if err != nil {
+ return "", time.Time{}, err
+ }
+ return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
+}
+
+// Certificate represents a public certificate for the app.
+type Certificate struct {
+ KeyName string
+ Data []byte // PEM-encoded X.509 certificate
+}
+
+// PublicCertificates retrieves the public certificates for the app.
+// They can be used to verify a signature returned by SignBytes.
+func PublicCertificates(c context.Context) ([]Certificate, error) {
+ req := &pb.GetPublicCertificateForAppRequest{}
+ res := &pb.GetPublicCertificateForAppResponse{}
+ if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
+ return nil, err
+ }
+ var cs []Certificate
+ for _, pc := range res.PublicCertificateList {
+ cs = append(cs, Certificate{
+ KeyName: pc.GetKeyName(),
+ Data: []byte(pc.GetX509CertificatePem()),
+ })
+ }
+ return cs, nil
+}
+
+// ServiceAccount returns a string representing the service account name, in
+// the form of an email address (typically app_id@appspot.gserviceaccount.com).
+func ServiceAccount(c context.Context) (string, error) {
+ req := &pb.GetServiceAccountNameRequest{}
+ res := &pb.GetServiceAccountNameResponse{}
+
+ err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
+ if err != nil {
+ return "", err
+ }
+ return res.GetServiceAccountName(), nil
+}
+
+// SignBytes signs bytes using a private key unique to your application.
+func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
+ req := &pb.SignForAppRequest{BytesToSign: bytes}
+ res := &pb.SignForAppResponse{}
+
+ if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
+ return "", nil, err
+ }
+ return res.GetKeyName(), res.GetSignatureBytes(), nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
+ internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
+}
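
A hedged sketch tying a few of these identity accessors together in a handler; the route and output format are illustrative:

package app

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
)

func whoAmI(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	// All four accessors are exported by identity.go above.
	fmt.Fprintf(w, "app=%s module=%s version=%s host=%s\n",
		appengine.AppID(ctx),
		appengine.ModuleName(ctx),
		appengine.VersionID(ctx),
		appengine.DefaultVersionHostname(ctx))
}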
diff --git a/vendor/google.golang.org/appengine/image/image.go b/vendor/google.golang.org/appengine/image/image.go
new file mode 100644
index 000000000..027a41b70
--- /dev/null
+++ b/vendor/google.golang.org/appengine/image/image.go
@@ -0,0 +1,67 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package image provides image services.
+package image // import "google.golang.org/appengine/image"
+
+import (
+ "fmt"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/image"
+)
+
+type ServingURLOptions struct {
+ Secure bool // whether the URL should use HTTPS
+
+ // Size must be between zero and 1600.
+ // If Size is non-zero, a resized version of the image is served,
+ // and Size is the served image's longest dimension. The aspect ratio is preserved.
+ // If Crop is true the image is cropped from the center instead of being resized.
+ Size int
+ Crop bool
+}
+
+// ServingURL returns a URL that will serve an image from Blobstore.
+func ServingURL(c context.Context, key appengine.BlobKey, opts *ServingURLOptions) (*url.URL, error) {
+ req := &pb.ImagesGetUrlBaseRequest{
+ BlobKey: (*string)(&key),
+ }
+ if opts != nil && opts.Secure {
+ req.CreateSecureUrl = &opts.Secure
+ }
+ res := &pb.ImagesGetUrlBaseResponse{}
+ if err := internal.Call(c, "images", "GetUrlBase", req, res); err != nil {
+ return nil, err
+ }
+
+ // The URL may have suffixes added to dynamically resize or crop:
+ // - adding "=s32" will serve the image resized to 32 pixels, preserving the aspect ratio.
+ // - adding "=s32-c" is the same as "=s32" except it will be cropped.
+ u := *res.Url
+ if opts != nil && opts.Size > 0 {
+ u += fmt.Sprintf("=s%d", opts.Size)
+ if opts.Crop {
+ u += "-c"
+ }
+ }
+ return url.Parse(u)
+}
+
+// DeleteServingURL deletes the serving URL for an image.
+func DeleteServingURL(c context.Context, key appengine.BlobKey) error {
+ req := &pb.ImagesDeleteUrlBaseRequest{
+ BlobKey: (*string)(&key),
+ }
+ res := &pb.ImagesDeleteUrlBaseResponse{}
+ return internal.Call(c, "images", "DeleteUrlBase", req, res)
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("images", pb.ImagesServiceError_ErrorCode_name)
+}
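
A usage sketch for ServingURL, assuming the blob key arrives as a form value (the parameter name "key" is illustrative):

package app

import (
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/image"
)

func thumbnail(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	blobKey := appengine.BlobKey(r.FormValue("key"))
	u, err := image.ServingURL(ctx, blobKey, &image.ServingURLOptions{
		Secure: true,
		Size:   128,  // longest dimension of the served image, in pixels
		Crop:   true, // crop from the center rather than resize
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	http.Redirect(w, r, u.String(), http.StatusFound)
}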
diff --git a/vendor/google.golang.org/appengine/internal/aetesting/fake.go b/vendor/google.golang.org/appengine/internal/aetesting/fake.go
new file mode 100644
index 000000000..eb5b2c65b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/aetesting/fake.go
@@ -0,0 +1,81 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package aetesting provides utilities for testing App Engine packages.
+// This is not for testing user applications.
+package aetesting
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// FakeSingleContext returns a context whose Call invocations will be serviced
+// by f, which should be a function that has two arguments of the input and output
+// protocol buffer type, and one error return.
+func FakeSingleContext(t *testing.T, service, method string, f interface{}) context.Context {
+ fv := reflect.ValueOf(f)
+ if fv.Kind() != reflect.Func {
+ t.Fatal("not a function")
+ }
+ ft := fv.Type()
+ if ft.NumIn() != 2 || ft.NumOut() != 1 {
+ t.Fatalf("f has %d in and %d out, want 2 in and 1 out", ft.NumIn(), ft.NumOut())
+ }
+ for i := 0; i < 2; i++ {
+ at := ft.In(i)
+ if !at.Implements(protoMessageType) {
+ t.Fatalf("arg %d does not implement proto.Message", i)
+ }
+ }
+ if ft.Out(0) != errorType {
+ t.Fatalf("f's return is %v, want error", ft.Out(0))
+ }
+ s := &single{
+ t: t,
+ service: service,
+ method: method,
+ f: fv,
+ }
+ return internal.WithCallOverride(internal.ContextForTesting(&http.Request{}), s.call)
+}
+
+var (
+ protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+)
+
+type single struct {
+ t *testing.T
+ service, method string
+ f reflect.Value
+}
+
+func (s *single) call(ctx context.Context, service, method string, in, out proto.Message) error {
+ if service == "__go__" {
+ if method == "GetNamespace" {
+ return nil // always yield an empty namespace
+ }
+ return fmt.Errorf("Unknown API call /%s.%s", service, method)
+ }
+ if service != s.service || method != s.method {
+ s.t.Fatalf("Unexpected call to /%s.%s", service, method)
+ }
+ ins := []reflect.Value{
+ reflect.ValueOf(in),
+ reflect.ValueOf(out),
+ }
+ outs := s.f.Call(ins)
+ if outs[0].IsNil() {
+ return nil
+ }
+ return outs[0].Interface().(error)
+}
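
A sketch of driving a service call through FakeSingleContext; the test name is illustrative, and the ServiceAccountName field is assumed from the GetServiceAccountName getter used in identity.go above:

package app_test

import (
	"testing"

	"github.com/golang/protobuf/proto"

	"google.golang.org/appengine/internal/aetesting"
	pb "google.golang.org/appengine/internal/app_identity"
)

func TestServiceAccountName(t *testing.T) {
	// The fake routes internal.Call invocations for this service/method
	// pair to f; f's two proto arguments and single error return are
	// exactly the shape FakeSingleContext validates above.
	c := aetesting.FakeSingleContext(t, "app_identity_service", "GetServiceAccountName",
		func(req *pb.GetServiceAccountNameRequest, res *pb.GetServiceAccountNameResponse) error {
			res.ServiceAccountName = proto.String("test@example.gserviceaccount.com")
			return nil
		})
	_ = c // hand c to the code under test, e.g. appengine.ServiceAccount(c)
}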
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
new file mode 100644
index 000000000..16f87c5d3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,660 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build go1.7
+
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ logpb "google.golang.org/appengine/internal/log"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+ apiPath = "/rpc_http"
+ defaultTicketSuffix = "/default.20150612t184001.0"
+)
+
+var (
+ // Incoming headers.
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+ dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+ traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+ curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+ remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+ // Outgoing headers.
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+ apiEndpointHeaderValue = []string{"app-engine-apis"}
+ apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+ apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
+ apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+ apiContentType = http.CanonicalHeaderKey("Content-Type")
+ apiContentTypeValue = []string{"application/octet-stream"}
+ logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+ apiHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: limitDial,
+ },
+ }
+
+ defaultTicketOnce sync.Once
+ defaultTicket string
+ backgroundContextOnce sync.Once
+ backgroundContext netcontext.Context
+)
+
+func apiURL() *url.URL {
+ host, port := "appengine.googleapis.internal", "10001"
+ if h := os.Getenv("API_HOST"); h != "" {
+ host = h
+ }
+ if p := os.Getenv("API_PORT"); p != "" {
+ port = p
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host + ":" + port,
+ Path: apiPath,
+ }
+}
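+
+// For example (illustration only): with API_HOST=localhost and API_PORT=8080
+// set in the environment, apiURL returns http://localhost:8080/rpc_http; with
+// neither set it returns http://appengine.googleapis.internal:10001/rpc_http.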
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ apiURL: apiURL(),
+ }
+ r = r.WithContext(withContext(r.Context(), c))
+ c.req = r
+
+ stopFlushing := make(chan int)
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+	// Start goroutine responsible for flushing app logs.
+	// This is done after attaching c to the request context (and stopped
+	// before the response is finalized) because flushing logs requires
+	// making an API call.
+ go c.logFlusher(stopFlushing)
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ stopFlushing <- 1 // any logging beyond this point will be dropped
+
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ go c.flushLog(false)
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
+ }
+ }()
+
+ http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+ buf := make([]byte, 16<<10) // 16 KB should be plenty
+ buf = buf[:runtime.Stack(buf, false)]
+
+ // Remove the first few stack frames:
+ // this func
+ // the recover closure in the caller
+ // That will root the stack trace at the site of the panic.
+ const (
+ skipStart = "internal.renderPanic"
+ skipFrames = 2
+ )
+ start := bytes.Index(buf, []byte(skipStart))
+ p := start
+ for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+ p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+ if p < 0 {
+ break
+ }
+ }
+ if p >= 0 {
+ // buf[start:p+1] is the block to remove.
+ // Copy buf[p+1:] over buf[start:] and shrink buf.
+ copy(buf[start:], buf[p+1:])
+ buf = buf[:len(buf)-(p+1-start)]
+ }
+
+ // Add panic heading.
+ head := fmt.Sprintf("panic: %v\n\n", x)
+ if len(head) > len(buf) {
+ // Extremely unlikely to happen.
+ return head
+ }
+ copy(buf[len(head):], buf)
+ copy(buf, head)
+
+ return string(buf)
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+ req *http.Request
+
+ outCode int
+ outHeader http.Header
+ outBody []byte
+
+ pendingLogs struct {
+ sync.Mutex
+ lines []*logpb.UserAppLogLine
+ flushes int
+ }
+
+ apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+// jointContext joins two contexts in a superficial way.
+// It takes values, deadlines and cancelation from a base context, and only
+// values from a second context.
+type jointContext struct {
+ base netcontext.Context
+ valuesOnly netcontext.Context
+}
+
+func (c jointContext) Deadline() (time.Time, bool) {
+ return c.base.Deadline()
+}
+
+func (c jointContext) Done() <-chan struct{} {
+ return c.base.Done()
+}
+
+func (c jointContext) Err() error {
+ return c.base.Err()
+}
+
+func (c jointContext) Value(key interface{}) interface{} {
+ if val := c.base.Value(key); val != nil {
+ return val
+ }
+ return c.valuesOnly.Value(key)
+}
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) *context {
+ c, _ := ctx.Value(&contextKey).(*context)
+ return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+ if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+ ctx = withNamespace(ctx, ns)
+ }
+ return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+ return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ return c.req.Header
+ }
+ return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+ return req.Context()
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ return jointContext{
+ base: parent,
+ valuesOnly: req.Context(),
+ }
+}
+
+// DefaultTicket returns a ticket used for background context or dev_appserver.
+func DefaultTicket() string {
+ defaultTicketOnce.Do(func() {
+ if IsDevAppServer() {
+ defaultTicket = "testapp" + defaultTicketSuffix
+ return
+ }
+ appID := partitionlessAppID()
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ majVersion := VersionID(nil)
+ if i := strings.Index(majVersion, "."); i > 0 {
+ majVersion = majVersion[:i]
+ }
+ defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+ })
+ return defaultTicket
+}
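+
+// Outside dev_appserver the resulting ticket has the shape
+// "escapedAppID/module.majorVersion.instanceID"; for a hypothetical app
+// "example.com:myapp" with module "default" that is
+// "example_com_myapp/default.<majorVersion>.<instanceID>".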
+
+func BackgroundContext() netcontext.Context {
+ backgroundContextOnce.Do(func() {
+ // Compute background security ticket.
+ ticket := DefaultTicket()
+
+ c := &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ apiURL: apiURL(),
+ }
+ backgroundContext = toContext(c)
+
+ // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+ go c.logFlusher(make(chan int))
+ })
+
+ return backgroundContext
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by the aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
+ c := &context{
+ req: req,
+ apiURL: apiURL,
+ }
+ ctx := withContext(decorate(req.Context()), c)
+ req = req.WithContext(ctx)
+ c.req = req
+ return req, func() {}
+}
+
+var errTimeout = &CallError{
+ Detail: "Deadline exceeded",
+ Code: int32(remotepb.RpcError_CANCELLED),
+ Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+ if c.outCode == 0 {
+ c.WriteHeader(http.StatusOK)
+ }
+ if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ c.outBody = append(c.outBody, b...)
+ return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+ if c.outCode != 0 {
+ logf(c, 3, "WriteHeader called multiple times on request.") // error level
+ return
+ }
+ c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+ hreq := &http.Request{
+ Method: "POST",
+ URL: c.apiURL,
+ Header: http.Header{
+ apiEndpointHeader: apiEndpointHeaderValue,
+ apiMethodHeader: apiMethodHeaderValue,
+ apiContentType: apiContentTypeValue,
+ apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
+ ContentLength: int64(len(body)),
+ Host: c.apiURL.Host,
+ }
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+ if info := c.req.Header.Get(traceHeader); info != "" {
+ hreq.Header.Set(traceHeader, info)
+ }
+
+ tr := apiHTTPClient.Transport.(*http.Transport)
+
+ var timedOut int32 // atomic; set to 1 if timed out
+ t := time.AfterFunc(timeout, func() {
+ atomic.StoreInt32(&timedOut, 1)
+ tr.CancelRequest(hreq)
+ })
+ defer t.Stop()
+ defer func() {
+ // Check if timeout was exceeded.
+ if atomic.LoadInt32(&timedOut) != 0 {
+ err = errTimeout
+ }
+ }()
+
+ hresp, err := apiHTTPClient.Do(hreq)
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ defer hresp.Body.Close()
+ hrespBody, err := ioutil.ReadAll(hresp.Body)
+ if hresp.StatusCode != 200 {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge response bad: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ // Default RPC timeout is 60s.
+ timeout := 60 * time.Second
+ if deadline, ok := ctx.Deadline(); ok {
+ timeout = deadline.Sub(time.Now())
+ }
+
+ data, err := proto.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ ticket := c.req.Header.Get(ticketHeader)
+	// Use a test ticket in test environments.
+ if ticket == "" {
+ if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+ ticket = appid.(string) + defaultTicketSuffix
+ }
+ }
+	// Fall back to the background ticket when the request ticket is not available in Flex or dev_appserver.
+ if ticket == "" {
+ ticket = DefaultTicket()
+ }
+ req := &remotepb.Request{
+ ServiceName: &service,
+ Method: &method,
+ Request: data,
+ RequestId: &ticket,
+ }
+ hreqBody, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ hrespBody, err := c.post(hreqBody, timeout)
+ if err != nil {
+ return err
+ }
+
+ res := &remotepb.Response{}
+ if err := proto.Unmarshal(hrespBody, res); err != nil {
+ return err
+ }
+ if res.RpcError != nil {
+ ce := &CallError{
+ Detail: res.RpcError.GetDetail(),
+ Code: *res.RpcError.Code,
+ }
+ switch remotepb.RpcError_ErrorCode(ce.Code) {
+ case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+ ce.Timeout = true
+ }
+ return ce
+ }
+ if res.ApplicationError != nil {
+ return &APIError{
+ Service: *req.ServiceName,
+ Detail: res.ApplicationError.GetDetail(),
+ Code: *res.ApplicationError.Code,
+ }
+ }
+ if res.Exception != nil || res.JavaException != nil {
+ // This shouldn't happen, but let's be defensive.
+ return &CallError{
+ Detail: "service bridge returned exception",
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return proto.Unmarshal(res.Response, out)
+}
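+
+// An illustrative invocation, mirroring api_test.go later in this change:
+//
+//	err := Call(toContext(c), "actordb", "LookupActor", req, res)
+//
+// where req and res are the request and response protos for that method.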
+
+func (c *context) Request() *http.Request {
+ return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+ // Truncate long log lines.
+ // TODO(dsymonds): Check if this is still necessary.
+ const lim = 8 << 10
+ if len(*ll.Message) > lim {
+ suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+ ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+ }
+
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+ c.pendingLogs.Unlock()
+}
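+
+// For example (illustration): a 10240-byte message is truncated to 8192 bytes
+// in total, ending in the suffix "...(length 10240)".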
+
+var logLevelName = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+ if c == nil {
+ panic("not an App Engine context")
+ }
+ s := fmt.Sprintf(format, args...)
+ s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ log.Print(logLevelName[level] + ": " + s)
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+ c.pendingLogs.Lock()
+ // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+ n, rem := 0, 30<<20
+ for ; n < len(c.pendingLogs.lines); n++ {
+ ll := c.pendingLogs.lines[n]
+ // Each log line will require about 3 bytes of overhead.
+ nb := proto.Size(ll) + 3
+ if nb > rem {
+ break
+ }
+ rem -= nb
+ }
+ lines := c.pendingLogs.lines[:n]
+ c.pendingLogs.lines = c.pendingLogs.lines[n:]
+ c.pendingLogs.Unlock()
+
+ if len(lines) == 0 && !force {
+ // Nothing to flush.
+ return false
+ }
+
+ rescueLogs := false
+ defer func() {
+ if rescueLogs {
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+ c.pendingLogs.Unlock()
+ }
+ }()
+
+ buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+ LogLine: lines,
+ })
+ if err != nil {
+ log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+ rescueLogs = true
+ return false
+ }
+
+ req := &logpb.FlushRequest{
+ Logs: buf,
+ }
+ res := &basepb.VoidProto{}
+ c.pendingLogs.Lock()
+ c.pendingLogs.flushes++
+ c.pendingLogs.Unlock()
+ if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+ log.Printf("internal.flushLog: Flush RPC: %v", err)
+ rescueLogs = true
+ return false
+ }
+ return true
+}
+
+const (
+ // Log flushing parameters.
+ flushInterval = 1 * time.Second
+ forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+ lastFlush := time.Now()
+ tick := time.NewTicker(flushInterval)
+ for {
+ select {
+ case <-stop:
+ // Request finished.
+ tick.Stop()
+ return
+ case <-tick.C:
+ force := time.Now().Sub(lastFlush) > forceFlushInterval
+ if c.flushLog(force) {
+ lastFlush = time.Now()
+ }
+ }
+ }
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return toContext(&context{req: req})
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
new file mode 100644
index 000000000..f0f40b2e3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -0,0 +1,169 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ "appengine"
+ "appengine_internal"
+ basepb "appengine_internal/base"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+var contextKey = "holds an appengine.Context"
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) appengine.Context {
+ c, _ := ctx.Value(&contextKey).(appengine.Context)
+ return c
+}
+
+// This is only for classic App Engine adapters.
+func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
+ c := fromContext(ctx)
+ if c == nil {
+ return nil, errNotAppEngineContext
+ }
+ return c, nil
+}
+
+func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+
+ s := &basepb.StringProto{}
+ c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
+ if ns := s.GetValue(); ns != "" {
+ ctx = NamespacedContext(ctx, ns)
+ }
+
+ return ctx
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ if req, ok := c.Request().(*http.Request); ok {
+ return req.Header
+ }
+ }
+ return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+ return WithContext(netcontext.Background(), req)
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ c := appengine.NewContext(req)
+ return withContext(parent, c)
+}
+
+type testingContext struct {
+ appengine.Context
+
+ req *http.Request
+}
+
+func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
+func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
+ if service == "__go__" && method == "GetNamespace" {
+ return nil
+ }
+ return fmt.Errorf("testingContext: unsupported Call")
+}
+func (t *testingContext) Request() interface{} { return t.req }
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return withContext(netcontext.Background(), &testingContext{req: req})
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ var opts *appengine_internal.CallOptions
+ if d, ok := ctx.Deadline(); ok {
+ opts = &appengine_internal.CallOptions{
+ Timeout: d.Sub(time.Now()),
+ }
+ }
+
+ err := c.Call(service, method, in, out, opts)
+ switch v := err.(type) {
+ case *appengine_internal.APIError:
+ return &APIError{
+ Service: v.Service,
+ Detail: v.Detail,
+ Code: v.Code,
+ }
+ case *appengine_internal.CallError:
+ return &CallError{
+ Detail: v.Detail,
+ Code: v.Code,
+ Timeout: v.Timeout,
+ }
+ }
+ return err
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ panic("handleHTTP called; this should be impossible")
+}
+
+func logf(c appengine.Context, level int64, format string, args ...interface{}) {
+ var fn func(format string, args ...interface{})
+ switch level {
+ case 0:
+ fn = c.Debugf
+ case 1:
+ fn = c.Infof
+ case 2:
+ fn = c.Warningf
+ case 3:
+ fn = c.Errorf
+ case 4:
+ fn = c.Criticalf
+ default:
+ // This shouldn't happen.
+ fn = c.Criticalf
+ }
+ fn(format, args...)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
new file mode 100644
index 000000000..e0c0b214b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -0,0 +1,123 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "errors"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+var errNotAppEngineContext = errors.New("not an App Engine context")
+
+type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
+
+var callOverrideKey = "holds []CallOverrideFunc"
+
+func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
+ // We avoid appending to any existing call override
+ // so we don't risk overwriting a popped stack below.
+ var cofs []CallOverrideFunc
+ if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
+ cofs = append(cofs, uf...)
+ }
+ cofs = append(cofs, f)
+ return netcontext.WithValue(ctx, &callOverrideKey, cofs)
+}
+
+func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
+ cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
+ if len(cofs) == 0 {
+ return nil, nil, false
+ }
+ // We found a list of overrides; grab the last, and reconstitute a
+ // context that will hide it.
+ f := cofs[len(cofs)-1]
+ ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
+ return f, ctx, true
+}
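+
+// Together these behave as a LIFO stack of overrides: if f1 and then f2 are
+// pushed with WithCallOverride, callOverrideFromContext first yields f2 along
+// with a context hiding it, and a second lookup on that context yields f1.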
+
+type logOverrideFunc func(level int64, format string, args ...interface{})
+
+var logOverrideKey = "holds a logOverrideFunc"
+
+func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
+ return netcontext.WithValue(ctx, &logOverrideKey, f)
+}
+
+var appIDOverrideKey = "holds a string, being the full app ID"
+
+func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
+ return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
+}
+
+var namespaceKey = "holds the namespace string"
+
+func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
+ return netcontext.WithValue(ctx, &namespaceKey, ns)
+}
+
+func NamespaceFromContext(ctx netcontext.Context) string {
+ // If there's no namespace, return the empty string.
+ ns, _ := ctx.Value(&namespaceKey).(string)
+ return ns
+}
+
+// FullyQualifiedAppID returns the fully-qualified application ID.
+// This may contain a partition prefix (e.g. "s~" for High Replication apps),
+// or a domain prefix (e.g. "example.com:").
+func FullyQualifiedAppID(ctx netcontext.Context) string {
+ if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
+ return id
+ }
+ return fullyQualifiedAppID(ctx)
+}
+
+func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
+ if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
+ f(level, format, args...)
+ return
+ }
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ logf(c, level, format, args...)
+}
+
+// NamespacedContext wraps a Context to support namespaces.
+func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
+ return withNamespace(ctx, namespace)
+}
+
+// SetTestEnv sets the env variables for testing the background ticket in Flex.
+func SetTestEnv() func() {
+ var environ = []struct {
+ key, value string
+ }{
+ {"GAE_LONG_APP_ID", "my-app-id"},
+ {"GAE_MINOR_VERSION", "067924799508853122"},
+ {"GAE_MODULE_INSTANCE", "0"},
+ {"GAE_MODULE_NAME", "default"},
+ {"GAE_MODULE_VERSION", "20150612t184001"},
+ }
+
+ for _, v := range environ {
+ old := os.Getenv(v.key)
+ os.Setenv(v.key, v.value)
+ v.value = old
+ }
+ return func() { // Restore old environment after the test completes.
+ for _, v := range environ {
+ if v.value == "" {
+ os.Unsetenv(v.key)
+ continue
+ }
+ os.Setenv(v.key, v.value)
+ }
+ }
+}
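+
+// Typical usage in a test (as in api_test.go later in this change):
+//
+//	resetEnv := SetTestEnv()
+//	defer resetEnv()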
diff --git a/vendor/google.golang.org/appengine/internal/api_pre17.go b/vendor/google.golang.org/appengine/internal/api_pre17.go
new file mode 100644
index 000000000..028b4f056
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_pre17.go
@@ -0,0 +1,682 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build !go1.7
+
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ logpb "google.golang.org/appengine/internal/log"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+ apiPath = "/rpc_http"
+ defaultTicketSuffix = "/default.20150612t184001.0"
+)
+
+var (
+ // Incoming headers.
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+ dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+ traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+ curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+ remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+ // Outgoing headers.
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+ apiEndpointHeaderValue = []string{"app-engine-apis"}
+ apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+ apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
+ apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+ apiContentType = http.CanonicalHeaderKey("Content-Type")
+ apiContentTypeValue = []string{"application/octet-stream"}
+ logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+ apiHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: limitDial,
+ },
+ }
+
+ defaultTicketOnce sync.Once
+ defaultTicket string
+)
+
+func apiURL() *url.URL {
+ host, port := "appengine.googleapis.internal", "10001"
+ if h := os.Getenv("API_HOST"); h != "" {
+ host = h
+ }
+ if p := os.Getenv("API_PORT"); p != "" {
+ port = p
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host + ":" + port,
+ Path: apiPath,
+ }
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ apiURL: apiURL(),
+ }
+ stopFlushing := make(chan int)
+
+ ctxs.Lock()
+ ctxs.m[r] = c
+ ctxs.Unlock()
+ defer func() {
+ ctxs.Lock()
+ delete(ctxs.m, r)
+ ctxs.Unlock()
+ }()
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+ // Start goroutine responsible for flushing app logs.
+	// This is done after adding c to ctxs.m (and stopped before removing it)
+ // because flushing logs requires making an API call.
+ go c.logFlusher(stopFlushing)
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ stopFlushing <- 1 // any logging beyond this point will be dropped
+
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ go c.flushLog(false)
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
+ }
+ }()
+
+ http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+ buf := make([]byte, 16<<10) // 16 KB should be plenty
+ buf = buf[:runtime.Stack(buf, false)]
+
+ // Remove the first few stack frames:
+ // this func
+ // the recover closure in the caller
+ // That will root the stack trace at the site of the panic.
+ const (
+ skipStart = "internal.renderPanic"
+ skipFrames = 2
+ )
+ start := bytes.Index(buf, []byte(skipStart))
+ p := start
+ for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+ p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+ if p < 0 {
+ break
+ }
+ }
+ if p >= 0 {
+ // buf[start:p+1] is the block to remove.
+ // Copy buf[p+1:] over buf[start:] and shrink buf.
+ copy(buf[start:], buf[p+1:])
+ buf = buf[:len(buf)-(p+1-start)]
+ }
+
+ // Add panic heading.
+ head := fmt.Sprintf("panic: %v\n\n", x)
+ if len(head) > len(buf) {
+ // Extremely unlikely to happen.
+ return head
+ }
+ copy(buf[len(head):], buf)
+ copy(buf, head)
+
+ return string(buf)
+}
+
+var ctxs = struct {
+ sync.Mutex
+ m map[*http.Request]*context
+ bg *context // background context, lazily initialized
+ // dec is used by tests to decorate the netcontext.Context returned
+ // for a given request. This allows tests to add overrides (such as
+ // WithAppIDOverride) to the context. The map is nil outside tests.
+ dec map[*http.Request]func(netcontext.Context) netcontext.Context
+}{
+ m: make(map[*http.Request]*context),
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+ req *http.Request
+
+ outCode int
+ outHeader http.Header
+ outBody []byte
+
+ pendingLogs struct {
+ sync.Mutex
+ lines []*logpb.UserAppLogLine
+ flushes int
+ }
+
+ apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) *context {
+ c, _ := ctx.Value(&contextKey).(*context)
+ return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+ if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+ ctx = withNamespace(ctx, ns)
+ }
+ return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+ return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ return c.req.Header
+ }
+ return nil
+}
+
+func ReqContext(req *http.Request) netcontext.Context {
+ return WithContext(netcontext.Background(), req)
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ ctxs.Lock()
+ c := ctxs.m[req]
+ d := ctxs.dec[req]
+ ctxs.Unlock()
+
+ if d != nil {
+ parent = d(parent)
+ }
+
+ if c == nil {
+ // Someone passed in an http.Request that is not in-flight.
+ // We panic here rather than panicking at a later point
+ // so that stack traces will be more sensible.
+ log.Panic("appengine: NewContext passed an unknown http.Request")
+ }
+ return withContext(parent, c)
+}
+
+// DefaultTicket returns a ticket used for background context or dev_appserver.
+func DefaultTicket() string {
+ defaultTicketOnce.Do(func() {
+ if IsDevAppServer() {
+ defaultTicket = "testapp" + defaultTicketSuffix
+ return
+ }
+ appID := partitionlessAppID()
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ majVersion := VersionID(nil)
+ if i := strings.Index(majVersion, "."); i > 0 {
+ majVersion = majVersion[:i]
+ }
+ defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+ })
+ return defaultTicket
+}
+
+func BackgroundContext() netcontext.Context {
+ ctxs.Lock()
+ defer ctxs.Unlock()
+
+ if ctxs.bg != nil {
+ return toContext(ctxs.bg)
+ }
+
+ // Compute background security ticket.
+ ticket := DefaultTicket()
+
+ ctxs.bg = &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ apiURL: apiURL(),
+ }
+
+ // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+ go ctxs.bg.logFlusher(make(chan int))
+
+ return toContext(ctxs.bg)
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by the aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
+ c := &context{
+ req: req,
+ apiURL: apiURL,
+ }
+ ctxs.Lock()
+ defer ctxs.Unlock()
+ if _, ok := ctxs.m[req]; ok {
+ log.Panic("req already associated with context")
+ }
+ if _, ok := ctxs.dec[req]; ok {
+ log.Panic("req already associated with context")
+ }
+ if ctxs.dec == nil {
+ ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)
+ }
+ ctxs.m[req] = c
+ ctxs.dec[req] = decorate
+
+ return req, func() {
+ ctxs.Lock()
+ delete(ctxs.m, req)
+ delete(ctxs.dec, req)
+ ctxs.Unlock()
+ }
+}
+
+var errTimeout = &CallError{
+ Detail: "Deadline exceeded",
+ Code: int32(remotepb.RpcError_CANCELLED),
+ Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+ if c.outCode == 0 {
+ c.WriteHeader(http.StatusOK)
+ }
+ if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ c.outBody = append(c.outBody, b...)
+ return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+ if c.outCode != 0 {
+ logf(c, 3, "WriteHeader called multiple times on request.") // error level
+ return
+ }
+ c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+ hreq := &http.Request{
+ Method: "POST",
+ URL: c.apiURL,
+ Header: http.Header{
+ apiEndpointHeader: apiEndpointHeaderValue,
+ apiMethodHeader: apiMethodHeaderValue,
+ apiContentType: apiContentTypeValue,
+ apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
+ ContentLength: int64(len(body)),
+ Host: c.apiURL.Host,
+ }
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+ if info := c.req.Header.Get(traceHeader); info != "" {
+ hreq.Header.Set(traceHeader, info)
+ }
+
+ tr := apiHTTPClient.Transport.(*http.Transport)
+
+ var timedOut int32 // atomic; set to 1 if timed out
+ t := time.AfterFunc(timeout, func() {
+ atomic.StoreInt32(&timedOut, 1)
+ tr.CancelRequest(hreq)
+ })
+ defer t.Stop()
+ defer func() {
+ // Check if timeout was exceeded.
+ if atomic.LoadInt32(&timedOut) != 0 {
+ err = errTimeout
+ }
+ }()
+
+ hresp, err := apiHTTPClient.Do(hreq)
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ defer hresp.Body.Close()
+ hrespBody, err := ioutil.ReadAll(hresp.Body)
+ if hresp.StatusCode != 200 {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge response bad: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ // Default RPC timeout is 60s.
+ timeout := 60 * time.Second
+ if deadline, ok := ctx.Deadline(); ok {
+ timeout = deadline.Sub(time.Now())
+ }
+
+ data, err := proto.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ ticket := c.req.Header.Get(ticketHeader)
+	// Use a test ticket in test environments.
+ if ticket == "" {
+ if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+ ticket = appid.(string) + defaultTicketSuffix
+ }
+ }
+	// Fall back to the background ticket when the request ticket is not available in Flex or dev_appserver.
+ if ticket == "" {
+ ticket = DefaultTicket()
+ }
+ req := &remotepb.Request{
+ ServiceName: &service,
+ Method: &method,
+ Request: data,
+ RequestId: &ticket,
+ }
+ hreqBody, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ hrespBody, err := c.post(hreqBody, timeout)
+ if err != nil {
+ return err
+ }
+
+ res := &remotepb.Response{}
+ if err := proto.Unmarshal(hrespBody, res); err != nil {
+ return err
+ }
+ if res.RpcError != nil {
+ ce := &CallError{
+ Detail: res.RpcError.GetDetail(),
+ Code: *res.RpcError.Code,
+ }
+ switch remotepb.RpcError_ErrorCode(ce.Code) {
+ case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+ ce.Timeout = true
+ }
+ return ce
+ }
+ if res.ApplicationError != nil {
+ return &APIError{
+ Service: *req.ServiceName,
+ Detail: res.ApplicationError.GetDetail(),
+ Code: *res.ApplicationError.Code,
+ }
+ }
+ if res.Exception != nil || res.JavaException != nil {
+ // This shouldn't happen, but let's be defensive.
+ return &CallError{
+ Detail: "service bridge returned exception",
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+ return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+ // Truncate long log lines.
+ // TODO(dsymonds): Check if this is still necessary.
+ const lim = 8 << 10
+ if len(*ll.Message) > lim {
+ suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+ ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+ }
+
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+ c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+ if c == nil {
+ panic("not an App Engine context")
+ }
+ s := fmt.Sprintf(format, args...)
+ s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ log.Print(logLevelName[level] + ": " + s)
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+ c.pendingLogs.Lock()
+ // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+ n, rem := 0, 30<<20
+ for ; n < len(c.pendingLogs.lines); n++ {
+ ll := c.pendingLogs.lines[n]
+ // Each log line will require about 3 bytes of overhead.
+ nb := proto.Size(ll) + 3
+ if nb > rem {
+ break
+ }
+ rem -= nb
+ }
+ lines := c.pendingLogs.lines[:n]
+ c.pendingLogs.lines = c.pendingLogs.lines[n:]
+ c.pendingLogs.Unlock()
+
+ if len(lines) == 0 && !force {
+ // Nothing to flush.
+ return false
+ }
+
+ rescueLogs := false
+ defer func() {
+ if rescueLogs {
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+ c.pendingLogs.Unlock()
+ }
+ }()
+
+ buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+ LogLine: lines,
+ })
+ if err != nil {
+ log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+ rescueLogs = true
+ return false
+ }
+
+ req := &logpb.FlushRequest{
+ Logs: buf,
+ }
+ res := &basepb.VoidProto{}
+ c.pendingLogs.Lock()
+ c.pendingLogs.flushes++
+ c.pendingLogs.Unlock()
+ if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+ log.Printf("internal.flushLog: Flush RPC: %v", err)
+ rescueLogs = true
+ return false
+ }
+ return true
+}
+
+const (
+ // Log flushing parameters.
+ flushInterval = 1 * time.Second
+ forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+ lastFlush := time.Now()
+ tick := time.NewTicker(flushInterval)
+ for {
+ select {
+ case <-stop:
+ // Request finished.
+ tick.Stop()
+ return
+ case <-tick.C:
+ force := time.Now().Sub(lastFlush) > forceFlushInterval
+ if c.flushLog(force) {
+ lastFlush = time.Now()
+ }
+ }
+ }
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return toContext(&context{req: req})
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_race_test.go b/vendor/google.golang.org/appengine/internal/api_race_test.go
new file mode 100644
index 000000000..6cfe90649
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_race_test.go
@@ -0,0 +1,9 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build race
+
+package internal
+
+func init() { raceDetector = true }
diff --git a/vendor/google.golang.org/appengine/internal/api_test.go b/vendor/google.golang.org/appengine/internal/api_test.go
new file mode 100644
index 000000000..76624a28e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_test.go
@@ -0,0 +1,466 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "os/exec"
+ "strings"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const testTicketHeader = "X-Magic-Ticket-Header"
+
+func init() {
+ ticketHeader = testTicketHeader
+}
+
+type fakeAPIHandler struct {
+ hang chan int // used for RunSlowly RPC
+
+ LogFlushes int32 // atomic
+}
+
+func (f *fakeAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ writeResponse := func(res *remotepb.Response) {
+ hresBody, err := proto.Marshal(res)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Failed encoding API response: %v", err), 500)
+ return
+ }
+ w.Write(hresBody)
+ }
+
+ if r.URL.Path != "/rpc_http" {
+ http.NotFound(w, r)
+ return
+ }
+ hreqBody, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Bad body: %v", err), 500)
+ return
+ }
+ apiReq := &remotepb.Request{}
+ if err := proto.Unmarshal(hreqBody, apiReq); err != nil {
+ http.Error(w, fmt.Sprintf("Bad encoded API request: %v", err), 500)
+ return
+ }
+ if *apiReq.RequestId != "s3cr3t" && *apiReq.RequestId != DefaultTicket() {
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_SECURITY_VIOLATION)),
+ Detail: proto.String("bad security ticket"),
+ },
+ })
+ return
+ }
+ if got, want := r.Header.Get(dapperHeader), "trace-001"; got != want {
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_BAD_REQUEST)),
+ Detail: proto.String(fmt.Sprintf("trace info = %q, want %q", got, want)),
+ },
+ })
+ return
+ }
+
+ service, method := *apiReq.ServiceName, *apiReq.Method
+ var resOut proto.Message
+ if service == "actordb" && method == "LookupActor" {
+ req := &basepb.StringProto{}
+ res := &basepb.StringProto{}
+ if err := proto.Unmarshal(apiReq.Request, req); err != nil {
+ http.Error(w, fmt.Sprintf("Bad encoded request: %v", err), 500)
+ return
+ }
+ if *req.Value == "Doctor Who" {
+ res.Value = proto.String("David Tennant")
+ }
+ resOut = res
+ }
+ if service == "errors" {
+ switch method {
+ case "Non200":
+ http.Error(w, "I'm a little teapot.", 418)
+ return
+ case "ShortResponse":
+ w.Header().Set("Content-Length", "100")
+ w.Write([]byte("way too short"))
+ return
+ case "OverQuota":
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_OVER_QUOTA)),
+ Detail: proto.String("you are hogging the resources!"),
+ },
+ })
+ return
+ case "RunSlowly":
+ // TestAPICallRPCFailure creates f.hang, but does not strobe it
+ // until Call returns with remotepb.RpcError_CANCELLED.
+ // This is here to force a happens-before relationship between
+ // the httptest server handler and shutdown.
+ <-f.hang
+ resOut = &basepb.VoidProto{}
+ }
+ }
+ if service == "logservice" && method == "Flush" {
+ // Pretend log flushing is slow.
+ time.Sleep(50 * time.Millisecond)
+ atomic.AddInt32(&f.LogFlushes, 1)
+ resOut = &basepb.VoidProto{}
+ }
+
+ encOut, err := proto.Marshal(resOut)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Failed encoding response: %v", err), 500)
+ return
+ }
+ writeResponse(&remotepb.Response{
+ Response: encOut,
+ })
+}
+
+func setup() (f *fakeAPIHandler, c *context, cleanup func()) {
+ f = &fakeAPIHandler{}
+ srv := httptest.NewServer(f)
+ u, err := url.Parse(srv.URL + apiPath)
+ if err != nil {
+ panic(fmt.Sprintf("url.Parse(%q): %v", srv.URL+apiPath, err))
+ }
+ return f, &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{"s3cr3t"},
+ dapperHeader: []string{"trace-001"},
+ },
+ },
+ apiURL: u,
+ }, srv.Close
+}
+
+func TestAPICall(t *testing.T) {
+ _, c, cleanup := setup()
+ defer cleanup()
+
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ err := Call(toContext(c), "actordb", "LookupActor", req, res)
+ if err != nil {
+ t.Fatalf("API call failed: %v", err)
+ }
+ if got, want := *res.Value, "David Tennant"; got != want {
+ t.Errorf("Response is %q, want %q", got, want)
+ }
+}
+
+func TestAPICallTicketUnavailable(t *testing.T) {
+ resetEnv := SetTestEnv()
+ defer resetEnv()
+ _, c, cleanup := setup()
+ defer cleanup()
+
+ c.req.Header.Set(ticketHeader, "")
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ err := Call(toContext(c), "actordb", "LookupActor", req, res)
+ if err != nil {
+ t.Fatalf("API call failed: %v", err)
+ }
+ if got, want := *res.Value, "David Tennant"; got != want {
+ t.Errorf("Response is %q, want %q", got, want)
+ }
+}
+
+func TestAPICallRPCFailure(t *testing.T) {
+ f, c, cleanup := setup()
+ defer cleanup()
+
+ testCases := []struct {
+ method string
+ code remotepb.RpcError_ErrorCode
+ }{
+ {"Non200", remotepb.RpcError_UNKNOWN},
+ {"ShortResponse", remotepb.RpcError_UNKNOWN},
+ {"OverQuota", remotepb.RpcError_OVER_QUOTA},
+ {"RunSlowly", remotepb.RpcError_CANCELLED},
+ }
+ f.hang = make(chan int) // only for RunSlowly
+ for _, tc := range testCases {
+ ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond)
+ err := Call(ctx, "errors", tc.method, &basepb.VoidProto{}, &basepb.VoidProto{})
+ ce, ok := err.(*CallError)
+ if !ok {
+ t.Errorf("%s: API call error is %T (%v), want *CallError", tc.method, err, err)
+ continue
+ }
+ if ce.Code != int32(tc.code) {
+ t.Errorf("%s: ce.Code = %d, want %d", tc.method, ce.Code, tc.code)
+ }
+ if tc.method == "RunSlowly" {
+ f.hang <- 1 // release the HTTP handler
+ }
+ }
+}
+
+func TestAPICallDialFailure(t *testing.T) {
+ // See what happens if the API host is unresponsive.
+ // This should time out quickly, not hang forever.
+ _, c, cleanup := setup()
+ defer cleanup()
+ // Reset the URL to the production address so that dialing fails.
+ c.apiURL = apiURL()
+
+ start := time.Now()
+ err := Call(toContext(c), "foo", "bar", &basepb.VoidProto{}, &basepb.VoidProto{})
+ const max = 1 * time.Second
+ if taken := time.Since(start); taken > max {
+ t.Errorf("Dial hang took too long: %v > %v", taken, max)
+ }
+ if err == nil {
+ t.Error("Call did not fail")
+ }
+}
+
+func TestDelayedLogFlushing(t *testing.T) {
+ f, c, cleanup := setup()
+ defer cleanup()
+
+ http.HandleFunc("/quick_log", func(w http.ResponseWriter, r *http.Request) {
+ logC := WithContext(netcontext.Background(), r)
+ fromContext(logC).apiURL = c.apiURL // Otherwise it will try to use the default URL.
+ Logf(logC, 1, "It's a lovely day.")
+ w.WriteHeader(200)
+ w.Write(make([]byte, 100<<10)) // write 100 KB to force HTTP flush
+ })
+
+ r := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Path: "/quick_log",
+ },
+ Header: c.req.Header,
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+ w := httptest.NewRecorder()
+
+ // Check that log flushing does not hold up the HTTP response.
+ start := time.Now()
+ handleHTTP(w, r)
+ if d := time.Since(start); d > 10*time.Millisecond {
+ t.Errorf("handleHTTP took %v, want under 10ms", d)
+ }
+ const hdr = "X-AppEngine-Log-Flush-Count"
+ if h := w.HeaderMap.Get(hdr); h != "1" {
+ t.Errorf("%s header = %q, want %q", hdr, h, "1")
+ }
+ if f := atomic.LoadInt32(&f.LogFlushes); f != 0 {
+ t.Errorf("After HTTP response: f.LogFlushes = %d, want 0", f)
+ }
+
+ // Check that the log flush eventually comes in.
+ time.Sleep(100 * time.Millisecond)
+ if f := atomic.LoadInt32(&f.LogFlushes); f != 1 {
+ t.Errorf("After 100ms: f.LogFlushes = %d, want 1", f)
+ }
+}
+
+func TestRemoteAddr(t *testing.T) {
+ var addr string
+ http.HandleFunc("/remote_addr", func(w http.ResponseWriter, r *http.Request) {
+ addr = r.RemoteAddr
+ })
+
+ testCases := []struct {
+ headers http.Header
+ addr string
+ }{
+ {http.Header{"X-Appengine-User-Ip": []string{"10.5.2.1"}}, "10.5.2.1:80"},
+ {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4"}}, "1.2.3.4:80"},
+ {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4:8080"}}, "1.2.3.4:8080"},
+ {
+ http.Header{"X-Appengine-Remote-Addr": []string{"2401:fa00:9:1:7646:a0ff:fe90:ca66"}},
+ "[2401:fa00:9:1:7646:a0ff:fe90:ca66]:80",
+ },
+ {
+ http.Header{"X-Appengine-Remote-Addr": []string{"[::1]:http"}},
+ "[::1]:http",
+ },
+ {http.Header{}, "127.0.0.1:80"},
+ }
+
+ for _, tc := range testCases {
+ r := &http.Request{
+ Method: "GET",
+ URL: &url.URL{Scheme: "http", Path: "/remote_addr"},
+ Header: tc.headers,
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+ handleHTTP(httptest.NewRecorder(), r)
+ if addr != tc.addr {
+ t.Errorf("Header %v, got %q, want %q", tc.headers, addr, tc.addr)
+ }
+ }
+}
+
+func TestPanickingHandler(t *testing.T) {
+ http.HandleFunc("/panic", func(http.ResponseWriter, *http.Request) {
+ panic("whoops!")
+ })
+ r := &http.Request{
+ Method: "GET",
+ URL: &url.URL{Scheme: "http", Path: "/panic"},
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+ rec := httptest.NewRecorder()
+ handleHTTP(rec, r)
+ if rec.Code != 500 {
+ t.Errorf("Panicking handler returned HTTP %d, want HTTP %d", rec.Code, 500)
+ }
+}
+
+var raceDetector = false
+
+func TestAPICallAllocations(t *testing.T) {
+ if raceDetector {
+ t.Skip("not running under race detector")
+ }
+
+ // Run the test API server in a subprocess so we aren't counting its allocations.
+ u, cleanup := launchHelperProcess(t)
+ defer cleanup()
+ c := &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{"s3cr3t"},
+ dapperHeader: []string{"trace-001"},
+ },
+ },
+ apiURL: u,
+ }
+
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ var apiErr error
+ avg := testing.AllocsPerRun(100, func() {
+ ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond)
+ if err := Call(ctx, "actordb", "LookupActor", req, res); err != nil && apiErr == nil {
+ apiErr = err // get the first error only
+ }
+ })
+ if apiErr != nil {
+ t.Errorf("API call failed: %v", apiErr)
+ }
+
+ // Lots of room for improvement...
+ // TODO(djd): Reduce maximum to 85 once the App Engine SDK is based on 1.6.
+ const min, max float64 = 70, 100
+ if avg < min || max < avg {
+ t.Errorf("Allocations per API call = %g, want in [%g,%g]", avg, min, max)
+ }
+}
+
+func launchHelperProcess(t *testing.T) (apiURL *url.URL, cleanup func()) {
+ cmd := exec.Command(os.Args[0], "-test.run=TestHelperProcess")
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ t.Fatalf("StdinPipe: %v", err)
+ }
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatalf("StdoutPipe: %v", err)
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("Starting helper process: %v", err)
+ }
+
+ scan := bufio.NewScanner(stdout)
+ var u *url.URL
+ for scan.Scan() {
+ line := scan.Text()
+ if hp := strings.TrimPrefix(line, helperProcessMagic); hp != line {
+ var err error
+ u, err = url.Parse(hp)
+ if err != nil {
+ t.Fatalf("Failed to parse %q: %v", hp, err)
+ }
+ break
+ }
+ }
+ if err := scan.Err(); err != nil {
+ t.Fatalf("Scanning helper process stdout: %v", err)
+ }
+ if u == nil {
+ t.Fatal("Helper process never reported")
+ }
+
+ return u, func() {
+ stdin.Close()
+ if err := cmd.Wait(); err != nil {
+ t.Errorf("Helper process did not exit cleanly: %v", err)
+ }
+ }
+}
+
+const helperProcessMagic = "A lovely helper process is listening at "
+
+// This isn't a real test. It's used as a helper process.
+func TestHelperProcess(*testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+ defer os.Exit(0)
+
+ f := &fakeAPIHandler{}
+ srv := httptest.NewServer(f)
+ defer srv.Close()
+ fmt.Println(helperProcessMagic + srv.URL + apiPath)
+
+ // Wait for stdin to be closed.
+ io.Copy(ioutil.Discard, os.Stdin)
+}
+
+func TestBackgroundContext(t *testing.T) {
+ resetEnv := SetTestEnv()
+ defer resetEnv()
+
+ ctx, key := fromContext(BackgroundContext()), "X-Magic-Ticket-Header"
+ if g, w := ctx.req.Header.Get(key), "my-app-id/default.20150612t184001.0"; g != w {
+ t.Errorf("%v = %q, want %q", key, g, w)
+ }
+
+ // Check that using the background context doesn't panic.
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ Call(BackgroundContext(), "actordb", "LookupActor", req, res) // expected to fail
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 000000000..11df8c07b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+ if i := strings.Index(appid, "~"); i != -1 {
+ partition, appid = appid[:i], appid[i+1:]
+ }
+ if i := strings.Index(appid, ":"); i != -1 {
+ domain, appid = appid[:i], appid[i+1:]
+ }
+ return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
+func appID(fullAppID string) string {
+ _, dom, dis := parseFullAppID(fullAppID)
+ if dom != "" {
+ return dom + ":" + dis
+ }
+ return dis
+}
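
Worked example for the parser above: the full form is partition~domain:display, with both prefixes optional, exactly as the table-driven test that follows exercises. A standalone sketch (the function body is copied verbatim from the vendored file so the example runs on its own):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parseFullAppID mirrors the vendored function: an optional "partition~"
    // prefix, then an optional "domain:" prefix, then the display ID.
    func parseFullAppID(appid string) (partition, domain, displayID string) {
    	if i := strings.Index(appid, "~"); i != -1 {
    		partition, appid = appid[:i], appid[i+1:]
    	}
    	if i := strings.Index(appid, ":"); i != -1 {
    		domain, appid = appid[:i], appid[i+1:]
    	}
    	return partition, domain, appid
    }

    func main() {
    	part, dom, dis := parseFullAppID("part~domain.com:display")
    	fmt.Printf("partition=%q domain=%q display=%q\n", part, dom, dis)
    	// partition="part" domain="domain.com" display="display"
    }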
diff --git a/vendor/google.golang.org/appengine/internal/app_id_test.go b/vendor/google.golang.org/appengine/internal/app_id_test.go
new file mode 100644
index 000000000..e69195cd4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id_test.go
@@ -0,0 +1,34 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "testing"
+)
+
+func TestAppIDParsing(t *testing.T) {
+ testCases := []struct {
+ in string
+ partition, domain, displayID string
+ }{
+ {"simple-app-id", "", "", "simple-app-id"},
+ {"domain.com:domain-app-id", "", "domain.com", "domain-app-id"},
+ {"part~partition-app-id", "part", "", "partition-app-id"},
+ {"part~domain.com:display", "part", "domain.com", "display"},
+ }
+
+ for _, tc := range testCases {
+ part, dom, dis := parseFullAppID(tc.in)
+ if part != tc.partition {
+ t.Errorf("partition of %q: got %q, want %q", tc.in, part, tc.partition)
+ }
+ if dom != tc.domain {
+ t.Errorf("domain of %q: got %q, want %q", tc.in, dom, tc.domain)
+ }
+ if dis != tc.displayID {
+ t.Errorf("displayID of %q: got %q, want %q", tc.in, dis, tc.displayID)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
new file mode 100644
index 000000000..87d9701b8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
@@ -0,0 +1,296 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+// DO NOT EDIT!
+
+/*
+Package app_identity is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+
+It has these top-level messages:
+ AppIdentityServiceError
+ SignForAppRequest
+ SignForAppResponse
+ GetPublicCertificateForAppRequest
+ PublicCertificate
+ GetPublicCertificateForAppResponse
+ GetServiceAccountNameRequest
+ GetServiceAccountNameResponse
+ GetAccessTokenRequest
+ GetAccessTokenResponse
+ GetDefaultGcsBucketNameRequest
+ GetDefaultGcsBucketNameResponse
+*/
+package app_identity
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type AppIdentityServiceError_ErrorCode int32
+
+const (
+ AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0
+ AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9
+ AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000
+ AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
+ AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002
+ AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003
+ AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005
+ AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006
+)
+
+var AppIdentityServiceError_ErrorCode_name = map[int32]string{
+ 0: "SUCCESS",
+ 9: "UNKNOWN_SCOPE",
+ 1000: "BLOB_TOO_LARGE",
+ 1001: "DEADLINE_EXCEEDED",
+ 1002: "NOT_A_VALID_APP",
+ 1003: "UNKNOWN_ERROR",
+ 1005: "NOT_ALLOWED",
+ 1006: "NOT_IMPLEMENTED",
+}
+var AppIdentityServiceError_ErrorCode_value = map[string]int32{
+ "SUCCESS": 0,
+ "UNKNOWN_SCOPE": 9,
+ "BLOB_TOO_LARGE": 1000,
+ "DEADLINE_EXCEEDED": 1001,
+ "NOT_A_VALID_APP": 1002,
+ "UNKNOWN_ERROR": 1003,
+ "NOT_ALLOWED": 1005,
+ "NOT_IMPLEMENTED": 1006,
+}
+
+func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
+ p := new(AppIdentityServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x AppIdentityServiceError_ErrorCode) String() string {
+ return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
+}
+func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = AppIdentityServiceError_ErrorCode(value)
+ return nil
+}
+
+type AppIdentityServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} }
+func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
+func (*AppIdentityServiceError) ProtoMessage() {}
+
+type SignForAppRequest struct {
+ BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} }
+func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*SignForAppRequest) ProtoMessage() {}
+
+func (m *SignForAppRequest) GetBytesToSign() []byte {
+ if m != nil {
+ return m.BytesToSign
+ }
+ return nil
+}
+
+type SignForAppResponse struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} }
+func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*SignForAppResponse) ProtoMessage() {}
+
+func (m *SignForAppResponse) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *SignForAppResponse) GetSignatureBytes() []byte {
+ if m != nil {
+ return m.SignatureBytes
+ }
+ return nil
+}
+
+type GetPublicCertificateForAppRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} }
+func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppRequest) ProtoMessage() {}
+
+type PublicCertificate struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PublicCertificate) Reset() { *m = PublicCertificate{} }
+func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
+func (*PublicCertificate) ProtoMessage() {}
+
+func (m *PublicCertificate) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *PublicCertificate) GetX509CertificatePem() string {
+ if m != nil && m.X509CertificatePem != nil {
+ return *m.X509CertificatePem
+ }
+ return ""
+}
+
+type GetPublicCertificateForAppResponse struct {
+ PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"`
+ MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} }
+func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppResponse) ProtoMessage() {}
+
+func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
+ if m != nil {
+ return m.PublicCertificateList
+ }
+ return nil
+}
+
+func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
+ if m != nil && m.MaxClientCacheTimeInSecond != nil {
+ return *m.MaxClientCacheTimeInSecond
+ }
+ return 0
+}
+
+type GetServiceAccountNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
+func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameRequest) ProtoMessage() {}
+
+type GetServiceAccountNameResponse struct {
+ ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
+func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameResponse) ProtoMessage() {}
+
+func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenRequest struct {
+ Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
+ ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"`
+ ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
+func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenRequest) ProtoMessage() {}
+
+func (m *GetAccessTokenRequest) GetScope() []string {
+ if m != nil {
+ return m.Scope
+ }
+ return nil
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
+ if m != nil && m.ServiceAccountId != nil {
+ return *m.ServiceAccountId
+ }
+ return 0
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenResponse struct {
+ AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"`
+ ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
+func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenResponse) ProtoMessage() {}
+
+func (m *GetAccessTokenResponse) GetAccessToken() string {
+ if m != nil && m.AccessToken != nil {
+ return *m.AccessToken
+ }
+ return ""
+}
+
+func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
+ if m != nil && m.ExpirationTime != nil {
+ return *m.ExpirationTime
+ }
+ return 0
+}
+
+type GetDefaultGcsBucketNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
+func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
+
+type GetDefaultGcsBucketNameResponse struct {
+ DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} }
+func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {}
+
+func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
+ if m != nil && m.DefaultGcsBucketName != nil {
+ return *m.DefaultGcsBucketName
+ }
+ return ""
+}
+
+func init() {
+}
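
A pattern worth noting in this and the other generated files below: every getter first checks m != nil (and, for optional scalar fields, that the field pointer is non-nil), so reading through a nil message yields the zero value instead of a panic. A short sketch of what that buys callers, assuming the vendored import path:

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    	pb "google.golang.org/appengine/internal/app_identity"
    )

    func main() {
    	// A nil message is safe to read through its getters.
    	var resp *pb.SignForAppResponse
    	fmt.Printf("%q\n", resp.GetKeyName()) // "", no panic

    	// Optional proto2 fields are pointers; proto.String builds one inline.
    	resp = &pb.SignForAppResponse{KeyName: proto.String("signing-key-1")}
    	fmt.Printf("%q\n", resp.GetKeyName()) // "signing-key-1"
    }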
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
new file mode 100644
index 000000000..19610ca5b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "app_identity";
+
+package appengine;
+
+message AppIdentityServiceError {
+ enum ErrorCode {
+ SUCCESS = 0;
+ UNKNOWN_SCOPE = 9;
+ BLOB_TOO_LARGE = 1000;
+ DEADLINE_EXCEEDED = 1001;
+ NOT_A_VALID_APP = 1002;
+ UNKNOWN_ERROR = 1003;
+ NOT_ALLOWED = 1005;
+ NOT_IMPLEMENTED = 1006;
+ }
+}
+
+message SignForAppRequest {
+ optional bytes bytes_to_sign = 1;
+}
+
+message SignForAppResponse {
+ optional string key_name = 1;
+ optional bytes signature_bytes = 2;
+}
+
+message GetPublicCertificateForAppRequest {
+}
+
+message PublicCertificate {
+ optional string key_name = 1;
+ optional string x509_certificate_pem = 2;
+}
+
+message GetPublicCertificateForAppResponse {
+ repeated PublicCertificate public_certificate_list = 1;
+ optional int64 max_client_cache_time_in_second = 2;
+}
+
+message GetServiceAccountNameRequest {
+}
+
+message GetServiceAccountNameResponse {
+ optional string service_account_name = 1;
+}
+
+message GetAccessTokenRequest {
+ repeated string scope = 1;
+ optional int64 service_account_id = 2;
+ optional string service_account_name = 3;
+}
+
+message GetAccessTokenResponse {
+ optional string access_token = 1;
+ optional int64 expiration_time = 2;
+}
+
+message GetDefaultGcsBucketNameRequest {
+}
+
+message GetDefaultGcsBucketNameResponse {
+ optional string default_gcs_bucket_name = 1;
+}
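
In proto2, optional scalar fields map to Go pointers and repeated fields to plain slices, which is why the generated structs above mix *string and *int64 with []string. A small construction sketch (the scope URL is illustrative only, not taken from this diff):

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    	pb "google.golang.org/appengine/internal/app_identity"
    )

    func main() {
    	// Repeated fields are plain slices; optional scalars need pointers,
    	// built here with the proto.String/proto.Int64 helpers.
    	req := &pb.GetAccessTokenRequest{
    		Scope:            []string{"https://www.googleapis.com/auth/userinfo.email"},
    		ServiceAccountId: proto.Int64(42),
    	}
    	fmt.Println(req.GetScope(), req.GetServiceAccountId())
    }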
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
new file mode 100644
index 000000000..36a195650
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -0,0 +1,133 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/base/api_base.proto
+// DO NOT EDIT!
+
+/*
+Package base is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/base/api_base.proto
+
+It has these top-level messages:
+ StringProto
+ Integer32Proto
+ Integer64Proto
+ BoolProto
+ DoubleProto
+ BytesProto
+ VoidProto
+*/
+package base
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type StringProto struct {
+ Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StringProto) Reset() { *m = StringProto{} }
+func (m *StringProto) String() string { return proto.CompactTextString(m) }
+func (*StringProto) ProtoMessage() {}
+
+func (m *StringProto) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Integer32Proto struct {
+ Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
+func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer32Proto) ProtoMessage() {}
+
+func (m *Integer32Proto) GetValue() int32 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Integer64Proto struct {
+ Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
+func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer64Proto) ProtoMessage() {}
+
+func (m *Integer64Proto) GetValue() int64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BoolProto struct {
+ Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BoolProto) Reset() { *m = BoolProto{} }
+func (m *BoolProto) String() string { return proto.CompactTextString(m) }
+func (*BoolProto) ProtoMessage() {}
+
+func (m *BoolProto) GetValue() bool {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return false
+}
+
+type DoubleProto struct {
+ Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DoubleProto) Reset() { *m = DoubleProto{} }
+func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
+func (*DoubleProto) ProtoMessage() {}
+
+func (m *DoubleProto) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BytesProto struct {
+ Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BytesProto) Reset() { *m = BytesProto{} }
+func (m *BytesProto) String() string { return proto.CompactTextString(m) }
+func (*BytesProto) ProtoMessage() {}
+
+func (m *BytesProto) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type VoidProto struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *VoidProto) Reset() { *m = VoidProto{} }
+func (m *VoidProto) String() string { return proto.CompactTextString(m) }
+func (*VoidProto) ProtoMessage() {}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 000000000..56cd7a3ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+ required string value = 1;
+}
+
+message Integer32Proto {
+ required int32 value = 1;
+}
+
+message Integer64Proto {
+ required int64 value = 1;
+}
+
+message BoolProto {
+ required bool value = 1;
+}
+
+message DoubleProto {
+ required double value = 1;
+}
+
+message BytesProto {
+ required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
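
These base wrapper messages are the payloads the runtime's Call plumbing exchanges; the allocation test earlier in this patch uses basepb.StringProto for both request and response. A round-trip sketch with the golang/protobuf API of this era:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/golang/protobuf/proto"
    	basepb "google.golang.org/appengine/internal/base"
    )

    func main() {
    	in := &basepb.StringProto{Value: proto.String("Doctor Who")}
    	data, err := proto.Marshal(in)
    	if err != nil {
    		log.Fatalf("marshal: %v", err)
    	}
    	out := &basepb.StringProto{}
    	if err := proto.Unmarshal(data, out); err != nil {
    		log.Fatalf("unmarshal: %v", err)
    	}
    	fmt.Println(out.GetValue()) // Doctor Who
    }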
diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go
new file mode 100644
index 000000000..8705ec348
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go
@@ -0,0 +1,347 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/blobstore/blobstore_service.proto
+// DO NOT EDIT!
+
+/*
+Package blobstore is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/blobstore/blobstore_service.proto
+
+It has these top-level messages:
+ BlobstoreServiceError
+ CreateUploadURLRequest
+ CreateUploadURLResponse
+ DeleteBlobRequest
+ FetchDataRequest
+ FetchDataResponse
+ CloneBlobRequest
+ CloneBlobResponse
+ DecodeBlobKeyRequest
+ DecodeBlobKeyResponse
+ CreateEncodedGoogleStorageKeyRequest
+ CreateEncodedGoogleStorageKeyResponse
+*/
+package blobstore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type BlobstoreServiceError_ErrorCode int32
+
+const (
+ BlobstoreServiceError_OK BlobstoreServiceError_ErrorCode = 0
+ BlobstoreServiceError_INTERNAL_ERROR BlobstoreServiceError_ErrorCode = 1
+ BlobstoreServiceError_URL_TOO_LONG BlobstoreServiceError_ErrorCode = 2
+ BlobstoreServiceError_PERMISSION_DENIED BlobstoreServiceError_ErrorCode = 3
+ BlobstoreServiceError_BLOB_NOT_FOUND BlobstoreServiceError_ErrorCode = 4
+ BlobstoreServiceError_DATA_INDEX_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 5
+ BlobstoreServiceError_BLOB_FETCH_SIZE_TOO_LARGE BlobstoreServiceError_ErrorCode = 6
+ BlobstoreServiceError_ARGUMENT_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 8
+ BlobstoreServiceError_INVALID_BLOB_KEY BlobstoreServiceError_ErrorCode = 9
+)
+
+var BlobstoreServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "URL_TOO_LONG",
+ 3: "PERMISSION_DENIED",
+ 4: "BLOB_NOT_FOUND",
+ 5: "DATA_INDEX_OUT_OF_RANGE",
+ 6: "BLOB_FETCH_SIZE_TOO_LARGE",
+ 8: "ARGUMENT_OUT_OF_RANGE",
+ 9: "INVALID_BLOB_KEY",
+}
+var BlobstoreServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "URL_TOO_LONG": 2,
+ "PERMISSION_DENIED": 3,
+ "BLOB_NOT_FOUND": 4,
+ "DATA_INDEX_OUT_OF_RANGE": 5,
+ "BLOB_FETCH_SIZE_TOO_LARGE": 6,
+ "ARGUMENT_OUT_OF_RANGE": 8,
+ "INVALID_BLOB_KEY": 9,
+}
+
+func (x BlobstoreServiceError_ErrorCode) Enum() *BlobstoreServiceError_ErrorCode {
+ p := new(BlobstoreServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x BlobstoreServiceError_ErrorCode) String() string {
+ return proto.EnumName(BlobstoreServiceError_ErrorCode_name, int32(x))
+}
+func (x *BlobstoreServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(BlobstoreServiceError_ErrorCode_value, data, "BlobstoreServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = BlobstoreServiceError_ErrorCode(value)
+ return nil
+}
+
+type BlobstoreServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BlobstoreServiceError) Reset() { *m = BlobstoreServiceError{} }
+func (m *BlobstoreServiceError) String() string { return proto.CompactTextString(m) }
+func (*BlobstoreServiceError) ProtoMessage() {}
+
+type CreateUploadURLRequest struct {
+ SuccessPath *string `protobuf:"bytes,1,req,name=success_path" json:"success_path,omitempty"`
+ MaxUploadSizeBytes *int64 `protobuf:"varint,2,opt,name=max_upload_size_bytes" json:"max_upload_size_bytes,omitempty"`
+ MaxUploadSizePerBlobBytes *int64 `protobuf:"varint,3,opt,name=max_upload_size_per_blob_bytes" json:"max_upload_size_per_blob_bytes,omitempty"`
+ GsBucketName *string `protobuf:"bytes,4,opt,name=gs_bucket_name" json:"gs_bucket_name,omitempty"`
+ UrlExpiryTimeSeconds *int32 `protobuf:"varint,5,opt,name=url_expiry_time_seconds" json:"url_expiry_time_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateUploadURLRequest) Reset() { *m = CreateUploadURLRequest{} }
+func (m *CreateUploadURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateUploadURLRequest) ProtoMessage() {}
+
+func (m *CreateUploadURLRequest) GetSuccessPath() string {
+ if m != nil && m.SuccessPath != nil {
+ return *m.SuccessPath
+ }
+ return ""
+}
+
+func (m *CreateUploadURLRequest) GetMaxUploadSizeBytes() int64 {
+ if m != nil && m.MaxUploadSizeBytes != nil {
+ return *m.MaxUploadSizeBytes
+ }
+ return 0
+}
+
+func (m *CreateUploadURLRequest) GetMaxUploadSizePerBlobBytes() int64 {
+ if m != nil && m.MaxUploadSizePerBlobBytes != nil {
+ return *m.MaxUploadSizePerBlobBytes
+ }
+ return 0
+}
+
+func (m *CreateUploadURLRequest) GetGsBucketName() string {
+ if m != nil && m.GsBucketName != nil {
+ return *m.GsBucketName
+ }
+ return ""
+}
+
+func (m *CreateUploadURLRequest) GetUrlExpiryTimeSeconds() int32 {
+ if m != nil && m.UrlExpiryTimeSeconds != nil {
+ return *m.UrlExpiryTimeSeconds
+ }
+ return 0
+}
+
+type CreateUploadURLResponse struct {
+ Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateUploadURLResponse) Reset() { *m = CreateUploadURLResponse{} }
+func (m *CreateUploadURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateUploadURLResponse) ProtoMessage() {}
+
+func (m *CreateUploadURLResponse) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+type DeleteBlobRequest struct {
+ BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"`
+ Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteBlobRequest) Reset() { *m = DeleteBlobRequest{} }
+func (m *DeleteBlobRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteBlobRequest) ProtoMessage() {}
+
+func (m *DeleteBlobRequest) GetBlobKey() []string {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+func (m *DeleteBlobRequest) GetToken() string {
+ if m != nil && m.Token != nil {
+ return *m.Token
+ }
+ return ""
+}
+
+type FetchDataRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ StartIndex *int64 `protobuf:"varint,2,req,name=start_index" json:"start_index,omitempty"`
+ EndIndex *int64 `protobuf:"varint,3,req,name=end_index" json:"end_index,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FetchDataRequest) Reset() { *m = FetchDataRequest{} }
+func (m *FetchDataRequest) String() string { return proto.CompactTextString(m) }
+func (*FetchDataRequest) ProtoMessage() {}
+
+func (m *FetchDataRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *FetchDataRequest) GetStartIndex() int64 {
+ if m != nil && m.StartIndex != nil {
+ return *m.StartIndex
+ }
+ return 0
+}
+
+func (m *FetchDataRequest) GetEndIndex() int64 {
+ if m != nil && m.EndIndex != nil {
+ return *m.EndIndex
+ }
+ return 0
+}
+
+type FetchDataResponse struct {
+ Data []byte `protobuf:"bytes,1000,req,name=data" json:"data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FetchDataResponse) Reset() { *m = FetchDataResponse{} }
+func (m *FetchDataResponse) String() string { return proto.CompactTextString(m) }
+func (*FetchDataResponse) ProtoMessage() {}
+
+func (m *FetchDataResponse) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+type CloneBlobRequest struct {
+ BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ MimeType []byte `protobuf:"bytes,2,req,name=mime_type" json:"mime_type,omitempty"`
+ TargetAppId []byte `protobuf:"bytes,3,req,name=target_app_id" json:"target_app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloneBlobRequest) Reset() { *m = CloneBlobRequest{} }
+func (m *CloneBlobRequest) String() string { return proto.CompactTextString(m) }
+func (*CloneBlobRequest) ProtoMessage() {}
+
+func (m *CloneBlobRequest) GetBlobKey() []byte {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+func (m *CloneBlobRequest) GetMimeType() []byte {
+ if m != nil {
+ return m.MimeType
+ }
+ return nil
+}
+
+func (m *CloneBlobRequest) GetTargetAppId() []byte {
+ if m != nil {
+ return m.TargetAppId
+ }
+ return nil
+}
+
+type CloneBlobResponse struct {
+ BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloneBlobResponse) Reset() { *m = CloneBlobResponse{} }
+func (m *CloneBlobResponse) String() string { return proto.CompactTextString(m) }
+func (*CloneBlobResponse) ProtoMessage() {}
+
+func (m *CloneBlobResponse) GetBlobKey() []byte {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+type DecodeBlobKeyRequest struct {
+ BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DecodeBlobKeyRequest) Reset() { *m = DecodeBlobKeyRequest{} }
+func (m *DecodeBlobKeyRequest) String() string { return proto.CompactTextString(m) }
+func (*DecodeBlobKeyRequest) ProtoMessage() {}
+
+func (m *DecodeBlobKeyRequest) GetBlobKey() []string {
+ if m != nil {
+ return m.BlobKey
+ }
+ return nil
+}
+
+type DecodeBlobKeyResponse struct {
+ Decoded []string `protobuf:"bytes,1,rep,name=decoded" json:"decoded,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DecodeBlobKeyResponse) Reset() { *m = DecodeBlobKeyResponse{} }
+func (m *DecodeBlobKeyResponse) String() string { return proto.CompactTextString(m) }
+func (*DecodeBlobKeyResponse) ProtoMessage() {}
+
+func (m *DecodeBlobKeyResponse) GetDecoded() []string {
+ if m != nil {
+ return m.Decoded
+ }
+ return nil
+}
+
+type CreateEncodedGoogleStorageKeyRequest struct {
+ Filename *string `protobuf:"bytes,1,req,name=filename" json:"filename,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateEncodedGoogleStorageKeyRequest) Reset() { *m = CreateEncodedGoogleStorageKeyRequest{} }
+func (m *CreateEncodedGoogleStorageKeyRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateEncodedGoogleStorageKeyRequest) ProtoMessage() {}
+
+func (m *CreateEncodedGoogleStorageKeyRequest) GetFilename() string {
+ if m != nil && m.Filename != nil {
+ return *m.Filename
+ }
+ return ""
+}
+
+type CreateEncodedGoogleStorageKeyResponse struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateEncodedGoogleStorageKeyResponse) Reset() { *m = CreateEncodedGoogleStorageKeyResponse{} }
+func (m *CreateEncodedGoogleStorageKeyResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateEncodedGoogleStorageKeyResponse) ProtoMessage() {}
+
+func (m *CreateEncodedGoogleStorageKeyResponse) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto
new file mode 100644
index 000000000..33b265032
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto
@@ -0,0 +1,71 @@
+syntax = "proto2";
+option go_package = "blobstore";
+
+package appengine;
+
+message BlobstoreServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ URL_TOO_LONG = 2;
+ PERMISSION_DENIED = 3;
+ BLOB_NOT_FOUND = 4;
+ DATA_INDEX_OUT_OF_RANGE = 5;
+ BLOB_FETCH_SIZE_TOO_LARGE = 6;
+ ARGUMENT_OUT_OF_RANGE = 8;
+ INVALID_BLOB_KEY = 9;
+ }
+}
+
+message CreateUploadURLRequest {
+ required string success_path = 1;
+ optional int64 max_upload_size_bytes = 2;
+ optional int64 max_upload_size_per_blob_bytes = 3;
+ optional string gs_bucket_name = 4;
+ optional int32 url_expiry_time_seconds = 5;
+}
+
+message CreateUploadURLResponse {
+ required string url = 1;
+}
+
+message DeleteBlobRequest {
+ repeated string blob_key = 1;
+ optional string token = 2;
+}
+
+message FetchDataRequest {
+ required string blob_key = 1;
+ required int64 start_index = 2;
+ required int64 end_index = 3;
+}
+
+message FetchDataResponse {
+ required bytes data = 1000 [ctype = CORD];
+}
+
+message CloneBlobRequest {
+ required bytes blob_key = 1;
+ required bytes mime_type = 2;
+ required bytes target_app_id = 3;
+}
+
+message CloneBlobResponse {
+ required bytes blob_key = 1;
+}
+
+message DecodeBlobKeyRequest {
+ repeated string blob_key = 1;
+}
+
+message DecodeBlobKeyResponse {
+ repeated string decoded = 1;
+}
+
+message CreateEncodedGoogleStorageKeyRequest {
+ required string filename = 1;
+}
+
+message CreateEncodedGoogleStorageKeyResponse {
+ required string blob_key = 1;
+}
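
Each generated enum ships with the three helpers visible in the .pb.go above: Enum() returns a pointer (proto2 optional enum fields are pointers), String() yields the symbolic name, and the generated UnmarshalJSON accepts that name via proto.UnmarshalJSONEnum. A short sketch, assuming the vendored import path:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	pb "google.golang.org/appengine/internal/blobstore"
    )

    func main() {
    	code := pb.BlobstoreServiceError_BLOB_NOT_FOUND
    	fmt.Println(code.String())        // BLOB_NOT_FOUND
    	fmt.Println(*code.Enum() == code) // true; Enum() allocates a pointer copy

    	// The generated UnmarshalJSON accepts the symbolic name (or the number).
    	var decoded pb.BlobstoreServiceError_ErrorCode
    	if err := json.Unmarshal([]byte(`"URL_TOO_LONG"`), &decoded); err != nil {
    		fmt.Println("unmarshal:", err)
    		return
    	}
    	fmt.Println(decoded == pb.BlobstoreServiceError_URL_TOO_LONG) // true
    }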
diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go b/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go
new file mode 100644
index 000000000..173636400
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go
@@ -0,0 +1,125 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/capability/capability_service.proto
+// DO NOT EDIT!
+
+/*
+Package capability is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/capability/capability_service.proto
+
+It has these top-level messages:
+ IsEnabledRequest
+ IsEnabledResponse
+*/
+package capability
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type IsEnabledResponse_SummaryStatus int32
+
+const (
+ IsEnabledResponse_DEFAULT IsEnabledResponse_SummaryStatus = 0
+ IsEnabledResponse_ENABLED IsEnabledResponse_SummaryStatus = 1
+ IsEnabledResponse_SCHEDULED_FUTURE IsEnabledResponse_SummaryStatus = 2
+ IsEnabledResponse_SCHEDULED_NOW IsEnabledResponse_SummaryStatus = 3
+ IsEnabledResponse_DISABLED IsEnabledResponse_SummaryStatus = 4
+ IsEnabledResponse_UNKNOWN IsEnabledResponse_SummaryStatus = 5
+)
+
+var IsEnabledResponse_SummaryStatus_name = map[int32]string{
+ 0: "DEFAULT",
+ 1: "ENABLED",
+ 2: "SCHEDULED_FUTURE",
+ 3: "SCHEDULED_NOW",
+ 4: "DISABLED",
+ 5: "UNKNOWN",
+}
+var IsEnabledResponse_SummaryStatus_value = map[string]int32{
+ "DEFAULT": 0,
+ "ENABLED": 1,
+ "SCHEDULED_FUTURE": 2,
+ "SCHEDULED_NOW": 3,
+ "DISABLED": 4,
+ "UNKNOWN": 5,
+}
+
+func (x IsEnabledResponse_SummaryStatus) Enum() *IsEnabledResponse_SummaryStatus {
+ p := new(IsEnabledResponse_SummaryStatus)
+ *p = x
+ return p
+}
+func (x IsEnabledResponse_SummaryStatus) String() string {
+ return proto.EnumName(IsEnabledResponse_SummaryStatus_name, int32(x))
+}
+func (x *IsEnabledResponse_SummaryStatus) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IsEnabledResponse_SummaryStatus_value, data, "IsEnabledResponse_SummaryStatus")
+ if err != nil {
+ return err
+ }
+ *x = IsEnabledResponse_SummaryStatus(value)
+ return nil
+}
+
+type IsEnabledRequest struct {
+ Package *string `protobuf:"bytes,1,req,name=package" json:"package,omitempty"`
+ Capability []string `protobuf:"bytes,2,rep,name=capability" json:"capability,omitempty"`
+ Call []string `protobuf:"bytes,3,rep,name=call" json:"call,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IsEnabledRequest) Reset() { *m = IsEnabledRequest{} }
+func (m *IsEnabledRequest) String() string { return proto.CompactTextString(m) }
+func (*IsEnabledRequest) ProtoMessage() {}
+
+func (m *IsEnabledRequest) GetPackage() string {
+ if m != nil && m.Package != nil {
+ return *m.Package
+ }
+ return ""
+}
+
+func (m *IsEnabledRequest) GetCapability() []string {
+ if m != nil {
+ return m.Capability
+ }
+ return nil
+}
+
+func (m *IsEnabledRequest) GetCall() []string {
+ if m != nil {
+ return m.Call
+ }
+ return nil
+}
+
+type IsEnabledResponse struct {
+ SummaryStatus *IsEnabledResponse_SummaryStatus `protobuf:"varint,1,opt,name=summary_status,enum=appengine.IsEnabledResponse_SummaryStatus" json:"summary_status,omitempty"`
+ TimeUntilScheduled *int64 `protobuf:"varint,2,opt,name=time_until_scheduled" json:"time_until_scheduled,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IsEnabledResponse) Reset() { *m = IsEnabledResponse{} }
+func (m *IsEnabledResponse) String() string { return proto.CompactTextString(m) }
+func (*IsEnabledResponse) ProtoMessage() {}
+
+func (m *IsEnabledResponse) GetSummaryStatus() IsEnabledResponse_SummaryStatus {
+ if m != nil && m.SummaryStatus != nil {
+ return *m.SummaryStatus
+ }
+ return IsEnabledResponse_DEFAULT
+}
+
+func (m *IsEnabledResponse) GetTimeUntilScheduled() int64 {
+ if m != nil && m.TimeUntilScheduled != nil {
+ return *m.TimeUntilScheduled
+ }
+ return 0
+}
diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.proto b/vendor/google.golang.org/appengine/internal/capability/capability_service.proto
new file mode 100644
index 000000000..5660ab6ee
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/capability/capability_service.proto
@@ -0,0 +1,28 @@
+syntax = "proto2";
+option go_package = "capability";
+
+package appengine;
+
+message IsEnabledRequest {
+ required string package = 1;
+ repeated string capability = 2;
+ repeated string call = 3;
+}
+
+message IsEnabledResponse {
+ enum SummaryStatus {
+ DEFAULT = 0;
+ ENABLED = 1;
+ SCHEDULED_FUTURE = 2;
+ SCHEDULED_NOW = 3;
+ DISABLED = 4;
+ UNKNOWN = 5;
+ }
+ optional SummaryStatus summary_status = 1;
+
+ optional int64 time_until_scheduled = 2;
+}
+
+service CapabilityService {
+ rpc IsEnabled(IsEnabledRequest) returns (IsEnabledResponse) {};
+}
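
Note that although this .proto declares a CapabilityService, the generated .pb.go above contains no client stub; this vintage of protoc-gen-go emits only messages and enums, and the runtime dispatches RPCs by service and method name through internal.Call, as the api_test.go changes earlier in this patch show. A hedged sketch follows; the "capability_service"/"IsEnabled" strings are assumptions, not something this diff confirms:

    package capcheck

    import (
    	"github.com/golang/protobuf/proto"
    	netcontext "golang.org/x/net/context"

    	"google.golang.org/appengine/internal"
    	pb "google.golang.org/appengine/internal/capability"
    )

    // isEnabled sketches how a caller might reach the capability service.
    // The service/method strings here are illustrative assumptions.
    func isEnabled(ctx netcontext.Context, pkg string) (*pb.IsEnabledResponse, error) {
    	req := &pb.IsEnabledRequest{
    		Package:    proto.String(pkg),
    		Capability: []string{"*"},
    	}
    	res := &pb.IsEnabledResponse{}
    	if err := internal.Call(ctx, "capability_service", "IsEnabled", req, res); err != nil {
    		return nil, err
    	}
    	return res, nil
    }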
diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go
new file mode 100644
index 000000000..7b8d00c98
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go
@@ -0,0 +1,154 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/channel/channel_service.proto
+// DO NOT EDIT!
+
+/*
+Package channel is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/channel/channel_service.proto
+
+It has these top-level messages:
+ ChannelServiceError
+ CreateChannelRequest
+ CreateChannelResponse
+ SendMessageRequest
+*/
+package channel
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ChannelServiceError_ErrorCode int32
+
+const (
+ ChannelServiceError_OK ChannelServiceError_ErrorCode = 0
+ ChannelServiceError_INTERNAL_ERROR ChannelServiceError_ErrorCode = 1
+ ChannelServiceError_INVALID_CHANNEL_KEY ChannelServiceError_ErrorCode = 2
+ ChannelServiceError_BAD_MESSAGE ChannelServiceError_ErrorCode = 3
+ ChannelServiceError_INVALID_CHANNEL_TOKEN_DURATION ChannelServiceError_ErrorCode = 4
+ ChannelServiceError_APPID_ALIAS_REQUIRED ChannelServiceError_ErrorCode = 5
+)
+
+var ChannelServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "INVALID_CHANNEL_KEY",
+ 3: "BAD_MESSAGE",
+ 4: "INVALID_CHANNEL_TOKEN_DURATION",
+ 5: "APPID_ALIAS_REQUIRED",
+}
+var ChannelServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "INVALID_CHANNEL_KEY": 2,
+ "BAD_MESSAGE": 3,
+ "INVALID_CHANNEL_TOKEN_DURATION": 4,
+ "APPID_ALIAS_REQUIRED": 5,
+}
+
+func (x ChannelServiceError_ErrorCode) Enum() *ChannelServiceError_ErrorCode {
+ p := new(ChannelServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ChannelServiceError_ErrorCode) String() string {
+ return proto.EnumName(ChannelServiceError_ErrorCode_name, int32(x))
+}
+func (x *ChannelServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ChannelServiceError_ErrorCode_value, data, "ChannelServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ChannelServiceError_ErrorCode(value)
+ return nil
+}
+
+type ChannelServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ChannelServiceError) Reset() { *m = ChannelServiceError{} }
+func (m *ChannelServiceError) String() string { return proto.CompactTextString(m) }
+func (*ChannelServiceError) ProtoMessage() {}
+
+type CreateChannelRequest struct {
+ ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"`
+ DurationMinutes *int32 `protobuf:"varint,2,opt,name=duration_minutes" json:"duration_minutes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateChannelRequest) Reset() { *m = CreateChannelRequest{} }
+func (m *CreateChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateChannelRequest) ProtoMessage() {}
+
+func (m *CreateChannelRequest) GetApplicationKey() string {
+ if m != nil && m.ApplicationKey != nil {
+ return *m.ApplicationKey
+ }
+ return ""
+}
+
+func (m *CreateChannelRequest) GetDurationMinutes() int32 {
+ if m != nil && m.DurationMinutes != nil {
+ return *m.DurationMinutes
+ }
+ return 0
+}
+
+type CreateChannelResponse struct {
+ Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"`
+ DurationMinutes *int32 `protobuf:"varint,3,opt,name=duration_minutes" json:"duration_minutes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateChannelResponse) Reset() { *m = CreateChannelResponse{} }
+func (m *CreateChannelResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateChannelResponse) ProtoMessage() {}
+
+func (m *CreateChannelResponse) GetToken() string {
+ if m != nil && m.Token != nil {
+ return *m.Token
+ }
+ return ""
+}
+
+func (m *CreateChannelResponse) GetDurationMinutes() int32 {
+ if m != nil && m.DurationMinutes != nil {
+ return *m.DurationMinutes
+ }
+ return 0
+}
+
+type SendMessageRequest struct {
+ ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"`
+ Message *string `protobuf:"bytes,2,req,name=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SendMessageRequest) Reset() { *m = SendMessageRequest{} }
+func (m *SendMessageRequest) String() string { return proto.CompactTextString(m) }
+func (*SendMessageRequest) ProtoMessage() {}
+
+func (m *SendMessageRequest) GetApplicationKey() string {
+ if m != nil && m.ApplicationKey != nil {
+ return *m.ApplicationKey
+ }
+ return ""
+}
+
+func (m *SendMessageRequest) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.proto b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto
new file mode 100644
index 000000000..2b5a918ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto
@@ -0,0 +1,30 @@
+syntax = "proto2";
+option go_package = "channel";
+
+package appengine;
+
+message ChannelServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ INVALID_CHANNEL_KEY = 2;
+ BAD_MESSAGE = 3;
+ INVALID_CHANNEL_TOKEN_DURATION = 4;
+ APPID_ALIAS_REQUIRED = 5;
+ }
+}
+
+message CreateChannelRequest {
+ required string application_key = 1;
+ optional int32 duration_minutes = 2;
+}
+
+message CreateChannelResponse {
+ optional string token = 2;
+ optional int32 duration_minutes = 3;
+}
+
+message SendMessageRequest {
+ required string application_key = 1;
+ required string message = 2;
+}
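
One detail in CreateChannelResponse above: its fields start at number 2, and a skipped number in proto2 usually means a field existed in an earlier revision and was retired, with the number left unused to keep the wire format compatible with old clients. That is the usual reading; the diff itself does not record why field 1 is absent.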
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 000000000..8613cb731
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,2778 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
+// DO NOT EDIT!
+
+/*
+Package datastore is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/datastore/datastore_v3.proto
+
+It has these top-level messages:
+ Action
+ PropertyValue
+ Property
+ Path
+ Reference
+ User
+ EntityProto
+ CompositeProperty
+ Index
+ CompositeIndex
+ IndexPostfix
+ IndexPosition
+ Snapshot
+ InternalHeader
+ Transaction
+ Query
+ CompiledQuery
+ CompiledCursor
+ Cursor
+ Error
+ Cost
+ GetRequest
+ GetResponse
+ PutRequest
+ PutResponse
+ TouchRequest
+ TouchResponse
+ DeleteRequest
+ DeleteResponse
+ NextRequest
+ QueryResult
+ AllocateIdsRequest
+ AllocateIdsResponse
+ CompositeIndices
+ AddActionsRequest
+ AddActionsResponse
+ BeginTransactionRequest
+ CommitResponse
+*/
+package datastore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Property_Meaning int32
+
+const (
+ Property_NO_MEANING Property_Meaning = 0
+ Property_BLOB Property_Meaning = 14
+ Property_TEXT Property_Meaning = 15
+ Property_BYTESTRING Property_Meaning = 16
+ Property_ATOM_CATEGORY Property_Meaning = 1
+ Property_ATOM_LINK Property_Meaning = 2
+ Property_ATOM_TITLE Property_Meaning = 3
+ Property_ATOM_CONTENT Property_Meaning = 4
+ Property_ATOM_SUMMARY Property_Meaning = 5
+ Property_ATOM_AUTHOR Property_Meaning = 6
+ Property_GD_WHEN Property_Meaning = 7
+ Property_GD_EMAIL Property_Meaning = 8
+ Property_GEORSS_POINT Property_Meaning = 9
+ Property_GD_IM Property_Meaning = 10
+ Property_GD_PHONENUMBER Property_Meaning = 11
+ Property_GD_POSTALADDRESS Property_Meaning = 12
+ Property_GD_RATING Property_Meaning = 13
+ Property_BLOBKEY Property_Meaning = 17
+ Property_ENTITY_PROTO Property_Meaning = 19
+ Property_INDEX_VALUE Property_Meaning = 18
+)
+
+var Property_Meaning_name = map[int32]string{
+ 0: "NO_MEANING",
+ 14: "BLOB",
+ 15: "TEXT",
+ 16: "BYTESTRING",
+ 1: "ATOM_CATEGORY",
+ 2: "ATOM_LINK",
+ 3: "ATOM_TITLE",
+ 4: "ATOM_CONTENT",
+ 5: "ATOM_SUMMARY",
+ 6: "ATOM_AUTHOR",
+ 7: "GD_WHEN",
+ 8: "GD_EMAIL",
+ 9: "GEORSS_POINT",
+ 10: "GD_IM",
+ 11: "GD_PHONENUMBER",
+ 12: "GD_POSTALADDRESS",
+ 13: "GD_RATING",
+ 17: "BLOBKEY",
+ 19: "ENTITY_PROTO",
+ 18: "INDEX_VALUE",
+}
+var Property_Meaning_value = map[string]int32{
+ "NO_MEANING": 0,
+ "BLOB": 14,
+ "TEXT": 15,
+ "BYTESTRING": 16,
+ "ATOM_CATEGORY": 1,
+ "ATOM_LINK": 2,
+ "ATOM_TITLE": 3,
+ "ATOM_CONTENT": 4,
+ "ATOM_SUMMARY": 5,
+ "ATOM_AUTHOR": 6,
+ "GD_WHEN": 7,
+ "GD_EMAIL": 8,
+ "GEORSS_POINT": 9,
+ "GD_IM": 10,
+ "GD_PHONENUMBER": 11,
+ "GD_POSTALADDRESS": 12,
+ "GD_RATING": 13,
+ "BLOBKEY": 17,
+ "ENTITY_PROTO": 19,
+ "INDEX_VALUE": 18,
+}
+
+func (x Property_Meaning) Enum() *Property_Meaning {
+ p := new(Property_Meaning)
+ *p = x
+ return p
+}
+func (x Property_Meaning) String() string {
+ return proto.EnumName(Property_Meaning_name, int32(x))
+}
+func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
+ if err != nil {
+ return err
+ }
+ *x = Property_Meaning(value)
+ return nil
+}
+
+type Property_FtsTokenizationOption int32
+
+const (
+ Property_HTML Property_FtsTokenizationOption = 1
+ Property_ATOM Property_FtsTokenizationOption = 2
+)
+
+var Property_FtsTokenizationOption_name = map[int32]string{
+ 1: "HTML",
+ 2: "ATOM",
+}
+var Property_FtsTokenizationOption_value = map[string]int32{
+ "HTML": 1,
+ "ATOM": 2,
+}
+
+func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
+ p := new(Property_FtsTokenizationOption)
+ *p = x
+ return p
+}
+func (x Property_FtsTokenizationOption) String() string {
+ return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
+}
+func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
+ if err != nil {
+ return err
+ }
+ *x = Property_FtsTokenizationOption(value)
+ return nil
+}
+
+type EntityProto_Kind int32
+
+const (
+ EntityProto_GD_CONTACT EntityProto_Kind = 1
+ EntityProto_GD_EVENT EntityProto_Kind = 2
+ EntityProto_GD_MESSAGE EntityProto_Kind = 3
+)
+
+var EntityProto_Kind_name = map[int32]string{
+ 1: "GD_CONTACT",
+ 2: "GD_EVENT",
+ 3: "GD_MESSAGE",
+}
+var EntityProto_Kind_value = map[string]int32{
+ "GD_CONTACT": 1,
+ "GD_EVENT": 2,
+ "GD_MESSAGE": 3,
+}
+
+func (x EntityProto_Kind) Enum() *EntityProto_Kind {
+ p := new(EntityProto_Kind)
+ *p = x
+ return p
+}
+func (x EntityProto_Kind) String() string {
+ return proto.EnumName(EntityProto_Kind_name, int32(x))
+}
+func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
+ if err != nil {
+ return err
+ }
+ *x = EntityProto_Kind(value)
+ return nil
+}
+
+type Index_Property_Direction int32
+
+const (
+ Index_Property_ASCENDING Index_Property_Direction = 1
+ Index_Property_DESCENDING Index_Property_Direction = 2
+)
+
+var Index_Property_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Index_Property_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Index_Property_Direction) Enum() *Index_Property_Direction {
+ p := new(Index_Property_Direction)
+ *p = x
+ return p
+}
+func (x Index_Property_Direction) String() string {
+ return proto.EnumName(Index_Property_Direction_name, int32(x))
+}
+func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Index_Property_Direction(value)
+ return nil
+}
+
+type CompositeIndex_State int32
+
+const (
+ CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
+ CompositeIndex_READ_WRITE CompositeIndex_State = 2
+ CompositeIndex_DELETED CompositeIndex_State = 3
+ CompositeIndex_ERROR CompositeIndex_State = 4
+)
+
+var CompositeIndex_State_name = map[int32]string{
+ 1: "WRITE_ONLY",
+ 2: "READ_WRITE",
+ 3: "DELETED",
+ 4: "ERROR",
+}
+var CompositeIndex_State_value = map[string]int32{
+ "WRITE_ONLY": 1,
+ "READ_WRITE": 2,
+ "DELETED": 3,
+ "ERROR": 4,
+}
+
+func (x CompositeIndex_State) Enum() *CompositeIndex_State {
+ p := new(CompositeIndex_State)
+ *p = x
+ return p
+}
+func (x CompositeIndex_State) String() string {
+ return proto.EnumName(CompositeIndex_State_name, int32(x))
+}
+func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
+ if err != nil {
+ return err
+ }
+ *x = CompositeIndex_State(value)
+ return nil
+}
+
+type Snapshot_Status int32
+
+const (
+ Snapshot_INACTIVE Snapshot_Status = 0
+ Snapshot_ACTIVE Snapshot_Status = 1
+)
+
+var Snapshot_Status_name = map[int32]string{
+ 0: "INACTIVE",
+ 1: "ACTIVE",
+}
+var Snapshot_Status_value = map[string]int32{
+ "INACTIVE": 0,
+ "ACTIVE": 1,
+}
+
+func (x Snapshot_Status) Enum() *Snapshot_Status {
+ p := new(Snapshot_Status)
+ *p = x
+ return p
+}
+func (x Snapshot_Status) String() string {
+ return proto.EnumName(Snapshot_Status_name, int32(x))
+}
+func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
+ if err != nil {
+ return err
+ }
+ *x = Snapshot_Status(value)
+ return nil
+}
+
+type Query_Hint int32
+
+const (
+ Query_ORDER_FIRST Query_Hint = 1
+ Query_ANCESTOR_FIRST Query_Hint = 2
+ Query_FILTER_FIRST Query_Hint = 3
+)
+
+var Query_Hint_name = map[int32]string{
+ 1: "ORDER_FIRST",
+ 2: "ANCESTOR_FIRST",
+ 3: "FILTER_FIRST",
+}
+var Query_Hint_value = map[string]int32{
+ "ORDER_FIRST": 1,
+ "ANCESTOR_FIRST": 2,
+ "FILTER_FIRST": 3,
+}
+
+func (x Query_Hint) Enum() *Query_Hint {
+ p := new(Query_Hint)
+ *p = x
+ return p
+}
+func (x Query_Hint) String() string {
+ return proto.EnumName(Query_Hint_name, int32(x))
+}
+func (x *Query_Hint) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
+ if err != nil {
+ return err
+ }
+ *x = Query_Hint(value)
+ return nil
+}
+
+type Query_Filter_Operator int32
+
+const (
+ Query_Filter_LESS_THAN Query_Filter_Operator = 1
+ Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
+ Query_Filter_GREATER_THAN Query_Filter_Operator = 3
+ Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
+ Query_Filter_EQUAL Query_Filter_Operator = 5
+ Query_Filter_IN Query_Filter_Operator = 6
+ Query_Filter_EXISTS Query_Filter_Operator = 7
+)
+
+var Query_Filter_Operator_name = map[int32]string{
+ 1: "LESS_THAN",
+ 2: "LESS_THAN_OR_EQUAL",
+ 3: "GREATER_THAN",
+ 4: "GREATER_THAN_OR_EQUAL",
+ 5: "EQUAL",
+ 6: "IN",
+ 7: "EXISTS",
+}
+var Query_Filter_Operator_value = map[string]int32{
+ "LESS_THAN": 1,
+ "LESS_THAN_OR_EQUAL": 2,
+ "GREATER_THAN": 3,
+ "GREATER_THAN_OR_EQUAL": 4,
+ "EQUAL": 5,
+ "IN": 6,
+ "EXISTS": 7,
+}
+
+func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
+ p := new(Query_Filter_Operator)
+ *p = x
+ return p
+}
+func (x Query_Filter_Operator) String() string {
+ return proto.EnumName(Query_Filter_Operator_name, int32(x))
+}
+func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
+ if err != nil {
+ return err
+ }
+ *x = Query_Filter_Operator(value)
+ return nil
+}
+
+type Query_Order_Direction int32
+
+const (
+ Query_Order_ASCENDING Query_Order_Direction = 1
+ Query_Order_DESCENDING Query_Order_Direction = 2
+)
+
+var Query_Order_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Query_Order_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Query_Order_Direction) Enum() *Query_Order_Direction {
+ p := new(Query_Order_Direction)
+ *p = x
+ return p
+}
+func (x Query_Order_Direction) String() string {
+ return proto.EnumName(Query_Order_Direction_name, int32(x))
+}
+func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Query_Order_Direction(value)
+ return nil
+}
+
+type Error_ErrorCode int32
+
+const (
+ Error_BAD_REQUEST Error_ErrorCode = 1
+ Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
+ Error_INTERNAL_ERROR Error_ErrorCode = 3
+ Error_NEED_INDEX Error_ErrorCode = 4
+ Error_TIMEOUT Error_ErrorCode = 5
+ Error_PERMISSION_DENIED Error_ErrorCode = 6
+ Error_BIGTABLE_ERROR Error_ErrorCode = 7
+ Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
+ Error_CAPABILITY_DISABLED Error_ErrorCode = 9
+ Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
+ Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
+)
+
+var Error_ErrorCode_name = map[int32]string{
+ 1: "BAD_REQUEST",
+ 2: "CONCURRENT_TRANSACTION",
+ 3: "INTERNAL_ERROR",
+ 4: "NEED_INDEX",
+ 5: "TIMEOUT",
+ 6: "PERMISSION_DENIED",
+ 7: "BIGTABLE_ERROR",
+ 8: "COMMITTED_BUT_STILL_APPLYING",
+ 9: "CAPABILITY_DISABLED",
+ 10: "TRY_ALTERNATE_BACKEND",
+ 11: "SAFE_TIME_TOO_OLD",
+}
+var Error_ErrorCode_value = map[string]int32{
+ "BAD_REQUEST": 1,
+ "CONCURRENT_TRANSACTION": 2,
+ "INTERNAL_ERROR": 3,
+ "NEED_INDEX": 4,
+ "TIMEOUT": 5,
+ "PERMISSION_DENIED": 6,
+ "BIGTABLE_ERROR": 7,
+ "COMMITTED_BUT_STILL_APPLYING": 8,
+ "CAPABILITY_DISABLED": 9,
+ "TRY_ALTERNATE_BACKEND": 10,
+ "SAFE_TIME_TOO_OLD": 11,
+}
+
+func (x Error_ErrorCode) Enum() *Error_ErrorCode {
+ p := new(Error_ErrorCode)
+ *p = x
+ return p
+}
+func (x Error_ErrorCode) String() string {
+ return proto.EnumName(Error_ErrorCode_name, int32(x))
+}
+func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = Error_ErrorCode(value)
+ return nil
+}
+
+type PutRequest_AutoIdPolicy int32
+
+const (
+ PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
+ PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
+)
+
+var PutRequest_AutoIdPolicy_name = map[int32]string{
+ 0: "CURRENT",
+ 1: "SEQUENTIAL",
+}
+var PutRequest_AutoIdPolicy_value = map[string]int32{
+ "CURRENT": 0,
+ "SEQUENTIAL": 1,
+}
+
+func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
+ p := new(PutRequest_AutoIdPolicy)
+ *p = x
+ return p
+}
+func (x PutRequest_AutoIdPolicy) String() string {
+ return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
+}
+func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
+ if err != nil {
+ return err
+ }
+ *x = PutRequest_AutoIdPolicy(value)
+ return nil
+}
+
+type Action struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Action) Reset() { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage() {}
+
+type PropertyValue struct {
+ Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
+ BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
+ Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"`
+ Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"`
+ Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue) Reset() { *m = PropertyValue{} }
+func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue) ProtoMessage() {}
+
+func (m *PropertyValue) GetInt64Value() int64 {
+ if m != nil && m.Int64Value != nil {
+ return *m.Int64Value
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetBooleanValue() bool {
+ if m != nil && m.BooleanValue != nil {
+ return *m.BooleanValue
+ }
+ return false
+}
+
+func (m *PropertyValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *PropertyValue) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
+ if m != nil {
+ return m.Pointvalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
+ if m != nil {
+ return m.Uservalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
+ if m != nil {
+ return m.Referencevalue
+ }
+ return nil
+}
+
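+// The Get* accessors above are nil-safe: each may be called on a nil
+// *PropertyValue and simply returns the zero value when the receiver or
+// the field is unset. For example:
+//
+//	var v *PropertyValue
+//	_ = v.GetInt64Value() // 0, no panic, even though v is nil
+//
+// The nested Pointvalue, Uservalue, and Referencevalue fields are encoded
+// as proto2 groups (note the "group" entries in the struct tags), a legacy
+// encoding that protoc-gen-go still supports for this API.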
+type PropertyValue_PointValue struct {
+ X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
+ Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
+func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_PointValue) ProtoMessage() {}
+
+func (m *PropertyValue_PointValue) GetX() float64 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+func (m *PropertyValue_PointValue) GetY() float64 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
+type PropertyValue_UserValue struct {
+ Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
+func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_UserValue) ProtoMessage() {}
+
+func (m *PropertyValue_UserValue) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type PropertyValue_ReferenceValue struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
+func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
+ if m != nil {
+ return m.Pathelement
+ }
+ return nil
+}
+
+type PropertyValue_ReferenceValue_PathElement struct {
+ Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
+ *m = PropertyValue_ReferenceValue_PathElement{}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Property struct {
+ Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
+ MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"`
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
+ Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
+ Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
+ FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
+ Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Property) Reset() { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage() {}
+
+const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
+const Default_Property_Searchable bool = false
+const Default_Property_Locale string = "en"
+
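+// Field defaults declared in the .proto (the def=... entries in the struct
+// tags above) are materialized as the Default_Property_* constants, and the
+// getters fall back to them when a field is unset:
+//
+//	var p *Property
+//	_ = p.GetLocale() // "en" (Default_Property_Locale), not ""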
+func (m *Property) GetMeaning() Property_Meaning {
+ if m != nil && m.Meaning != nil {
+ return *m.Meaning
+ }
+ return Default_Property_Meaning
+}
+
+func (m *Property) GetMeaningUri() string {
+ if m != nil && m.MeaningUri != nil {
+ return *m.MeaningUri
+ }
+ return ""
+}
+
+func (m *Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Property) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Property) GetMultiple() bool {
+ if m != nil && m.Multiple != nil {
+ return *m.Multiple
+ }
+ return false
+}
+
+func (m *Property) GetSearchable() bool {
+ if m != nil && m.Searchable != nil {
+ return *m.Searchable
+ }
+ return Default_Property_Searchable
+}
+
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
+ if m != nil && m.FtsTokenizationOption != nil {
+ return *m.FtsTokenizationOption
+ }
+ return Property_HTML
+}
+
+func (m *Property) GetLocale() string {
+ if m != nil && m.Locale != nil {
+ return *m.Locale
+ }
+ return Default_Property_Locale
+}
+
+type Path struct {
+ Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path) Reset() { *m = Path{} }
+func (m *Path) String() string { return proto.CompactTextString(m) }
+func (*Path) ProtoMessage() {}
+
+func (m *Path) GetElement() []*Path_Element {
+ if m != nil {
+ return m.Element
+ }
+ return nil
+}
+
+type Path_Element struct {
+ Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path_Element) Reset() { *m = Path_Element{} }
+func (m *Path_Element) String() string { return proto.CompactTextString(m) }
+func (*Path_Element) ProtoMessage() {}
+
+func (m *Path_Element) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *Path_Element) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *Path_Element) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Reference struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Reference) Reset() { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage() {}
+
+func (m *Reference) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Reference) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Reference) GetPath() *Path {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+type User struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+
+func (m *User) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *User) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *User) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *User) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *User) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type EntityProto struct {
+ Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
+ EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"`
+ Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
+ Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
+ KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"`
+ Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EntityProto) Reset() { *m = EntityProto{} }
+func (m *EntityProto) String() string { return proto.CompactTextString(m) }
+func (*EntityProto) ProtoMessage() {}
+
+func (m *EntityProto) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *EntityProto) GetEntityGroup() *Path {
+ if m != nil {
+ return m.EntityGroup
+ }
+ return nil
+}
+
+func (m *EntityProto) GetOwner() *User {
+ if m != nil {
+ return m.Owner
+ }
+ return nil
+}
+
+func (m *EntityProto) GetKind() EntityProto_Kind {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return EntityProto_GD_CONTACT
+}
+
+func (m *EntityProto) GetKindUri() string {
+ if m != nil && m.KindUri != nil {
+ return *m.KindUri
+ }
+ return ""
+}
+
+func (m *EntityProto) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRawProperty() []*Property {
+ if m != nil {
+ return m.RawProperty
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRank() int32 {
+ if m != nil && m.Rank != nil {
+ return *m.Rank
+ }
+ return 0
+}
+
+type CompositeProperty struct {
+ IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"`
+ Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
+func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
+func (*CompositeProperty) ProtoMessage() {}
+
+func (m *CompositeProperty) GetIndexId() int64 {
+ if m != nil && m.IndexId != nil {
+ return *m.IndexId
+ }
+ return 0
+}
+
+func (m *CompositeProperty) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Index struct {
+ EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"`
+ Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
+ Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index) Reset() { *m = Index{} }
+func (m *Index) String() string { return proto.CompactTextString(m) }
+func (*Index) ProtoMessage() {}
+
+func (m *Index) GetEntityType() string {
+ if m != nil && m.EntityType != nil {
+ return *m.EntityType
+ }
+ return ""
+}
+
+func (m *Index) GetAncestor() bool {
+ if m != nil && m.Ancestor != nil {
+ return *m.Ancestor
+ }
+ return false
+}
+
+func (m *Index) GetProperty() []*Index_Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Index_Property struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index_Property) Reset() { *m = Index_Property{} }
+func (m *Index_Property) String() string { return proto.CompactTextString(m) }
+func (*Index_Property) ProtoMessage() {}
+
+const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
+
+func (m *Index_Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Index_Property) GetDirection() Index_Property_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Index_Property_Direction
+}
+
+type CompositeIndex struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+ Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
+ State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
+ OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
+func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndex) ProtoMessage() {}
+
+const Default_CompositeIndex_OnlyUseIfRequired bool = false
+
+func (m *CompositeIndex) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CompositeIndex) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *CompositeIndex) GetDefinition() *Index {
+ if m != nil {
+ return m.Definition
+ }
+ return nil
+}
+
+func (m *CompositeIndex) GetState() CompositeIndex_State {
+ if m != nil && m.State != nil {
+ return *m.State
+ }
+ return CompositeIndex_WRITE_ONLY
+}
+
+func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
+ if m != nil && m.OnlyUseIfRequired != nil {
+ return *m.OnlyUseIfRequired
+ }
+ return Default_CompositeIndex_OnlyUseIfRequired
+}
+
+type IndexPostfix struct {
+ IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"`
+ Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
+func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix) ProtoMessage() {}
+
+const Default_IndexPostfix_Before bool = true
+
+func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
+ if m != nil {
+ return m.IndexValue
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPostfix_Before
+}
+
+type IndexPostfix_IndexValue struct {
+ PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
+func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix_IndexValue) ProtoMessage() {}
+
+func (m *IndexPostfix_IndexValue) GetPropertyName() string {
+ if m != nil && m.PropertyName != nil {
+ return *m.PropertyName
+ }
+ return ""
+}
+
+func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type IndexPosition struct {
+ Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPosition) Reset() { *m = IndexPosition{} }
+func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
+func (*IndexPosition) ProtoMessage() {}
+
+const Default_IndexPosition_Before bool = true
+
+func (m *IndexPosition) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *IndexPosition) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPosition_Before
+}
+
+type Snapshot struct {
+ Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+
+func (m *Snapshot) GetTs() int64 {
+ if m != nil && m.Ts != nil {
+ return *m.Ts
+ }
+ return 0
+}
+
+type InternalHeader struct {
+ Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InternalHeader) Reset() { *m = InternalHeader{} }
+func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
+func (*InternalHeader) ProtoMessage() {}
+
+func (m *InternalHeader) GetQos() string {
+ if m != nil && m.Qos != nil {
+ return *m.Qos
+ }
+ return ""
+}
+
+type Transaction struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
+ App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
+ MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Transaction) Reset() { *m = Transaction{} }
+func (m *Transaction) String() string { return proto.CompactTextString(m) }
+func (*Transaction) ProtoMessage() {}
+
+const Default_Transaction_MarkChanges bool = false
+
+func (m *Transaction) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Transaction) GetHandle() uint64 {
+ if m != nil && m.Handle != nil {
+ return *m.Handle
+ }
+ return 0
+}
+
+func (m *Transaction) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Transaction) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_Transaction_MarkChanges
+}
+
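+// Query is the wire form of a datastore query: an entity kind plus an
+// optional ancestor, repeated Filter and Order groups, pagination via
+// Offset/Limit, and resumption via CompiledCursor/EndCompiledCursor. A
+// minimal keys-only query might look like the following sketch (the app
+// and kind names are illustrative only):
+//
+//	q := &Query{
+//		App:      proto.String("my-app"),
+//		Kind:     proto.String("Task"),
+//		Limit:    proto.Int32(10),
+//		KeysOnly: proto.Bool(true),
+//	}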
+type Query struct {
+ Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"`
+ Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
+ Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"`
+ SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"`
+ Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"`
+ Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
+ Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"`
+ RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"`
+ KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
+ Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
+ PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"`
+ GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"`
+ Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
+ MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"`
+ SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"`
+ PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query) Reset() { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage() {}
+
+const Default_Query_Offset int32 = 0
+const Default_Query_RequirePerfectPlan bool = false
+const Default_Query_KeysOnly bool = false
+const Default_Query_Compile bool = false
+const Default_Query_PersistOffset bool = false
+
+func (m *Query) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Query) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Query) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Query) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *Query) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+func (m *Query) GetFilter() []*Query_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (m *Query) GetSearchQuery() string {
+ if m != nil && m.SearchQuery != nil {
+ return *m.SearchQuery
+ }
+ return ""
+}
+
+func (m *Query) GetOrder() []*Query_Order {
+ if m != nil {
+ return m.Order
+ }
+ return nil
+}
+
+func (m *Query) GetHint() Query_Hint {
+ if m != nil && m.Hint != nil {
+ return *m.Hint
+ }
+ return Query_ORDER_FIRST
+}
+
+func (m *Query) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *Query) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *Query) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetEndCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.EndCompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *Query) GetRequirePerfectPlan() bool {
+ if m != nil && m.RequirePerfectPlan != nil {
+ return *m.RequirePerfectPlan
+ }
+ return Default_Query_RequirePerfectPlan
+}
+
+func (m *Query) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return Default_Query_KeysOnly
+}
+
+func (m *Query) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *Query) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_Query_Compile
+}
+
+func (m *Query) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *Query) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *Query) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetGroupByPropertyName() []string {
+ if m != nil {
+ return m.GroupByPropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return false
+}
+
+func (m *Query) GetMinSafeTimeSeconds() int64 {
+ if m != nil && m.MinSafeTimeSeconds != nil {
+ return *m.MinSafeTimeSeconds
+ }
+ return 0
+}
+
+func (m *Query) GetSafeReplicaName() []string {
+ if m != nil {
+ return m.SafeReplicaName
+ }
+ return nil
+}
+
+func (m *Query) GetPersistOffset() bool {
+ if m != nil && m.PersistOffset != nil {
+ return *m.PersistOffset
+ }
+ return Default_Query_PersistOffset
+}
+
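+// Query_Filter pairs a comparison operator with the property values it is
+// applied to; GetOp falls back to Query_Filter_LESS_THAN when the operator
+// is unset.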
+type Query_Filter struct {
+ Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Filter) Reset() { *m = Query_Filter{} }
+func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
+func (*Query_Filter) ProtoMessage() {}
+
+func (m *Query_Filter) GetOp() Query_Filter_Operator {
+ if m != nil && m.Op != nil {
+ return *m.Op
+ }
+ return Query_Filter_LESS_THAN
+}
+
+func (m *Query_Filter) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Query_Order struct {
+ Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
+ Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Order) Reset() { *m = Query_Order{} }
+func (m *Query_Order) String() string { return proto.CompactTextString(m) }
+func (*Query_Order) ProtoMessage() {}
+
+const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
+
+func (m *Query_Order) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *Query_Order) GetDirection() Query_Order_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Query_Order_Direction
+}
+
+type CompiledQuery struct {
+ Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"`
+ Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"`
+ IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"`
+ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"`
+ PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"`
+ DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"`
+ Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
+func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery) ProtoMessage() {}
+
+const Default_CompiledQuery_Offset int32 = 0
+
+func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
+ if m != nil {
+ return m.Primaryscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
+ if m != nil {
+ return m.Mergejoinscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetIndexDef() *Index {
+ if m != nil {
+ return m.IndexDef
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_CompiledQuery_Offset
+}
+
+func (m *CompiledQuery) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *CompiledQuery) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetDistinctInfixSize() int32 {
+ if m != nil && m.DistinctInfixSize != nil {
+ return *m.DistinctInfixSize
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
+ if m != nil {
+ return m.Entityfilter
+ }
+ return nil
+}
+
+type CompiledQuery_PrimaryScan struct {
+ IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"`
+ StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"`
+ EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"`
+ EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"`
+ StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"`
+ EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"`
+ EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
+func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
+
+func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
+ if m != nil && m.EndKey != nil {
+ return *m.EndKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
+ if m != nil && m.EndInclusive != nil {
+ return *m.EndInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
+ if m != nil {
+ return m.StartPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
+ if m != nil {
+ return m.EndPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
+ if m != nil && m.EndUnappliedLogTimestampUs != nil {
+ return *m.EndUnappliedLogTimestampUs
+ }
+ return 0
+}
+
+type CompiledQuery_MergeJoinScan struct {
+ IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"`
+ PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"`
+ ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
+func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
+
+const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
+
+func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
+ if m != nil {
+ return m.PrefixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
+ if m != nil && m.ValuePrefix != nil {
+ return *m.ValuePrefix
+ }
+ return Default_CompiledQuery_MergeJoinScan_ValuePrefix
+}
+
+type CompiledQuery_EntityFilter struct {
+ Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
+ Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
+func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_EntityFilter) ProtoMessage() {}
+
+const Default_CompiledQuery_EntityFilter_Distinct bool = false
+
+func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return Default_CompiledQuery_EntityFilter_Distinct
+}
+
+func (m *CompiledQuery_EntityFilter) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+type CompiledCursor struct {
+ Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
+func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor) ProtoMessage() {}
+
+func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
+ if m != nil {
+ return m.Position
+ }
+ return nil
+}
+
+type CompiledCursor_Position struct {
+ StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"`
+ Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"`
+ Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
+func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position) ProtoMessage() {}
+
+const Default_CompiledCursor_Position_StartInclusive bool = true
+
+func (m *CompiledCursor_Position) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
+ if m != nil {
+ return m.Indexvalue
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return Default_CompiledCursor_Position_StartInclusive
+}
+
+type CompiledCursor_Position_IndexValue struct {
+ Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
+func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
+
+func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
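+// Cursor is the compact continuation token handed back to callers: an
+// opaque 64-bit handle scoped to an app. CompiledCursor above is the
+// richer internal position (start key, index values, inclusiveness) used
+// to restart an index scan.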
+type Cursor struct {
+ Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
+ App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cursor) Reset() { *m = Cursor{} }
+func (m *Cursor) String() string { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage() {}
+
+func (m *Cursor) GetCursor() uint64 {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return 0
+}
+
+func (m *Cursor) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+type Error struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Error) Reset() { *m = Error{} }
+func (m *Error) String() string { return proto.CompactTextString(m) }
+func (*Error) ProtoMessage() {}
+
+type Cost struct {
+ IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"`
+ IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"`
+ EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"`
+ EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"`
+ Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"`
+ ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"`
+ IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost) Reset() { *m = Cost{} }
+func (m *Cost) String() string { return proto.CompactTextString(m) }
+func (*Cost) ProtoMessage() {}
+
+func (m *Cost) GetIndexWrites() int32 {
+ if m != nil && m.IndexWrites != nil {
+ return *m.IndexWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetIndexWriteBytes() int32 {
+ if m != nil && m.IndexWriteBytes != nil {
+ return *m.IndexWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWrites() int32 {
+ if m != nil && m.EntityWrites != nil {
+ return *m.EntityWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWriteBytes() int32 {
+ if m != nil && m.EntityWriteBytes != nil {
+ return *m.EntityWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetCommitcost() *Cost_CommitCost {
+ if m != nil {
+ return m.Commitcost
+ }
+ return nil
+}
+
+func (m *Cost) GetApproximateStorageDelta() int32 {
+ if m != nil && m.ApproximateStorageDelta != nil {
+ return *m.ApproximateStorageDelta
+ }
+ return 0
+}
+
+func (m *Cost) GetIdSequenceUpdates() int32 {
+ if m != nil && m.IdSequenceUpdates != nil {
+ return *m.IdSequenceUpdates
+ }
+ return 0
+}
+
+type Cost_CommitCost struct {
+ RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"`
+ RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
+func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
+func (*Cost_CommitCost) ProtoMessage() {}
+
+func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
+ if m != nil && m.RequestedEntityPuts != nil {
+ return *m.RequestedEntityPuts
+ }
+ return 0
+}
+
+func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
+ if m != nil && m.RequestedEntityDeletes != nil {
+ return *m.RequestedEntityDeletes
+ }
+ return 0
+}
+
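+// GetRequest is a batch lookup: it carries one or more entity keys, an
+// optional Transaction for transactional reads, and a Strong flag for
+// strongly consistent reads. AllowDeferred (default false) lets the
+// backend return only part of the batch and list the rest under
+// GetResponse.Deferred.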
+type GetRequest struct {
+ Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
+ AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetRequest) Reset() { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage() {}
+
+const Default_GetRequest_AllowDeferred bool = false
+
+func (m *GetRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *GetRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *GetRequest) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *GetRequest) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *GetRequest) GetAllowDeferred() bool {
+ if m != nil && m.AllowDeferred != nil {
+ return *m.AllowDeferred
+ }
+ return Default_GetRequest_AllowDeferred
+}
+
+type GetResponse struct {
+ Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"`
+ Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
+ InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse) Reset() { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage() {}
+
+const Default_GetResponse_InOrder bool = true
+
+func (m *GetResponse) GetEntity() []*GetResponse_Entity {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse) GetDeferred() []*Reference {
+ if m != nil {
+ return m.Deferred
+ }
+ return nil
+}
+
+func (m *GetResponse) GetInOrder() bool {
+ if m != nil && m.InOrder != nil {
+ return *m.InOrder
+ }
+ return Default_GetResponse_InOrder
+}
+
+type GetResponse_Entity struct {
+ Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
+ Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
+ Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
+func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
+func (*GetResponse_Entity) ProtoMessage() {}
+
+func (m *GetResponse_Entity) GetEntity() *EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
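+// PutRequest writes a batch of entities, optionally inside a Transaction.
+// AutoIdPolicy selects how ids are allocated for keys that lack one; it
+// defaults to PutRequest_CURRENT (see Default_PutRequest_AutoIdPolicy),
+// with PutRequest_SEQUENTIAL as the alternative.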
+type PutRequest struct {
+ Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
+ Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+
+const Default_PutRequest_Trusted bool = false
+const Default_PutRequest_Force bool = false
+const Default_PutRequest_MarkChanges bool = false
+const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
+
+func (m *PutRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *PutRequest) GetEntity() []*EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_PutRequest_Trusted
+}
+
+func (m *PutRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_PutRequest_Force
+}
+
+func (m *PutRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_PutRequest_MarkChanges
+}
+
+func (m *PutRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
+ if m != nil && m.AutoIdPolicy != nil {
+ return *m.AutoIdPolicy
+ }
+ return Default_PutRequest_AutoIdPolicy
+}
+
+type PutResponse struct {
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+
+func (m *PutResponse) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *PutResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *PutResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type TouchRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"`
+ Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchRequest) Reset() { *m = TouchRequest{} }
+func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
+func (*TouchRequest) ProtoMessage() {}
+
+const Default_TouchRequest_Force bool = false
+
+func (m *TouchRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_TouchRequest_Force
+}
+
+func (m *TouchRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type TouchResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchResponse) Reset() { *m = TouchResponse{} }
+func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
+func (*TouchResponse) ProtoMessage() {}
+
+func (m *TouchResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type DeleteRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
+func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRequest) ProtoMessage() {}
+
+const Default_DeleteRequest_Trusted bool = false
+const Default_DeleteRequest_Force bool = false
+const Default_DeleteRequest_MarkChanges bool = false
+
+func (m *DeleteRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_DeleteRequest_Trusted
+}
+
+func (m *DeleteRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_DeleteRequest_Force
+}
+
+func (m *DeleteRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_DeleteRequest_MarkChanges
+}
+
+func (m *DeleteRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type DeleteResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
+func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteResponse) ProtoMessage() {}
+
+func (m *DeleteResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *DeleteResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type NextRequest struct {
+ Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
+ Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
+ Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
+ Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NextRequest) Reset() { *m = NextRequest{} }
+func (m *NextRequest) String() string { return proto.CompactTextString(m) }
+func (*NextRequest) ProtoMessage() {}
+
+const Default_NextRequest_Offset int32 = 0
+const Default_NextRequest_Compile bool = false
+
+func (m *NextRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *NextRequest) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_NextRequest_Offset
+}
+
+func (m *NextRequest) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_NextRequest_Compile
+}
+
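+// QueryResult is one batch of results. MoreResults reports whether the
+// backend can return more; when it can, the Cursor here is what a
+// follow-up NextRequest expects, and SkippedResults accounts for rows
+// consumed by a query offset.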
+type QueryResult struct {
+ Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
+ Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
+ SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"`
+ MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"`
+ KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"`
+ IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"`
+ SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"`
+ CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
+ Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *QueryResult) Reset() { *m = QueryResult{} }
+func (m *QueryResult) String() string { return proto.CompactTextString(m) }
+func (*QueryResult) ProtoMessage() {}
+
+func (m *QueryResult) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetResult() []*EntityProto {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *QueryResult) GetSkippedResults() int32 {
+ if m != nil && m.SkippedResults != nil {
+ return *m.SkippedResults
+ }
+ return 0
+}
+
+func (m *QueryResult) GetMoreResults() bool {
+ if m != nil && m.MoreResults != nil {
+ return *m.MoreResults
+ }
+ return false
+}
+
+func (m *QueryResult) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetIndexOnly() bool {
+ if m != nil && m.IndexOnly != nil {
+ return *m.IndexOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetSmallOps() bool {
+ if m != nil && m.SmallOps != nil {
+ return *m.SmallOps
+ }
+ return false
+}
+
+func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
+ if m != nil {
+ return m.CompiledQuery
+ }
+ return nil
+}
+
+func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+func (m *QueryResult) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type AllocateIdsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"`
+ Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+ Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
+ Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage() {}
+
+func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetModelKey() *Reference {
+ if m != nil {
+ return m.ModelKey
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetSize() int64 {
+ if m != nil && m.Size != nil {
+ return *m.Size
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetMax() int64 {
+ if m != nil && m.Max != nil {
+ return *m.Max
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetReserve() []*Reference {
+ if m != nil {
+ return m.Reserve
+ }
+ return nil
+}
+
+type AllocateIdsResponse struct {
+ Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
+ End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
+ Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage() {}
+
+func (m *AllocateIdsResponse) GetStart() int64 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetEnd() int64 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type CompositeIndices struct {
+ Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
+func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndices) ProtoMessage() {}
+
+func (m *CompositeIndices) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+type AddActionsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+ Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
+func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
+func (*AddActionsRequest) ProtoMessage() {}
+
+func (m *AddActionsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetAction() []*Action {
+ if m != nil {
+ return m.Action
+ }
+ return nil
+}
+
+type AddActionsResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
+func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
+func (*AddActionsResponse) ProtoMessage() {}
+
+type BeginTransactionRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage() {}
+
+const Default_BeginTransactionRequest_AllowMultipleEg bool = false
+
+func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *BeginTransactionRequest) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
+ if m != nil && m.AllowMultipleEg != nil {
+ return *m.AllowMultipleEg
+ }
+ return Default_BeginTransactionRequest_AllowMultipleEg
+}
+
+type CommitResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse) Reset() { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage() {}
+
+func (m *CommitResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type CommitResponse_Version struct {
+ RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"`
+ Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
+func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse_Version) ProtoMessage() {}
+
+func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
+ if m != nil {
+ return m.RootEntityKey
+ }
+ return nil
+}
+
+func (m *CommitResponse_Version) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func init() {
+}
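The generated accessors above all follow one nil-safe pattern: each Get* method tolerates both a nil receiver and an unset (nil) pointer field, falling back to either the Go zero value or the Default_* constant derived from the .proto default. A minimal caller-side sketch, assuming the standard github.com/golang/protobuf/proto helpers (proto.Int32, proto.Bool, and friends, which return pointers to their arguments):

    // Hypothetical caller, not part of the vendored file.
    req := &NextRequest{
        Count: proto.Int32(50),
        // Offset and Compile left unset; Cursor (required) omitted for brevity.
    }
    _ = req.GetCount()   // 50
    _ = req.GetOffset()  // 0, via Default_NextRequest_Offset
    _ = req.GetCompile() // false, via Default_NextRequest_Compile

    var nilReq *NextRequest
    _ = nilReq.GetCount() // 0; the getters are safe even on a nil receiver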
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100755
index 000000000..e76f126ff
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,541 @@
+syntax = "proto2";
+option go_package = "datastore";
+
+package appengine;
+
+message Action {}
+
+message PropertyValue {
+ optional int64 int64Value = 1;
+ optional bool booleanValue = 2;
+ optional string stringValue = 3;
+ optional double doubleValue = 4;
+
+ optional group PointValue = 5 {
+ required double x = 6;
+ required double y = 7;
+ }
+
+ optional group UserValue = 8 {
+ required string email = 9;
+ required string auth_domain = 10;
+ optional string nickname = 11;
+ optional string federated_identity = 21;
+ optional string federated_provider = 22;
+ }
+
+ optional group ReferenceValue = 12 {
+ required string app = 13;
+ optional string name_space = 20;
+ repeated group PathElement = 14 {
+ required string type = 15;
+ optional int64 id = 16;
+ optional string name = 17;
+ }
+ }
+}
+
+message Property {
+ enum Meaning {
+ NO_MEANING = 0;
+ BLOB = 14;
+ TEXT = 15;
+ BYTESTRING = 16;
+
+ ATOM_CATEGORY = 1;
+ ATOM_LINK = 2;
+ ATOM_TITLE = 3;
+ ATOM_CONTENT = 4;
+ ATOM_SUMMARY = 5;
+ ATOM_AUTHOR = 6;
+
+ GD_WHEN = 7;
+ GD_EMAIL = 8;
+ GEORSS_POINT = 9;
+ GD_IM = 10;
+
+ GD_PHONENUMBER = 11;
+ GD_POSTALADDRESS = 12;
+
+ GD_RATING = 13;
+
+ BLOBKEY = 17;
+ ENTITY_PROTO = 19;
+
+ INDEX_VALUE = 18;
+ };
+
+ optional Meaning meaning = 1 [default = NO_MEANING];
+ optional string meaning_uri = 2;
+
+ required string name = 3;
+
+ required PropertyValue value = 5;
+
+ required bool multiple = 4;
+
+ optional bool searchable = 6 [default=false];
+
+ enum FtsTokenizationOption {
+ HTML = 1;
+ ATOM = 2;
+ }
+
+ optional FtsTokenizationOption fts_tokenization_option = 8;
+
+ optional string locale = 9 [default = "en"];
+}
+
+message Path {
+ repeated group Element = 1 {
+ required string type = 2;
+ optional int64 id = 3;
+ optional string name = 4;
+ }
+}
+
+message Reference {
+ required string app = 13;
+ optional string name_space = 20;
+ required Path path = 14;
+}
+
+message User {
+ required string email = 1;
+ required string auth_domain = 2;
+ optional string nickname = 3;
+ optional string federated_identity = 6;
+ optional string federated_provider = 7;
+}
+
+message EntityProto {
+ required Reference key = 13;
+ required Path entity_group = 16;
+ optional User owner = 17;
+
+ enum Kind {
+ GD_CONTACT = 1;
+ GD_EVENT = 2;
+ GD_MESSAGE = 3;
+ }
+ optional Kind kind = 4;
+ optional string kind_uri = 5;
+
+ repeated Property property = 14;
+ repeated Property raw_property = 15;
+
+ optional int32 rank = 18;
+}
+
+message CompositeProperty {
+ required int64 index_id = 1;
+ repeated string value = 2;
+}
+
+message Index {
+ required string entity_type = 1;
+ required bool ancestor = 5;
+ repeated group Property = 2 {
+ required string name = 3;
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+ optional Direction direction = 4 [default = ASCENDING];
+ }
+}
+
+message CompositeIndex {
+ required string app_id = 1;
+ required int64 id = 2;
+ required Index definition = 3;
+
+ enum State {
+ WRITE_ONLY = 1;
+ READ_WRITE = 2;
+ DELETED = 3;
+ ERROR = 4;
+ }
+ required State state = 4;
+
+ optional bool only_use_if_required = 6 [default = false];
+}
+
+message IndexPostfix {
+ message IndexValue {
+ required string property_name = 1;
+ required PropertyValue value = 2;
+ }
+
+ repeated IndexValue index_value = 1;
+
+ optional Reference key = 2;
+
+ optional bool before = 3 [default=true];
+}
+
+message IndexPosition {
+ optional string key = 1;
+
+ optional bool before = 2 [default=true];
+}
+
+message Snapshot {
+ enum Status {
+ INACTIVE = 0;
+ ACTIVE = 1;
+ }
+
+ required int64 ts = 1;
+}
+
+message InternalHeader {
+ optional string qos = 1;
+}
+
+message Transaction {
+ optional InternalHeader header = 4;
+ required fixed64 handle = 1;
+ required string app = 2;
+ optional bool mark_changes = 3 [default = false];
+}
+
+message Query {
+ optional InternalHeader header = 39;
+
+ required string app = 1;
+ optional string name_space = 29;
+
+ optional string kind = 3;
+ optional Reference ancestor = 17;
+
+ repeated group Filter = 4 {
+ enum Operator {
+ LESS_THAN = 1;
+ LESS_THAN_OR_EQUAL = 2;
+ GREATER_THAN = 3;
+ GREATER_THAN_OR_EQUAL = 4;
+ EQUAL = 5;
+ IN = 6;
+ EXISTS = 7;
+ }
+
+ required Operator op = 6;
+ repeated Property property = 14;
+ }
+
+ optional string search_query = 8;
+
+ repeated group Order = 9 {
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+
+ required string property = 10;
+ optional Direction direction = 11 [default = ASCENDING];
+ }
+
+ enum Hint {
+ ORDER_FIRST = 1;
+ ANCESTOR_FIRST = 2;
+ FILTER_FIRST = 3;
+ }
+ optional Hint hint = 18;
+
+ optional int32 count = 23;
+
+ optional int32 offset = 12 [default = 0];
+
+ optional int32 limit = 16;
+
+ optional CompiledCursor compiled_cursor = 30;
+ optional CompiledCursor end_compiled_cursor = 31;
+
+ repeated CompositeIndex composite_index = 19;
+
+ optional bool require_perfect_plan = 20 [default = false];
+
+ optional bool keys_only = 21 [default = false];
+
+ optional Transaction transaction = 22;
+
+ optional bool compile = 25 [default = false];
+
+ optional int64 failover_ms = 26;
+
+ optional bool strong = 32;
+
+ repeated string property_name = 33;
+
+ repeated string group_by_property_name = 34;
+
+ optional bool distinct = 24;
+
+ optional int64 min_safe_time_seconds = 35;
+
+ repeated string safe_replica_name = 36;
+
+ optional bool persist_offset = 37 [default=false];
+}
+
+message CompiledQuery {
+ required group PrimaryScan = 1 {
+ optional string index_name = 2;
+
+ optional string start_key = 3;
+ optional bool start_inclusive = 4;
+ optional string end_key = 5;
+ optional bool end_inclusive = 6;
+
+ repeated string start_postfix_value = 22;
+ repeated string end_postfix_value = 23;
+
+ optional int64 end_unapplied_log_timestamp_us = 19;
+ }
+
+ repeated group MergeJoinScan = 7 {
+ required string index_name = 8;
+
+ repeated string prefix_value = 9;
+
+ optional bool value_prefix = 20 [default=false];
+ }
+
+ optional Index index_def = 21;
+
+ optional int32 offset = 10 [default = 0];
+
+ optional int32 limit = 11;
+
+ required bool keys_only = 12;
+
+ repeated string property_name = 24;
+
+ optional int32 distinct_infix_size = 25;
+
+ optional group EntityFilter = 13 {
+ optional bool distinct = 14 [default=false];
+
+ optional string kind = 17;
+ optional Reference ancestor = 18;
+ }
+}
+
+message CompiledCursor {
+ optional group Position = 2 {
+ optional string start_key = 27;
+
+ repeated group IndexValue = 29 {
+ optional string property = 30;
+ required PropertyValue value = 31;
+ }
+
+ optional Reference key = 32;
+
+ optional bool start_inclusive = 28 [default=true];
+ }
+}
+
+message Cursor {
+ required fixed64 cursor = 1;
+
+ optional string app = 2;
+}
+
+message Error {
+ enum ErrorCode {
+ BAD_REQUEST = 1;
+ CONCURRENT_TRANSACTION = 2;
+ INTERNAL_ERROR = 3;
+ NEED_INDEX = 4;
+ TIMEOUT = 5;
+ PERMISSION_DENIED = 6;
+ BIGTABLE_ERROR = 7;
+ COMMITTED_BUT_STILL_APPLYING = 8;
+ CAPABILITY_DISABLED = 9;
+ TRY_ALTERNATE_BACKEND = 10;
+ SAFE_TIME_TOO_OLD = 11;
+ }
+}
+
+message Cost {
+ optional int32 index_writes = 1;
+ optional int32 index_write_bytes = 2;
+ optional int32 entity_writes = 3;
+ optional int32 entity_write_bytes = 4;
+ optional group CommitCost = 5 {
+ optional int32 requested_entity_puts = 6;
+ optional int32 requested_entity_deletes = 7;
+ };
+ optional int32 approximate_storage_delta = 8;
+ optional int32 id_sequence_updates = 9;
+}
+
+message GetRequest {
+ optional InternalHeader header = 6;
+
+ repeated Reference key = 1;
+ optional Transaction transaction = 2;
+
+ optional int64 failover_ms = 3;
+
+ optional bool strong = 4;
+
+ optional bool allow_deferred = 5 [default=false];
+}
+
+message GetResponse {
+ repeated group Entity = 1 {
+ optional EntityProto entity = 2;
+ optional Reference key = 4;
+
+ optional int64 version = 3;
+ }
+
+ repeated Reference deferred = 5;
+
+ optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+ optional InternalHeader header = 11;
+
+ repeated EntityProto entity = 1;
+ optional Transaction transaction = 2;
+ repeated CompositeIndex composite_index = 3;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+
+ enum AutoIdPolicy {
+ CURRENT = 0;
+ SEQUENTIAL = 1;
+ }
+ optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+ repeated Reference key = 1;
+ optional Cost cost = 2;
+ repeated int64 version = 3;
+}
+
+message TouchRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 1;
+ repeated CompositeIndex composite_index = 2;
+ optional bool force = 3 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+ optional Cost cost = 1;
+}
+
+message DeleteRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 6;
+ optional Transaction transaction = 5;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+ optional Cost cost = 1;
+ repeated int64 version = 3;
+}
+
+message NextRequest {
+ optional InternalHeader header = 5;
+
+ required Cursor cursor = 1;
+ optional int32 count = 2;
+
+ optional int32 offset = 4 [default = 0];
+
+ optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+ optional Cursor cursor = 1;
+
+ repeated EntityProto result = 2;
+
+ optional int32 skipped_results = 7;
+
+ required bool more_results = 3;
+
+ optional bool keys_only = 4;
+
+ optional bool index_only = 9;
+
+ optional bool small_ops = 10;
+
+ optional CompiledQuery compiled_query = 5;
+
+ optional CompiledCursor compiled_cursor = 6;
+
+ repeated CompositeIndex index = 8;
+
+ repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+ optional InternalHeader header = 4;
+
+ optional Reference model_key = 1;
+
+ optional int64 size = 2;
+
+ optional int64 max = 3;
+
+ repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+ required int64 start = 1;
+ required int64 end = 2;
+ optional Cost cost = 3;
+}
+
+message CompositeIndices {
+ repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+ optional InternalHeader header = 3;
+
+ required Transaction transaction = 1;
+ repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+ optional InternalHeader header = 3;
+
+ required string app = 1;
+ optional bool allow_multiple_eg = 2 [default = false];
+}
+
+message CommitResponse {
+ optional Cost cost = 1;
+
+ repeated group Version = 3 {
+ required Reference root_entity_key = 4;
+ required int64 version = 5;
+ }
+}
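Each [default = ...] annotation in this .proto is the source of the matching def= struct tag and Default_* constant in datastore_v3.pb.go earlier in this diff. For example, the NextRequest offset pair (both lines quoted from this commit):

    optional int32 offset = 4 [default = 0];
    // generates, in datastore_v3.pb.go:
    //   Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
    //   const Default_NextRequest_Offset int32 = 0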
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 000000000..d538701ab
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,14 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import netcontext "golang.org/x/net/context"
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+func AppID(c netcontext.Context) string {
+ return appID(FullyQualifiedAppID(c))
+}
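AppID composes two helpers that live elsewhere in this package: FullyQualifiedAppID reads the partition-qualified ID out of the context (the "partition~app" form built up in identity_vm.go below), and appID strips that partition prefix back off. A rough approximation of the stripping step, assuming the real helper behaves like this for partition-only IDs:

    // appIDSketch is illustrative only; the vendored implementation (app_id.go
    // upstream) also handles domain-qualified IDs of the form "domain:app".
    func appIDSketch(fullyQualified string) string {
        if i := strings.Index(fullyQualified, "~"); i != -1 {
            return fullyQualified[i+1:]
        }
        return fullyQualified
    }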
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 000000000..b59603f13
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,57 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "appengine"
+
+ netcontext "golang.org/x/net/context"
+)
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.DefaultVersionHostname(c)
+}
+
+func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
+func ServerSoftware() string { return appengine.ServerSoftware() }
+func InstanceID() string { return appengine.InstanceID() }
+func IsDevAppServer() bool { return appengine.IsDevAppServer() }
+
+func RequestID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.RequestID(c)
+}
+
+func ModuleName(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.ModuleName(c)
+}
+
+func VersionID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return appengine.VersionID(c)
+}
+
+func fullyQualifiedAppID(ctx netcontext.Context) string {
+ c := fromContext(ctx)
+ if c == nil {
+ panic(errNotAppEngineContext)
+ }
+ return c.FullyQualifiedAppID()
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 000000000..d5fa75be7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,101 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "net/http"
+ "os"
+
+ netcontext "golang.org/x/net/context"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+const (
+ hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+ hRequestLogId = "X-AppEngine-Request-Log-Id"
+ hDatacenter = "X-AppEngine-Datacenter"
+)
+
+func ctxHeaders(ctx netcontext.Context) http.Header {
+ c := fromContext(ctx)
+ if c == nil {
+ return nil
+ }
+ return c.Request().Header
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDefaultVersionHostname)
+}
+
+func RequestID(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hRequestLogId)
+}
+
+func Datacenter(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDatacenter)
+}
+
+func ServerSoftware() string {
+ // TODO(dsymonds): Remove fallback when we've verified this.
+ if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+ return s
+ }
+ return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName(_ netcontext.Context) string {
+ if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID(_ netcontext.Context) string {
+ if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
+ return s1 + "." + s2
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
+}
+
+func InstanceID() string {
+ if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+ // gae_project has everything except the partition prefix.
+ appID := os.Getenv("GAE_LONG_APP_ID")
+ if appID == "" {
+ appID = string(mustGetMetadata("instance/attributes/gae_project"))
+ }
+ return appID
+}
+
+func fullyQualifiedAppID(_ netcontext.Context) string {
+ appID := partitionlessAppID()
+
+ part := os.Getenv("GAE_PARTITION")
+ if part == "" {
+ part = string(mustGetMetadata("instance/attributes/gae_partition"))
+ }
+
+ if part != "" {
+ appID = part + "~" + appID
+ }
+ return appID
+}
+
+func IsDevAppServer() bool {
+ return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+}
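ModuleName, VersionID, InstanceID and the app-ID helpers above all share one lookup pattern: prefer an environment variable set by the runtime, then fall back to the GCE metadata server via mustGetMetadata (which panics on failure). Condensed into a single hypothetical helper, which the vendored file simply inlines at each call site:

    // envOrMetadata is an illustrative refactoring, not part of the vendored code.
    func envOrMetadata(envKey, metadataPath string) string {
        if s := os.Getenv(envKey); s != "" {
            return s
        }
        return string(mustGetMetadata(metadataPath))
    }

    // InstanceID, for instance, reduces to:
    //   envOrMetadata("GAE_MODULE_INSTANCE", "instance/attributes/gae_backend_instance")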
diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.pb.go b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go
new file mode 100644
index 000000000..ba7c72206
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go
@@ -0,0 +1,845 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/image/images_service.proto
+// DO NOT EDIT!
+
+/*
+Package image is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/image/images_service.proto
+
+It has these top-level messages:
+ ImagesServiceError
+ ImagesServiceTransform
+ Transform
+ ImageData
+ InputSettings
+ OutputSettings
+ ImagesTransformRequest
+ ImagesTransformResponse
+ CompositeImageOptions
+ ImagesCanvas
+ ImagesCompositeRequest
+ ImagesCompositeResponse
+ ImagesHistogramRequest
+ ImagesHistogram
+ ImagesHistogramResponse
+ ImagesGetUrlBaseRequest
+ ImagesGetUrlBaseResponse
+ ImagesDeleteUrlBaseRequest
+ ImagesDeleteUrlBaseResponse
+*/
+package image
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ImagesServiceError_ErrorCode int32
+
+const (
+ ImagesServiceError_UNSPECIFIED_ERROR ImagesServiceError_ErrorCode = 1
+ ImagesServiceError_BAD_TRANSFORM_DATA ImagesServiceError_ErrorCode = 2
+ ImagesServiceError_NOT_IMAGE ImagesServiceError_ErrorCode = 3
+ ImagesServiceError_BAD_IMAGE_DATA ImagesServiceError_ErrorCode = 4
+ ImagesServiceError_IMAGE_TOO_LARGE ImagesServiceError_ErrorCode = 5
+ ImagesServiceError_INVALID_BLOB_KEY ImagesServiceError_ErrorCode = 6
+ ImagesServiceError_ACCESS_DENIED ImagesServiceError_ErrorCode = 7
+ ImagesServiceError_OBJECT_NOT_FOUND ImagesServiceError_ErrorCode = 8
+)
+
+var ImagesServiceError_ErrorCode_name = map[int32]string{
+ 1: "UNSPECIFIED_ERROR",
+ 2: "BAD_TRANSFORM_DATA",
+ 3: "NOT_IMAGE",
+ 4: "BAD_IMAGE_DATA",
+ 5: "IMAGE_TOO_LARGE",
+ 6: "INVALID_BLOB_KEY",
+ 7: "ACCESS_DENIED",
+ 8: "OBJECT_NOT_FOUND",
+}
+var ImagesServiceError_ErrorCode_value = map[string]int32{
+ "UNSPECIFIED_ERROR": 1,
+ "BAD_TRANSFORM_DATA": 2,
+ "NOT_IMAGE": 3,
+ "BAD_IMAGE_DATA": 4,
+ "IMAGE_TOO_LARGE": 5,
+ "INVALID_BLOB_KEY": 6,
+ "ACCESS_DENIED": 7,
+ "OBJECT_NOT_FOUND": 8,
+}
+
+func (x ImagesServiceError_ErrorCode) Enum() *ImagesServiceError_ErrorCode {
+ p := new(ImagesServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ImagesServiceError_ErrorCode) String() string {
+ return proto.EnumName(ImagesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ImagesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ImagesServiceError_ErrorCode_value, data, "ImagesServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ImagesServiceError_ErrorCode(value)
+ return nil
+}
+
+type ImagesServiceTransform_Type int32
+
+const (
+ ImagesServiceTransform_RESIZE ImagesServiceTransform_Type = 1
+ ImagesServiceTransform_ROTATE ImagesServiceTransform_Type = 2
+ ImagesServiceTransform_HORIZONTAL_FLIP ImagesServiceTransform_Type = 3
+ ImagesServiceTransform_VERTICAL_FLIP ImagesServiceTransform_Type = 4
+ ImagesServiceTransform_CROP ImagesServiceTransform_Type = 5
+ ImagesServiceTransform_IM_FEELING_LUCKY ImagesServiceTransform_Type = 6
+)
+
+var ImagesServiceTransform_Type_name = map[int32]string{
+ 1: "RESIZE",
+ 2: "ROTATE",
+ 3: "HORIZONTAL_FLIP",
+ 4: "VERTICAL_FLIP",
+ 5: "CROP",
+ 6: "IM_FEELING_LUCKY",
+}
+var ImagesServiceTransform_Type_value = map[string]int32{
+ "RESIZE": 1,
+ "ROTATE": 2,
+ "HORIZONTAL_FLIP": 3,
+ "VERTICAL_FLIP": 4,
+ "CROP": 5,
+ "IM_FEELING_LUCKY": 6,
+}
+
+func (x ImagesServiceTransform_Type) Enum() *ImagesServiceTransform_Type {
+ p := new(ImagesServiceTransform_Type)
+ *p = x
+ return p
+}
+func (x ImagesServiceTransform_Type) String() string {
+ return proto.EnumName(ImagesServiceTransform_Type_name, int32(x))
+}
+func (x *ImagesServiceTransform_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ImagesServiceTransform_Type_value, data, "ImagesServiceTransform_Type")
+ if err != nil {
+ return err
+ }
+ *x = ImagesServiceTransform_Type(value)
+ return nil
+}
+
+type InputSettings_ORIENTATION_CORRECTION_TYPE int32
+
+const (
+ InputSettings_UNCHANGED_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 0
+ InputSettings_CORRECT_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 1
+)
+
+var InputSettings_ORIENTATION_CORRECTION_TYPE_name = map[int32]string{
+ 0: "UNCHANGED_ORIENTATION",
+ 1: "CORRECT_ORIENTATION",
+}
+var InputSettings_ORIENTATION_CORRECTION_TYPE_value = map[string]int32{
+ "UNCHANGED_ORIENTATION": 0,
+ "CORRECT_ORIENTATION": 1,
+}
+
+func (x InputSettings_ORIENTATION_CORRECTION_TYPE) Enum() *InputSettings_ORIENTATION_CORRECTION_TYPE {
+ p := new(InputSettings_ORIENTATION_CORRECTION_TYPE)
+ *p = x
+ return p
+}
+func (x InputSettings_ORIENTATION_CORRECTION_TYPE) String() string {
+ return proto.EnumName(InputSettings_ORIENTATION_CORRECTION_TYPE_name, int32(x))
+}
+func (x *InputSettings_ORIENTATION_CORRECTION_TYPE) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(InputSettings_ORIENTATION_CORRECTION_TYPE_value, data, "InputSettings_ORIENTATION_CORRECTION_TYPE")
+ if err != nil {
+ return err
+ }
+ *x = InputSettings_ORIENTATION_CORRECTION_TYPE(value)
+ return nil
+}
+
+type OutputSettings_MIME_TYPE int32
+
+const (
+ OutputSettings_PNG OutputSettings_MIME_TYPE = 0
+ OutputSettings_JPEG OutputSettings_MIME_TYPE = 1
+ OutputSettings_WEBP OutputSettings_MIME_TYPE = 2
+)
+
+var OutputSettings_MIME_TYPE_name = map[int32]string{
+ 0: "PNG",
+ 1: "JPEG",
+ 2: "WEBP",
+}
+var OutputSettings_MIME_TYPE_value = map[string]int32{
+ "PNG": 0,
+ "JPEG": 1,
+ "WEBP": 2,
+}
+
+func (x OutputSettings_MIME_TYPE) Enum() *OutputSettings_MIME_TYPE {
+ p := new(OutputSettings_MIME_TYPE)
+ *p = x
+ return p
+}
+func (x OutputSettings_MIME_TYPE) String() string {
+ return proto.EnumName(OutputSettings_MIME_TYPE_name, int32(x))
+}
+func (x *OutputSettings_MIME_TYPE) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(OutputSettings_MIME_TYPE_value, data, "OutputSettings_MIME_TYPE")
+ if err != nil {
+ return err
+ }
+ *x = OutputSettings_MIME_TYPE(value)
+ return nil
+}
+
+type CompositeImageOptions_ANCHOR int32
+
+const (
+ CompositeImageOptions_TOP_LEFT CompositeImageOptions_ANCHOR = 0
+ CompositeImageOptions_TOP CompositeImageOptions_ANCHOR = 1
+ CompositeImageOptions_TOP_RIGHT CompositeImageOptions_ANCHOR = 2
+ CompositeImageOptions_LEFT CompositeImageOptions_ANCHOR = 3
+ CompositeImageOptions_CENTER CompositeImageOptions_ANCHOR = 4
+ CompositeImageOptions_RIGHT CompositeImageOptions_ANCHOR = 5
+ CompositeImageOptions_BOTTOM_LEFT CompositeImageOptions_ANCHOR = 6
+ CompositeImageOptions_BOTTOM CompositeImageOptions_ANCHOR = 7
+ CompositeImageOptions_BOTTOM_RIGHT CompositeImageOptions_ANCHOR = 8
+)
+
+var CompositeImageOptions_ANCHOR_name = map[int32]string{
+ 0: "TOP_LEFT",
+ 1: "TOP",
+ 2: "TOP_RIGHT",
+ 3: "LEFT",
+ 4: "CENTER",
+ 5: "RIGHT",
+ 6: "BOTTOM_LEFT",
+ 7: "BOTTOM",
+ 8: "BOTTOM_RIGHT",
+}
+var CompositeImageOptions_ANCHOR_value = map[string]int32{
+ "TOP_LEFT": 0,
+ "TOP": 1,
+ "TOP_RIGHT": 2,
+ "LEFT": 3,
+ "CENTER": 4,
+ "RIGHT": 5,
+ "BOTTOM_LEFT": 6,
+ "BOTTOM": 7,
+ "BOTTOM_RIGHT": 8,
+}
+
+func (x CompositeImageOptions_ANCHOR) Enum() *CompositeImageOptions_ANCHOR {
+ p := new(CompositeImageOptions_ANCHOR)
+ *p = x
+ return p
+}
+func (x CompositeImageOptions_ANCHOR) String() string {
+ return proto.EnumName(CompositeImageOptions_ANCHOR_name, int32(x))
+}
+func (x *CompositeImageOptions_ANCHOR) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeImageOptions_ANCHOR_value, data, "CompositeImageOptions_ANCHOR")
+ if err != nil {
+ return err
+ }
+ *x = CompositeImageOptions_ANCHOR(value)
+ return nil
+}
+
+type ImagesServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesServiceError) Reset() { *m = ImagesServiceError{} }
+func (m *ImagesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ImagesServiceError) ProtoMessage() {}
+
+type ImagesServiceTransform struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesServiceTransform) Reset() { *m = ImagesServiceTransform{} }
+func (m *ImagesServiceTransform) String() string { return proto.CompactTextString(m) }
+func (*ImagesServiceTransform) ProtoMessage() {}
+
+type Transform struct {
+ Width *int32 `protobuf:"varint,1,opt,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"`
+ CropToFit *bool `protobuf:"varint,11,opt,name=crop_to_fit,def=0" json:"crop_to_fit,omitempty"`
+ CropOffsetX *float32 `protobuf:"fixed32,12,opt,name=crop_offset_x,def=0.5" json:"crop_offset_x,omitempty"`
+ CropOffsetY *float32 `protobuf:"fixed32,13,opt,name=crop_offset_y,def=0.5" json:"crop_offset_y,omitempty"`
+ Rotate *int32 `protobuf:"varint,3,opt,name=rotate,def=0" json:"rotate,omitempty"`
+ HorizontalFlip *bool `protobuf:"varint,4,opt,name=horizontal_flip,def=0" json:"horizontal_flip,omitempty"`
+ VerticalFlip *bool `protobuf:"varint,5,opt,name=vertical_flip,def=0" json:"vertical_flip,omitempty"`
+ CropLeftX *float32 `protobuf:"fixed32,6,opt,name=crop_left_x,def=0" json:"crop_left_x,omitempty"`
+ CropTopY *float32 `protobuf:"fixed32,7,opt,name=crop_top_y,def=0" json:"crop_top_y,omitempty"`
+ CropRightX *float32 `protobuf:"fixed32,8,opt,name=crop_right_x,def=1" json:"crop_right_x,omitempty"`
+ CropBottomY *float32 `protobuf:"fixed32,9,opt,name=crop_bottom_y,def=1" json:"crop_bottom_y,omitempty"`
+ Autolevels *bool `protobuf:"varint,10,opt,name=autolevels,def=0" json:"autolevels,omitempty"`
+ AllowStretch *bool `protobuf:"varint,14,opt,name=allow_stretch,def=0" json:"allow_stretch,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Transform) Reset() { *m = Transform{} }
+func (m *Transform) String() string { return proto.CompactTextString(m) }
+func (*Transform) ProtoMessage() {}
+
+const Default_Transform_CropToFit bool = false
+const Default_Transform_CropOffsetX float32 = 0.5
+const Default_Transform_CropOffsetY float32 = 0.5
+const Default_Transform_Rotate int32 = 0
+const Default_Transform_HorizontalFlip bool = false
+const Default_Transform_VerticalFlip bool = false
+const Default_Transform_CropLeftX float32 = 0
+const Default_Transform_CropTopY float32 = 0
+const Default_Transform_CropRightX float32 = 1
+const Default_Transform_CropBottomY float32 = 1
+const Default_Transform_Autolevels bool = false
+const Default_Transform_AllowStretch bool = false
+
+func (m *Transform) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *Transform) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+func (m *Transform) GetCropToFit() bool {
+ if m != nil && m.CropToFit != nil {
+ return *m.CropToFit
+ }
+ return Default_Transform_CropToFit
+}
+
+func (m *Transform) GetCropOffsetX() float32 {
+ if m != nil && m.CropOffsetX != nil {
+ return *m.CropOffsetX
+ }
+ return Default_Transform_CropOffsetX
+}
+
+func (m *Transform) GetCropOffsetY() float32 {
+ if m != nil && m.CropOffsetY != nil {
+ return *m.CropOffsetY
+ }
+ return Default_Transform_CropOffsetY
+}
+
+func (m *Transform) GetRotate() int32 {
+ if m != nil && m.Rotate != nil {
+ return *m.Rotate
+ }
+ return Default_Transform_Rotate
+}
+
+func (m *Transform) GetHorizontalFlip() bool {
+ if m != nil && m.HorizontalFlip != nil {
+ return *m.HorizontalFlip
+ }
+ return Default_Transform_HorizontalFlip
+}
+
+func (m *Transform) GetVerticalFlip() bool {
+ if m != nil && m.VerticalFlip != nil {
+ return *m.VerticalFlip
+ }
+ return Default_Transform_VerticalFlip
+}
+
+func (m *Transform) GetCropLeftX() float32 {
+ if m != nil && m.CropLeftX != nil {
+ return *m.CropLeftX
+ }
+ return Default_Transform_CropLeftX
+}
+
+func (m *Transform) GetCropTopY() float32 {
+ if m != nil && m.CropTopY != nil {
+ return *m.CropTopY
+ }
+ return Default_Transform_CropTopY
+}
+
+func (m *Transform) GetCropRightX() float32 {
+ if m != nil && m.CropRightX != nil {
+ return *m.CropRightX
+ }
+ return Default_Transform_CropRightX
+}
+
+func (m *Transform) GetCropBottomY() float32 {
+ if m != nil && m.CropBottomY != nil {
+ return *m.CropBottomY
+ }
+ return Default_Transform_CropBottomY
+}
+
+func (m *Transform) GetAutolevels() bool {
+ if m != nil && m.Autolevels != nil {
+ return *m.Autolevels
+ }
+ return Default_Transform_Autolevels
+}
+
+func (m *Transform) GetAllowStretch() bool {
+ if m != nil && m.AllowStretch != nil {
+ return *m.AllowStretch
+ }
+ return Default_Transform_AllowStretch
+}
+
+type ImageData struct {
+ Content []byte `protobuf:"bytes,1,req,name=content" json:"content,omitempty"`
+ BlobKey *string `protobuf:"bytes,2,opt,name=blob_key" json:"blob_key,omitempty"`
+ Width *int32 `protobuf:"varint,3,opt,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,4,opt,name=height" json:"height,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImageData) Reset() { *m = ImageData{} }
+func (m *ImageData) String() string { return proto.CompactTextString(m) }
+func (*ImageData) ProtoMessage() {}
+
+func (m *ImageData) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *ImageData) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *ImageData) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *ImageData) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+type InputSettings struct {
+ CorrectExifOrientation *InputSettings_ORIENTATION_CORRECTION_TYPE `protobuf:"varint,1,opt,name=correct_exif_orientation,enum=appengine.InputSettings_ORIENTATION_CORRECTION_TYPE,def=0" json:"correct_exif_orientation,omitempty"`
+ ParseMetadata *bool `protobuf:"varint,2,opt,name=parse_metadata,def=0" json:"parse_metadata,omitempty"`
+ TransparentSubstitutionRgb *int32 `protobuf:"varint,3,opt,name=transparent_substitution_rgb" json:"transparent_substitution_rgb,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InputSettings) Reset() { *m = InputSettings{} }
+func (m *InputSettings) String() string { return proto.CompactTextString(m) }
+func (*InputSettings) ProtoMessage() {}
+
+const Default_InputSettings_CorrectExifOrientation InputSettings_ORIENTATION_CORRECTION_TYPE = InputSettings_UNCHANGED_ORIENTATION
+const Default_InputSettings_ParseMetadata bool = false
+
+func (m *InputSettings) GetCorrectExifOrientation() InputSettings_ORIENTATION_CORRECTION_TYPE {
+ if m != nil && m.CorrectExifOrientation != nil {
+ return *m.CorrectExifOrientation
+ }
+ return Default_InputSettings_CorrectExifOrientation
+}
+
+func (m *InputSettings) GetParseMetadata() bool {
+ if m != nil && m.ParseMetadata != nil {
+ return *m.ParseMetadata
+ }
+ return Default_InputSettings_ParseMetadata
+}
+
+func (m *InputSettings) GetTransparentSubstitutionRgb() int32 {
+ if m != nil && m.TransparentSubstitutionRgb != nil {
+ return *m.TransparentSubstitutionRgb
+ }
+ return 0
+}
+
+type OutputSettings struct {
+ MimeType *OutputSettings_MIME_TYPE `protobuf:"varint,1,opt,name=mime_type,enum=appengine.OutputSettings_MIME_TYPE,def=0" json:"mime_type,omitempty"`
+ Quality *int32 `protobuf:"varint,2,opt,name=quality" json:"quality,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OutputSettings) Reset() { *m = OutputSettings{} }
+func (m *OutputSettings) String() string { return proto.CompactTextString(m) }
+func (*OutputSettings) ProtoMessage() {}
+
+const Default_OutputSettings_MimeType OutputSettings_MIME_TYPE = OutputSettings_PNG
+
+func (m *OutputSettings) GetMimeType() OutputSettings_MIME_TYPE {
+ if m != nil && m.MimeType != nil {
+ return *m.MimeType
+ }
+ return Default_OutputSettings_MimeType
+}
+
+func (m *OutputSettings) GetQuality() int32 {
+ if m != nil && m.Quality != nil {
+ return *m.Quality
+ }
+ return 0
+}
+
+type ImagesTransformRequest struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ Transform []*Transform `protobuf:"bytes,2,rep,name=transform" json:"transform,omitempty"`
+ Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"`
+ Input *InputSettings `protobuf:"bytes,4,opt,name=input" json:"input,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesTransformRequest) Reset() { *m = ImagesTransformRequest{} }
+func (m *ImagesTransformRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesTransformRequest) ProtoMessage() {}
+
+func (m *ImagesTransformRequest) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetTransform() []*Transform {
+ if m != nil {
+ return m.Transform
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetOutput() *OutputSettings {
+ if m != nil {
+ return m.Output
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetInput() *InputSettings {
+ if m != nil {
+ return m.Input
+ }
+ return nil
+}
+
+type ImagesTransformResponse struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ SourceMetadata *string `protobuf:"bytes,2,opt,name=source_metadata" json:"source_metadata,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesTransformResponse) Reset() { *m = ImagesTransformResponse{} }
+func (m *ImagesTransformResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesTransformResponse) ProtoMessage() {}
+
+func (m *ImagesTransformResponse) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesTransformResponse) GetSourceMetadata() string {
+ if m != nil && m.SourceMetadata != nil {
+ return *m.SourceMetadata
+ }
+ return ""
+}
+
+type CompositeImageOptions struct {
+ SourceIndex *int32 `protobuf:"varint,1,req,name=source_index" json:"source_index,omitempty"`
+ XOffset *int32 `protobuf:"varint,2,req,name=x_offset" json:"x_offset,omitempty"`
+ YOffset *int32 `protobuf:"varint,3,req,name=y_offset" json:"y_offset,omitempty"`
+ Opacity *float32 `protobuf:"fixed32,4,req,name=opacity" json:"opacity,omitempty"`
+ Anchor *CompositeImageOptions_ANCHOR `protobuf:"varint,5,req,name=anchor,enum=appengine.CompositeImageOptions_ANCHOR" json:"anchor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeImageOptions) Reset() { *m = CompositeImageOptions{} }
+func (m *CompositeImageOptions) String() string { return proto.CompactTextString(m) }
+func (*CompositeImageOptions) ProtoMessage() {}
+
+func (m *CompositeImageOptions) GetSourceIndex() int32 {
+ if m != nil && m.SourceIndex != nil {
+ return *m.SourceIndex
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetXOffset() int32 {
+ if m != nil && m.XOffset != nil {
+ return *m.XOffset
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetYOffset() int32 {
+ if m != nil && m.YOffset != nil {
+ return *m.YOffset
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetOpacity() float32 {
+ if m != nil && m.Opacity != nil {
+ return *m.Opacity
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetAnchor() CompositeImageOptions_ANCHOR {
+ if m != nil && m.Anchor != nil {
+ return *m.Anchor
+ }
+ return CompositeImageOptions_TOP_LEFT
+}
+
+type ImagesCanvas struct {
+ Width *int32 `protobuf:"varint,1,req,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,2,req,name=height" json:"height,omitempty"`
+ Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"`
+ Color *int32 `protobuf:"varint,4,opt,name=color,def=-1" json:"color,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCanvas) Reset() { *m = ImagesCanvas{} }
+func (m *ImagesCanvas) String() string { return proto.CompactTextString(m) }
+func (*ImagesCanvas) ProtoMessage() {}
+
+const Default_ImagesCanvas_Color int32 = -1
+
+func (m *ImagesCanvas) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *ImagesCanvas) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+func (m *ImagesCanvas) GetOutput() *OutputSettings {
+ if m != nil {
+ return m.Output
+ }
+ return nil
+}
+
+func (m *ImagesCanvas) GetColor() int32 {
+ if m != nil && m.Color != nil {
+ return *m.Color
+ }
+ return Default_ImagesCanvas_Color
+}
+
+type ImagesCompositeRequest struct {
+ Image []*ImageData `protobuf:"bytes,1,rep,name=image" json:"image,omitempty"`
+ Options []*CompositeImageOptions `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ Canvas *ImagesCanvas `protobuf:"bytes,3,req,name=canvas" json:"canvas,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCompositeRequest) Reset() { *m = ImagesCompositeRequest{} }
+func (m *ImagesCompositeRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesCompositeRequest) ProtoMessage() {}
+
+func (m *ImagesCompositeRequest) GetImage() []*ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesCompositeRequest) GetOptions() []*CompositeImageOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *ImagesCompositeRequest) GetCanvas() *ImagesCanvas {
+ if m != nil {
+ return m.Canvas
+ }
+ return nil
+}
+
+type ImagesCompositeResponse struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCompositeResponse) Reset() { *m = ImagesCompositeResponse{} }
+func (m *ImagesCompositeResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesCompositeResponse) ProtoMessage() {}
+
+func (m *ImagesCompositeResponse) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+type ImagesHistogramRequest struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogramRequest) Reset() { *m = ImagesHistogramRequest{} }
+func (m *ImagesHistogramRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogramRequest) ProtoMessage() {}
+
+func (m *ImagesHistogramRequest) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+type ImagesHistogram struct {
+ Red []int32 `protobuf:"varint,1,rep,name=red" json:"red,omitempty"`
+ Green []int32 `protobuf:"varint,2,rep,name=green" json:"green,omitempty"`
+ Blue []int32 `protobuf:"varint,3,rep,name=blue" json:"blue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogram) Reset() { *m = ImagesHistogram{} }
+func (m *ImagesHistogram) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogram) ProtoMessage() {}
+
+func (m *ImagesHistogram) GetRed() []int32 {
+ if m != nil {
+ return m.Red
+ }
+ return nil
+}
+
+func (m *ImagesHistogram) GetGreen() []int32 {
+ if m != nil {
+ return m.Green
+ }
+ return nil
+}
+
+func (m *ImagesHistogram) GetBlue() []int32 {
+ if m != nil {
+ return m.Blue
+ }
+ return nil
+}
+
+type ImagesHistogramResponse struct {
+ Histogram *ImagesHistogram `protobuf:"bytes,1,req,name=histogram" json:"histogram,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogramResponse) Reset() { *m = ImagesHistogramResponse{} }
+func (m *ImagesHistogramResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogramResponse) ProtoMessage() {}
+
+func (m *ImagesHistogramResponse) GetHistogram() *ImagesHistogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+type ImagesGetUrlBaseRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ CreateSecureUrl *bool `protobuf:"varint,2,opt,name=create_secure_url,def=0" json:"create_secure_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesGetUrlBaseRequest) Reset() { *m = ImagesGetUrlBaseRequest{} }
+func (m *ImagesGetUrlBaseRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesGetUrlBaseRequest) ProtoMessage() {}
+
+const Default_ImagesGetUrlBaseRequest_CreateSecureUrl bool = false
+
+func (m *ImagesGetUrlBaseRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *ImagesGetUrlBaseRequest) GetCreateSecureUrl() bool {
+ if m != nil && m.CreateSecureUrl != nil {
+ return *m.CreateSecureUrl
+ }
+ return Default_ImagesGetUrlBaseRequest_CreateSecureUrl
+}
+
+type ImagesGetUrlBaseResponse struct {
+ Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesGetUrlBaseResponse) Reset() { *m = ImagesGetUrlBaseResponse{} }
+func (m *ImagesGetUrlBaseResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesGetUrlBaseResponse) ProtoMessage() {}
+
+func (m *ImagesGetUrlBaseResponse) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+type ImagesDeleteUrlBaseRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesDeleteUrlBaseRequest) Reset() { *m = ImagesDeleteUrlBaseRequest{} }
+func (m *ImagesDeleteUrlBaseRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesDeleteUrlBaseRequest) ProtoMessage() {}
+
+func (m *ImagesDeleteUrlBaseRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+type ImagesDeleteUrlBaseResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesDeleteUrlBaseResponse) Reset() { *m = ImagesDeleteUrlBaseResponse{} }
+func (m *ImagesDeleteUrlBaseResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesDeleteUrlBaseResponse) ProtoMessage() {}
+
+func init() {
+}
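Every enum in this file carries the same generated trio: Enum() returns a pointer (proto2 optional enum fields are pointers), String() maps through the *_name table, and UnmarshalJSON maps back through the *_value table. A small caller-side sketch, assuming proto.Int32 from github.com/golang/protobuf/proto:

    // Hypothetical caller, not part of the vendored file.
    out := &OutputSettings{
        MimeType: OutputSettings_JPEG.Enum(), // *OutputSettings_MIME_TYPE
        Quality:  proto.Int32(85),
    }
    _ = out.GetMimeType().String() // "JPEG"

    unset := &OutputSettings{}
    _ = unset.GetMimeType() // OutputSettings_PNG, the declared default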
diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.proto b/vendor/google.golang.org/appengine/internal/image/images_service.proto
new file mode 100644
index 000000000..f0d2ed5d3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/image/images_service.proto
@@ -0,0 +1,162 @@
+syntax = "proto2";
+option go_package = "image";
+
+package appengine;
+
+message ImagesServiceError {
+ enum ErrorCode {
+ UNSPECIFIED_ERROR = 1;
+ BAD_TRANSFORM_DATA = 2;
+ NOT_IMAGE = 3;
+ BAD_IMAGE_DATA = 4;
+ IMAGE_TOO_LARGE = 5;
+ INVALID_BLOB_KEY = 6;
+ ACCESS_DENIED = 7;
+ OBJECT_NOT_FOUND = 8;
+ }
+}
+
+message ImagesServiceTransform {
+ enum Type {
+ RESIZE = 1;
+ ROTATE = 2;
+ HORIZONTAL_FLIP = 3;
+ VERTICAL_FLIP = 4;
+ CROP = 5;
+ IM_FEELING_LUCKY = 6;
+ }
+}
+
+message Transform {
+ optional int32 width = 1;
+ optional int32 height = 2;
+ optional bool crop_to_fit = 11 [default = false];
+ optional float crop_offset_x = 12 [default = 0.5];
+ optional float crop_offset_y = 13 [default = 0.5];
+
+ optional int32 rotate = 3 [default = 0];
+
+ optional bool horizontal_flip = 4 [default = false];
+
+ optional bool vertical_flip = 5 [default = false];
+
+ optional float crop_left_x = 6 [default = 0.0];
+ optional float crop_top_y = 7 [default = 0.0];
+ optional float crop_right_x = 8 [default = 1.0];
+ optional float crop_bottom_y = 9 [default = 1.0];
+
+ optional bool autolevels = 10 [default = false];
+
+ optional bool allow_stretch = 14 [default = false];
+}
+
+message ImageData {
+ required bytes content = 1 [ctype=CORD];
+ optional string blob_key = 2;
+
+ optional int32 width = 3;
+ optional int32 height = 4;
+}
+
+message InputSettings {
+ enum ORIENTATION_CORRECTION_TYPE {
+ UNCHANGED_ORIENTATION = 0;
+ CORRECT_ORIENTATION = 1;
+ }
+ optional ORIENTATION_CORRECTION_TYPE correct_exif_orientation = 1
+ [default=UNCHANGED_ORIENTATION];
+ optional bool parse_metadata = 2 [default=false];
+ optional int32 transparent_substitution_rgb = 3;
+}
+
+message OutputSettings {
+ enum MIME_TYPE {
+ PNG = 0;
+ JPEG = 1;
+ WEBP = 2;
+ }
+
+ optional MIME_TYPE mime_type = 1 [default=PNG];
+ optional int32 quality = 2;
+}
+
+message ImagesTransformRequest {
+ required ImageData image = 1;
+ repeated Transform transform = 2;
+ required OutputSettings output = 3;
+ optional InputSettings input = 4;
+}
+
+message ImagesTransformResponse {
+ required ImageData image = 1;
+ optional string source_metadata = 2;
+}
+
+message CompositeImageOptions {
+ required int32 source_index = 1;
+ required int32 x_offset = 2;
+ required int32 y_offset = 3;
+ required float opacity = 4;
+
+ enum ANCHOR {
+ TOP_LEFT = 0;
+ TOP = 1;
+ TOP_RIGHT = 2;
+ LEFT = 3;
+ CENTER = 4;
+ RIGHT = 5;
+ BOTTOM_LEFT = 6;
+ BOTTOM = 7;
+ BOTTOM_RIGHT = 8;
+ }
+
+ required ANCHOR anchor = 5;
+}
+
+message ImagesCanvas {
+ required int32 width = 1;
+ required int32 height = 2;
+ required OutputSettings output = 3;
+ optional int32 color = 4 [default=-1];
+}
+
+message ImagesCompositeRequest {
+ repeated ImageData image = 1;
+ repeated CompositeImageOptions options = 2;
+ required ImagesCanvas canvas = 3;
+}
+
+message ImagesCompositeResponse {
+ required ImageData image = 1;
+}
+
+message ImagesHistogramRequest {
+ required ImageData image = 1;
+}
+
+message ImagesHistogram {
+ repeated int32 red = 1;
+ repeated int32 green = 2;
+ repeated int32 blue = 3;
+}
+
+message ImagesHistogramResponse {
+ required ImagesHistogram histogram = 1;
+}
+
+message ImagesGetUrlBaseRequest {
+ required string blob_key = 1;
+
+ optional bool create_secure_url = 2 [default = false];
+}
+
+message ImagesGetUrlBaseResponse {
+ required string url = 1;
+}
+
+message ImagesDeleteUrlBaseRequest {
+ required string blob_key = 1;
+}
+
+message ImagesDeleteUrlBaseResponse {
+}
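Read together, a typical transform call bundles one ImageData, an ordered list of Transforms, and the OutputSettings into an ImagesTransformRequest. A hedged sketch against the generated Go types from images_service.pb.go (field values invented; rawJPEG is assumed to be a []byte already in scope):

    req := &ImagesTransformRequest{
        Image: &ImageData{Content: rawJPEG},
        Transform: []*Transform{{
            Width:     proto.Int32(640),
            Height:    proto.Int32(480),
            CropToFit: proto.Bool(true),
        }},
        Output: &OutputSettings{MimeType: OutputSettings_JPEG.Enum()},
    }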
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 000000000..051ea3980
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,110 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package internal provides support for package appengine.
+//
+// Programs should not use this package directly. Its API is not stable.
+// Use packages appengine and appengine/* instead.
+package internal
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+// errorCodeMaps is a map of service name to the error code map for the service.
+var errorCodeMaps = make(map[string]map[int32]string)
+
+// RegisterErrorCodeMap is called from API implementations to register their
+// error code map. This should only be called from init functions.
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+ errorCodeMaps[service] = m
+}
+
+type timeoutCodeKey struct {
+ service string
+ code int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
+func RegisterTimeoutErrorCode(service string, code int32) {
+ timeoutCodes[timeoutCodeKey{service, code}] = true
+}
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
+type APIError struct {
+ Service string
+ Detail string
+ Code int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+ if e.Code == 0 {
+ if e.Detail == "" {
+ return "APIError <empty>"
+ }
+ return e.Detail
+ }
+ s := fmt.Sprintf("API error %d", e.Code)
+ if m, ok := errorCodeMaps[e.Service]; ok {
+ s += " (" + e.Service + ": " + m[e.Code] + ")"
+ } else {
+ // Shouldn't happen, but provide a bit more detail if it does.
+ s = e.Service + " " + s
+ }
+ if e.Detail != "" {
+ s += ": " + e.Detail
+ }
+ return s
+}
+
+func (e *APIError) IsTimeout() bool {
+ return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+ Detail string
+ Code int32
+ // TODO: Remove this if we get a distinguishable error code.
+ Timeout bool
+}
+
+func (e *CallError) Error() string {
+ var msg string
+ switch remotepb.RpcError_ErrorCode(e.Code) {
+ case remotepb.RpcError_UNKNOWN:
+ return e.Detail
+ case remotepb.RpcError_OVER_QUOTA:
+ msg = "Over quota"
+ case remotepb.RpcError_CAPABILITY_DISABLED:
+ msg = "Capability disabled"
+ case remotepb.RpcError_CANCELLED:
+ msg = "Canceled"
+ default:
+ msg = fmt.Sprintf("Call error %d", e.Code)
+ }
+ s := msg + ": " + e.Detail
+ if e.Timeout {
+ s += " (timeout)"
+ }
+ return s
+}
+
+func (e *CallError) IsTimeout() bool {
+ return e.Timeout
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
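The two Register* hooks above are intended to run from init functions in the per-service packages, and APIError.Error / APIError.IsTimeout then consult the registered tables. A minimal sketch of a service package plugging in (the service name and codes here are invented for illustration):

    func init() {
        RegisterErrorCodeMap("someservice", map[int32]string{
            1: "UNSPECIFIED_ERROR",
            5: "DEADLINE_EXCEEDED",
        })
        RegisterTimeoutErrorCode("someservice", 5)
    }

    // (&APIError{Service: "someservice", Code: 5, Detail: "backend slow"}).Error()
    // then yields "API error 5 (someservice: DEADLINE_EXCEEDED): backend slow",
    // and IsTimeout() reports true.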
diff --git a/vendor/google.golang.org/appengine/internal/internal_vm_test.go b/vendor/google.golang.org/appengine/internal/internal_vm_test.go
new file mode 100644
index 000000000..f8097616b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal_vm_test.go
@@ -0,0 +1,60 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func TestInstallingHealthChecker(t *testing.T) {
+ try := func(desc string, mux *http.ServeMux, wantCode int, wantBody string) {
+ installHealthChecker(mux)
+ srv := httptest.NewServer(mux)
+ defer srv.Close()
+
+ resp, err := http.Get(srv.URL + "/_ah/health")
+ if err != nil {
+ t.Errorf("%s: http.Get: %v", desc, err)
+ return
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Errorf("%s: reading body: %v", desc, err)
+ return
+ }
+
+ if resp.StatusCode != wantCode {
+ t.Errorf("%s: got HTTP %d, want %d", desc, resp.StatusCode, wantCode)
+ return
+ }
+ if wantBody != "" && string(body) != wantBody {
+ t.Errorf("%s: got HTTP body %q, want %q", desc, body, wantBody)
+ return
+ }
+ }
+
+ // If there are no handlers, or only a root handler, a health checker should be installed.

+ try("empty mux", http.NewServeMux(), 200, "ok")
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "root handler")
+ })
+ try("mux with root handler", mux, 200, "ok")
+
+ // If there's a custom health check handler, one should not be installed.
+ mux = http.NewServeMux()
+ mux.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(418)
+ io.WriteString(w, "I'm short and stout!")
+ })
+ try("mux with custom health checker", mux, 418, "I'm short and stout!")
+}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
new file mode 100644
index 000000000..20c595be3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -0,0 +1,899 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/log/log_service.proto
+// DO NOT EDIT!
+
+/*
+Package log is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/log/log_service.proto
+
+It has these top-level messages:
+ LogServiceError
+ UserAppLogLine
+ UserAppLogGroup
+ FlushRequest
+ SetStatusRequest
+ LogOffset
+ LogLine
+ RequestLog
+ LogModuleVersion
+ LogReadRequest
+ LogReadResponse
+ LogUsageRecord
+ LogUsageRequest
+ LogUsageResponse
+*/
+package log
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type LogServiceError_ErrorCode int32
+
+const (
+ LogServiceError_OK LogServiceError_ErrorCode = 0
+ LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
+ LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
+)
+
+var LogServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "STORAGE_ERROR",
+}
+var LogServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "STORAGE_ERROR": 2,
+}
+
+func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
+ p := new(LogServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x LogServiceError_ErrorCode) String() string {
+ return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
+}
+func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = LogServiceError_ErrorCode(value)
+ return nil
+}
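+
+// Hedged sketch (caller code, not generated): proto.UnmarshalJSONEnum
+// accepts either the symbolic name or the numeric value, e.g.
+//
+//	var code LogServiceError_ErrorCode
+//	_ = code.UnmarshalJSON([]byte(`"STORAGE_ERROR"`)) // code == LogServiceError_STORAGE_ERROR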
+
+type LogServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogServiceError) Reset() { *m = LogServiceError{} }
+func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
+func (*LogServiceError) ProtoMessage() {}
+
+type UserAppLogLine struct {
+ TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"`
+ Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
+func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogLine) ProtoMessage() {}
+
+func (m *UserAppLogLine) GetTimestampUsec() int64 {
+ if m != nil && m.TimestampUsec != nil {
+ return *m.TimestampUsec
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetLevel() int64 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
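+
+// Hedged usage sketch (caller code, not generated; the values are made up):
+// scalar fields are pointers, so messages are typically built with the proto
+// helper constructors, and the Get methods are nil-safe, e.g.
+//
+//	line := &UserAppLogLine{
+//		TimestampUsec: proto.Int64(1517263000000000),
+//		Level:         proto.Int64(3),
+//		Message:       proto.String("request handled"),
+//	}
+//	_ = line.GetMessage() // "request handled"; safe even on a nil *UserAppLogLine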
+
+type UserAppLogGroup struct {
+ LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
+func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogGroup) ProtoMessage() {}
+
+func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
+ if m != nil {
+ return m.LogLine
+ }
+ return nil
+}
+
+type FlushRequest struct {
+ Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FlushRequest) Reset() { *m = FlushRequest{} }
+func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
+func (*FlushRequest) ProtoMessage() {}
+
+func (m *FlushRequest) GetLogs() []byte {
+ if m != nil {
+ return m.Logs
+ }
+ return nil
+}
+
+type SetStatusRequest struct {
+ Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
+func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*SetStatusRequest) ProtoMessage() {}
+
+func (m *SetStatusRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+type LogOffset struct {
+ RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogOffset) Reset() { *m = LogOffset{} }
+func (m *LogOffset) String() string { return proto.CompactTextString(m) }
+func (*LogOffset) ProtoMessage() {}
+
+func (m *LogOffset) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+type LogLine struct {
+ Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
+ Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogLine) Reset() { *m = LogLine{} }
+func (m *LogLine) String() string { return proto.CompactTextString(m) }
+func (*LogLine) ProtoMessage() {}
+
+func (m *LogLine) GetTime() int64 {
+ if m != nil && m.Time != nil {
+ return *m.Time
+ }
+ return 0
+}
+
+func (m *LogLine) GetLevel() int32 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *LogLine) GetLogMessage() string {
+ if m != nil && m.LogMessage != nil {
+ return *m.LogMessage
+ }
+ return ""
+}
+
+type RequestLog struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"`
+ RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
+ Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
+ Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
+ StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"`
+ Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
+ Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
+ Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
+ Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
+ HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"`
+ Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
+ ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"`
+ Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
+ UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"`
+ UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"`
+ Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
+ ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"`
+ Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
+ Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
+ TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"`
+ TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"`
+ WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"`
+ PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"`
+ Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
+ CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"`
+ Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
+ LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"`
+ AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"`
+ ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"`
+ WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"`
+ WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"`
+ ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"`
+ ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequestLog) Reset() { *m = RequestLog{} }
+func (m *RequestLog) String() string { return proto.CompactTextString(m) }
+func (*RequestLog) ProtoMessage() {}
+
+const Default_RequestLog_ModuleId string = "default"
+const Default_RequestLog_ReplicaIndex int32 = -1
+const Default_RequestLog_Finished bool = true
+
+func (m *RequestLog) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_RequestLog_ModuleId
+}
+
+func (m *RequestLog) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *RequestLog) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *RequestLog) GetIp() string {
+ if m != nil && m.Ip != nil {
+ return *m.Ip
+ }
+ return ""
+}
+
+func (m *RequestLog) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetLatency() int64 {
+ if m != nil && m.Latency != nil {
+ return *m.Latency
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMcycles() int64 {
+ if m != nil && m.Mcycles != nil {
+ return *m.Mcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *RequestLog) GetResource() string {
+ if m != nil && m.Resource != nil {
+ return *m.Resource
+ }
+ return ""
+}
+
+func (m *RequestLog) GetHttpVersion() string {
+ if m != nil && m.HttpVersion != nil {
+ return *m.HttpVersion
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStatus() int32 {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return 0
+}
+
+func (m *RequestLog) GetResponseSize() int64 {
+ if m != nil && m.ResponseSize != nil {
+ return *m.ResponseSize
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReferrer() string {
+ if m != nil && m.Referrer != nil {
+ return *m.Referrer
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUserAgent() string {
+ if m != nil && m.UserAgent != nil {
+ return *m.UserAgent
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUrlMapEntry() string {
+ if m != nil && m.UrlMapEntry != nil {
+ return *m.UrlMapEntry
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCombined() string {
+ if m != nil && m.Combined != nil {
+ return *m.Combined
+ }
+ return ""
+}
+
+func (m *RequestLog) GetApiMcycles() int64 {
+ if m != nil && m.ApiMcycles != nil {
+ return *m.ApiMcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetHost() string {
+ if m != nil && m.Host != nil {
+ return *m.Host
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCost() float64 {
+ if m != nil && m.Cost != nil {
+ return *m.Cost
+ }
+ return 0
+}
+
+func (m *RequestLog) GetTaskQueueName() string {
+ if m != nil && m.TaskQueueName != nil {
+ return *m.TaskQueueName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetTaskName() string {
+ if m != nil && m.TaskName != nil {
+ return *m.TaskName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetWasLoadingRequest() bool {
+ if m != nil && m.WasLoadingRequest != nil {
+ return *m.WasLoadingRequest
+ }
+ return false
+}
+
+func (m *RequestLog) GetPendingTime() int64 {
+ if m != nil && m.PendingTime != nil {
+ return *m.PendingTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return Default_RequestLog_ReplicaIndex
+}
+
+func (m *RequestLog) GetFinished() bool {
+ if m != nil && m.Finished != nil {
+ return *m.Finished
+ }
+ return Default_RequestLog_Finished
+}
+
+func (m *RequestLog) GetCloneKey() []byte {
+ if m != nil {
+ return m.CloneKey
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLine() []*LogLine {
+ if m != nil {
+ return m.Line
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLinesIncomplete() bool {
+ if m != nil && m.LinesIncomplete != nil {
+ return *m.LinesIncomplete
+ }
+ return false
+}
+
+func (m *RequestLog) GetAppEngineRelease() []byte {
+ if m != nil {
+ return m.AppEngineRelease
+ }
+ return nil
+}
+
+func (m *RequestLog) GetExitReason() int32 {
+ if m != nil && m.ExitReason != nil {
+ return *m.ExitReason
+ }
+ return 0
+}
+
+func (m *RequestLog) GetWasThrottledForTime() bool {
+ if m != nil && m.WasThrottledForTime != nil {
+ return *m.WasThrottledForTime
+ }
+ return false
+}
+
+func (m *RequestLog) GetWasThrottledForRequests() bool {
+ if m != nil && m.WasThrottledForRequests != nil {
+ return *m.WasThrottledForRequests
+ }
+ return false
+}
+
+func (m *RequestLog) GetThrottledTime() int64 {
+ if m != nil && m.ThrottledTime != nil {
+ return *m.ThrottledTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetServerName() []byte {
+ if m != nil {
+ return m.ServerName
+ }
+ return nil
+}
+
+type LogModuleVersion struct {
+ ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
+func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
+func (*LogModuleVersion) ProtoMessage() {}
+
+const Default_LogModuleVersion_ModuleId string = "default"
+
+func (m *LogModuleVersion) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_LogModuleVersion_ModuleId
+}
+
+func (m *LogModuleVersion) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+type LogReadRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+ ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"`
+ StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
+ RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"`
+ MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"`
+ IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"`
+ Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
+ CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"`
+ HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"`
+ IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"`
+ AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"`
+ IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"`
+ IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"`
+ CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"`
+ NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
+func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
+func (*LogReadRequest) ProtoMessage() {}
+
+func (m *LogReadRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
+ if m != nil {
+ return m.ModuleVersion
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetRequestId() [][]byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetMinimumLogLevel() int32 {
+ if m != nil && m.MinimumLogLevel != nil {
+ return *m.MinimumLogLevel
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeIncomplete() bool {
+ if m != nil && m.IncludeIncomplete != nil {
+ return *m.IncludeIncomplete
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetCombinedLogRegex() string {
+ if m != nil && m.CombinedLogRegex != nil {
+ return *m.CombinedLogRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetHostRegex() string {
+ if m != nil && m.HostRegex != nil {
+ return *m.HostRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeAppLogs() bool {
+ if m != nil && m.IncludeAppLogs != nil {
+ return *m.IncludeAppLogs
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
+ if m != nil && m.AppLogsPerRequest != nil {
+ return *m.AppLogsPerRequest
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeHost() bool {
+ if m != nil && m.IncludeHost != nil {
+ return *m.IncludeHost
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetIncludeAll() bool {
+ if m != nil && m.IncludeAll != nil {
+ return *m.IncludeAll
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCacheIterator() bool {
+ if m != nil && m.CacheIterator != nil {
+ return *m.CacheIterator
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetNumShards() int32 {
+ if m != nil && m.NumShards != nil {
+ return *m.NumShards
+ }
+ return 0
+}
+
+type LogReadResponse struct {
+ Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
+ LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
+func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
+func (*LogReadResponse) ProtoMessage() {}
+
+func (m *LogReadResponse) GetLog() []*RequestLog {
+ if m != nil {
+ return m.Log
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetLastEndTime() int64 {
+ if m != nil && m.LastEndTime != nil {
+ return *m.LastEndTime
+ }
+ return 0
+}
+
+type LogUsageRecord struct {
+ VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"`
+ Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+ TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"`
+ Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
+func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRecord) ProtoMessage() {}
+
+func (m *LogUsageRecord) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *LogUsageRecord) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetTotalSize() int64 {
+ if m != nil && m.TotalSize != nil {
+ return *m.TotalSize
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetRecords() int32 {
+ if m != nil && m.Records != nil {
+ return *m.Records
+ }
+ return 0
+}
+
+type LogUsageRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+ ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"`
+ CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"`
+ UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"`
+ VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
+func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRequest) ProtoMessage() {}
+
+const Default_LogUsageRequest_ResolutionHours uint32 = 1
+
+func (m *LogUsageRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogUsageRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogUsageRequest) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetResolutionHours() uint32 {
+ if m != nil && m.ResolutionHours != nil {
+ return *m.ResolutionHours
+ }
+ return Default_LogUsageRequest_ResolutionHours
+}
+
+func (m *LogUsageRequest) GetCombineVersions() bool {
+ if m != nil && m.CombineVersions != nil {
+ return *m.CombineVersions
+ }
+ return false
+}
+
+func (m *LogUsageRequest) GetUsageVersion() int32 {
+ if m != nil && m.UsageVersion != nil {
+ return *m.UsageVersion
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetVersionsOnly() bool {
+ if m != nil && m.VersionsOnly != nil {
+ return *m.VersionsOnly
+ }
+ return false
+}
+
+type LogUsageResponse struct {
+ Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
+ Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
+func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
+func (*LogUsageResponse) ProtoMessage() {}
+
+func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
+ if m != nil {
+ return m.Usage
+ }
+ return nil
+}
+
+func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
new file mode 100644
index 000000000..8981dc475
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto
@@ -0,0 +1,150 @@
+syntax = "proto2";
+option go_package = "log";
+
+package appengine;
+
+message LogServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ STORAGE_ERROR = 2;
+ }
+}
+
+message UserAppLogLine {
+ required int64 timestamp_usec = 1;
+ required int64 level = 2;
+ required string message = 3;
+}
+
+message UserAppLogGroup {
+ repeated UserAppLogLine log_line = 2;
+}
+
+message FlushRequest {
+ optional bytes logs = 1;
+}
+
+message SetStatusRequest {
+ required string status = 1;
+}
+
+message LogOffset {
+ optional bytes request_id = 1;
+}
+
+message LogLine {
+ required int64 time = 1;
+ required int32 level = 2;
+ required string log_message = 3;
+}
+
+message RequestLog {
+ required string app_id = 1;
+ optional string module_id = 37 [default = "default"];
+ required string version_id = 2;
+ required bytes request_id = 3;
+ optional LogOffset offset = 35;
+ required string ip = 4;
+ optional string nickname = 5;
+ required int64 start_time = 6;
+ required int64 end_time = 7;
+ required int64 latency = 8;
+ required int64 mcycles = 9;
+ required string method = 10;
+ required string resource = 11;
+ required string http_version = 12;
+ required int32 status = 13;
+ required int64 response_size = 14;
+ optional string referrer = 15;
+ optional string user_agent = 16;
+ required string url_map_entry = 17;
+ required string combined = 18;
+ optional int64 api_mcycles = 19;
+ optional string host = 20;
+ optional double cost = 21;
+
+ optional string task_queue_name = 22;
+ optional string task_name = 23;
+
+ optional bool was_loading_request = 24;
+ optional int64 pending_time = 25;
+ optional int32 replica_index = 26 [default = -1];
+ optional bool finished = 27 [default = true];
+ optional bytes clone_key = 28;
+
+ repeated LogLine line = 29;
+
+ optional bool lines_incomplete = 36;
+ optional bytes app_engine_release = 38;
+
+ optional int32 exit_reason = 30;
+ optional bool was_throttled_for_time = 31;
+ optional bool was_throttled_for_requests = 32;
+ optional int64 throttled_time = 33;
+
+ optional bytes server_name = 34;
+}
+
+message LogModuleVersion {
+ optional string module_id = 1 [default = "default"];
+ optional string version_id = 2;
+}
+
+message LogReadRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ repeated LogModuleVersion module_version = 19;
+
+ optional int64 start_time = 3;
+ optional int64 end_time = 4;
+ optional LogOffset offset = 5;
+ repeated bytes request_id = 6;
+
+ optional int32 minimum_log_level = 7;
+ optional bool include_incomplete = 8;
+ optional int64 count = 9;
+
+ optional string combined_log_regex = 14;
+ optional string host_regex = 15;
+ optional int32 replica_index = 16;
+
+ optional bool include_app_logs = 10;
+ optional int32 app_logs_per_request = 17;
+ optional bool include_host = 11;
+ optional bool include_all = 12;
+ optional bool cache_iterator = 13;
+ optional int32 num_shards = 18;
+}
+
+message LogReadResponse {
+ repeated RequestLog log = 1;
+ optional LogOffset offset = 2;
+ optional int64 last_end_time = 3;
+}
+
+message LogUsageRecord {
+ optional string version_id = 1;
+ optional int32 start_time = 2;
+ optional int32 end_time = 3;
+ optional int64 count = 4;
+ optional int64 total_size = 5;
+ optional int32 records = 6;
+}
+
+message LogUsageRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ optional int32 start_time = 3;
+ optional int32 end_time = 4;
+ optional uint32 resolution_hours = 5 [default = 1];
+ optional bool combine_versions = 6;
+ optional int32 usage_version = 7;
+ optional bool versions_only = 8;
+}
+
+message LogUsageResponse {
+ repeated LogUsageRecord usage = 1;
+ optional LogUsageRecord summary = 2;
+}
diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go
new file mode 100644
index 000000000..b8d5f0301
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go
@@ -0,0 +1,229 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/mail/mail_service.proto
+// DO NOT EDIT!
+
+/*
+Package mail is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/mail/mail_service.proto
+
+It has these top-level messages:
+ MailServiceError
+ MailAttachment
+ MailHeader
+ MailMessage
+*/
+package mail
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type MailServiceError_ErrorCode int32
+
+const (
+ MailServiceError_OK MailServiceError_ErrorCode = 0
+ MailServiceError_INTERNAL_ERROR MailServiceError_ErrorCode = 1
+ MailServiceError_BAD_REQUEST MailServiceError_ErrorCode = 2
+ MailServiceError_UNAUTHORIZED_SENDER MailServiceError_ErrorCode = 3
+ MailServiceError_INVALID_ATTACHMENT_TYPE MailServiceError_ErrorCode = 4
+ MailServiceError_INVALID_HEADER_NAME MailServiceError_ErrorCode = 5
+ MailServiceError_INVALID_CONTENT_ID MailServiceError_ErrorCode = 6
+)
+
+var MailServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "BAD_REQUEST",
+ 3: "UNAUTHORIZED_SENDER",
+ 4: "INVALID_ATTACHMENT_TYPE",
+ 5: "INVALID_HEADER_NAME",
+ 6: "INVALID_CONTENT_ID",
+}
+var MailServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "BAD_REQUEST": 2,
+ "UNAUTHORIZED_SENDER": 3,
+ "INVALID_ATTACHMENT_TYPE": 4,
+ "INVALID_HEADER_NAME": 5,
+ "INVALID_CONTENT_ID": 6,
+}
+
+func (x MailServiceError_ErrorCode) Enum() *MailServiceError_ErrorCode {
+ p := new(MailServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x MailServiceError_ErrorCode) String() string {
+ return proto.EnumName(MailServiceError_ErrorCode_name, int32(x))
+}
+func (x *MailServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MailServiceError_ErrorCode_value, data, "MailServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = MailServiceError_ErrorCode(value)
+ return nil
+}
+
+type MailServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailServiceError) Reset() { *m = MailServiceError{} }
+func (m *MailServiceError) String() string { return proto.CompactTextString(m) }
+func (*MailServiceError) ProtoMessage() {}
+
+type MailAttachment struct {
+ FileName *string `protobuf:"bytes,1,req,name=FileName" json:"FileName,omitempty"`
+ Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"`
+ ContentID *string `protobuf:"bytes,3,opt,name=ContentID" json:"ContentID,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailAttachment) Reset() { *m = MailAttachment{} }
+func (m *MailAttachment) String() string { return proto.CompactTextString(m) }
+func (*MailAttachment) ProtoMessage() {}
+
+func (m *MailAttachment) GetFileName() string {
+ if m != nil && m.FileName != nil {
+ return *m.FileName
+ }
+ return ""
+}
+
+func (m *MailAttachment) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *MailAttachment) GetContentID() string {
+ if m != nil && m.ContentID != nil {
+ return *m.ContentID
+ }
+ return ""
+}
+
+type MailHeader struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailHeader) Reset() { *m = MailHeader{} }
+func (m *MailHeader) String() string { return proto.CompactTextString(m) }
+func (*MailHeader) ProtoMessage() {}
+
+func (m *MailHeader) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MailHeader) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type MailMessage struct {
+ Sender *string `protobuf:"bytes,1,req,name=Sender" json:"Sender,omitempty"`
+ ReplyTo *string `protobuf:"bytes,2,opt,name=ReplyTo" json:"ReplyTo,omitempty"`
+ To []string `protobuf:"bytes,3,rep,name=To" json:"To,omitempty"`
+ Cc []string `protobuf:"bytes,4,rep,name=Cc" json:"Cc,omitempty"`
+ Bcc []string `protobuf:"bytes,5,rep,name=Bcc" json:"Bcc,omitempty"`
+ Subject *string `protobuf:"bytes,6,req,name=Subject" json:"Subject,omitempty"`
+ TextBody *string `protobuf:"bytes,7,opt,name=TextBody" json:"TextBody,omitempty"`
+ HtmlBody *string `protobuf:"bytes,8,opt,name=HtmlBody" json:"HtmlBody,omitempty"`
+ Attachment []*MailAttachment `protobuf:"bytes,9,rep,name=Attachment" json:"Attachment,omitempty"`
+ Header []*MailHeader `protobuf:"bytes,10,rep,name=Header" json:"Header,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailMessage) Reset() { *m = MailMessage{} }
+func (m *MailMessage) String() string { return proto.CompactTextString(m) }
+func (*MailMessage) ProtoMessage() {}
+
+func (m *MailMessage) GetSender() string {
+ if m != nil && m.Sender != nil {
+ return *m.Sender
+ }
+ return ""
+}
+
+func (m *MailMessage) GetReplyTo() string {
+ if m != nil && m.ReplyTo != nil {
+ return *m.ReplyTo
+ }
+ return ""
+}
+
+func (m *MailMessage) GetTo() []string {
+ if m != nil {
+ return m.To
+ }
+ return nil
+}
+
+func (m *MailMessage) GetCc() []string {
+ if m != nil {
+ return m.Cc
+ }
+ return nil
+}
+
+func (m *MailMessage) GetBcc() []string {
+ if m != nil {
+ return m.Bcc
+ }
+ return nil
+}
+
+func (m *MailMessage) GetSubject() string {
+ if m != nil && m.Subject != nil {
+ return *m.Subject
+ }
+ return ""
+}
+
+func (m *MailMessage) GetTextBody() string {
+ if m != nil && m.TextBody != nil {
+ return *m.TextBody
+ }
+ return ""
+}
+
+func (m *MailMessage) GetHtmlBody() string {
+ if m != nil && m.HtmlBody != nil {
+ return *m.HtmlBody
+ }
+ return ""
+}
+
+func (m *MailMessage) GetAttachment() []*MailAttachment {
+ if m != nil {
+ return m.Attachment
+ }
+ return nil
+}
+
+func (m *MailMessage) GetHeader() []*MailHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
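+
+// Hedged construction sketch (caller code, not generated; addresses are
+// placeholders): a minimal message with one attachment and one custom
+// header, e.g.
+//
+//	msg := &MailMessage{
+//		Sender:   proto.String("admin@example.com"),
+//		To:       []string{"user@example.com"},
+//		Subject:  proto.String("Report"),
+//		TextBody: proto.String("See attachment."),
+//		Attachment: []*MailAttachment{{
+//			FileName: proto.String("report.csv"),
+//			Data:     []byte("a,b\n1,2\n"),
+//		}},
+//		Header: []*MailHeader{{
+//			Name:  proto.String("List-Id"),
+//			Value: proto.String("reports.example.com"),
+//		}},
+//	}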
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.proto b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto
new file mode 100644
index 000000000..4e57b7aa5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto
@@ -0,0 +1,45 @@
+syntax = "proto2";
+option go_package = "mail";
+
+package appengine;
+
+message MailServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ BAD_REQUEST = 2;
+ UNAUTHORIZED_SENDER = 3;
+ INVALID_ATTACHMENT_TYPE = 4;
+ INVALID_HEADER_NAME = 5;
+ INVALID_CONTENT_ID = 6;
+ }
+}
+
+message MailAttachment {
+ required string FileName = 1;
+ required bytes Data = 2;
+ optional string ContentID = 3;
+}
+
+message MailHeader {
+ required string name = 1;
+ required string value = 2;
+}
+
+message MailMessage {
+ required string Sender = 1;
+ optional string ReplyTo = 2;
+
+ repeated string To = 3;
+ repeated string Cc = 4;
+ repeated string Bcc = 5;
+
+ required string Subject = 6;
+
+ optional string TextBody = 7;
+ optional string HtmlBody = 8;
+
+ repeated MailAttachment Attachment = 9;
+
+ repeated MailHeader Header = 10;
+}
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
new file mode 100644
index 000000000..49036163c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main.go
@@ -0,0 +1,15 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "appengine_internal"
+)
+
+func Main() {
+ appengine_internal.Main()
+}
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
new file mode 100644
index 000000000..822e784a4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_vm.go
@@ -0,0 +1,48 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+)
+
+func Main() {
+ installHealthChecker(http.DefaultServeMux)
+
+ port := "8080"
+ if s := os.Getenv("PORT"); s != "" {
+ port = s
+ }
+
+ host := ""
+ if IsDevAppServer() {
+ host = "127.0.0.1"
+ }
+ if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
+ log.Fatalf("http.ListenAndServe: %v", err)
+ }
+}
+
+func installHealthChecker(mux *http.ServeMux) {
+ // If no health check handler has been installed by this point, add a trivial one.
+ const healthPath = "/_ah/health"
+ hreq := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Path: healthPath,
+ },
+ }
+ if _, pat := mux.Handler(hreq); pat != healthPath {
+ mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "ok")
+ })
+ }
+}
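+
+// Hedged usage note: because installHealthChecker consults mux.Handler for
+// an exact "/_ah/health" pattern, an application can pre-empt the trivial
+// handler by registering its own before Main runs, e.g.
+//
+//	http.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
+//		// run application-specific readiness checks here
+//		io.WriteString(w, "ok")
+//	})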
diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
new file mode 100644
index 000000000..252fef869
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
@@ -0,0 +1,938 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/memcache/memcache_service.proto
+// DO NOT EDIT!
+
+/*
+Package memcache is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/memcache/memcache_service.proto
+
+It has these top-level messages:
+ MemcacheServiceError
+ AppOverride
+ MemcacheGetRequest
+ MemcacheGetResponse
+ MemcacheSetRequest
+ MemcacheSetResponse
+ MemcacheDeleteRequest
+ MemcacheDeleteResponse
+ MemcacheIncrementRequest
+ MemcacheIncrementResponse
+ MemcacheBatchIncrementRequest
+ MemcacheBatchIncrementResponse
+ MemcacheFlushRequest
+ MemcacheFlushResponse
+ MemcacheStatsRequest
+ MergedNamespaceStats
+ MemcacheStatsResponse
+ MemcacheGrabTailRequest
+ MemcacheGrabTailResponse
+*/
+package memcache
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type MemcacheServiceError_ErrorCode int32
+
+const (
+ MemcacheServiceError_OK MemcacheServiceError_ErrorCode = 0
+ MemcacheServiceError_UNSPECIFIED_ERROR MemcacheServiceError_ErrorCode = 1
+ MemcacheServiceError_NAMESPACE_NOT_SET MemcacheServiceError_ErrorCode = 2
+ MemcacheServiceError_PERMISSION_DENIED MemcacheServiceError_ErrorCode = 3
+ MemcacheServiceError_INVALID_VALUE MemcacheServiceError_ErrorCode = 6
+)
+
+var MemcacheServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "UNSPECIFIED_ERROR",
+ 2: "NAMESPACE_NOT_SET",
+ 3: "PERMISSION_DENIED",
+ 6: "INVALID_VALUE",
+}
+var MemcacheServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "UNSPECIFIED_ERROR": 1,
+ "NAMESPACE_NOT_SET": 2,
+ "PERMISSION_DENIED": 3,
+ "INVALID_VALUE": 6,
+}
+
+func (x MemcacheServiceError_ErrorCode) Enum() *MemcacheServiceError_ErrorCode {
+ p := new(MemcacheServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x MemcacheServiceError_ErrorCode) String() string {
+ return proto.EnumName(MemcacheServiceError_ErrorCode_name, int32(x))
+}
+func (x *MemcacheServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheServiceError_ErrorCode_value, data, "MemcacheServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheServiceError_ErrorCode(value)
+ return nil
+}
+
+type MemcacheSetRequest_SetPolicy int32
+
+const (
+ MemcacheSetRequest_SET MemcacheSetRequest_SetPolicy = 1
+ MemcacheSetRequest_ADD MemcacheSetRequest_SetPolicy = 2
+ MemcacheSetRequest_REPLACE MemcacheSetRequest_SetPolicy = 3
+ MemcacheSetRequest_CAS MemcacheSetRequest_SetPolicy = 4
+)
+
+var MemcacheSetRequest_SetPolicy_name = map[int32]string{
+ 1: "SET",
+ 2: "ADD",
+ 3: "REPLACE",
+ 4: "CAS",
+}
+var MemcacheSetRequest_SetPolicy_value = map[string]int32{
+ "SET": 1,
+ "ADD": 2,
+ "REPLACE": 3,
+ "CAS": 4,
+}
+
+func (x MemcacheSetRequest_SetPolicy) Enum() *MemcacheSetRequest_SetPolicy {
+ p := new(MemcacheSetRequest_SetPolicy)
+ *p = x
+ return p
+}
+func (x MemcacheSetRequest_SetPolicy) String() string {
+ return proto.EnumName(MemcacheSetRequest_SetPolicy_name, int32(x))
+}
+func (x *MemcacheSetRequest_SetPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheSetRequest_SetPolicy_value, data, "MemcacheSetRequest_SetPolicy")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheSetRequest_SetPolicy(value)
+ return nil
+}
+
+type MemcacheSetResponse_SetStatusCode int32
+
+const (
+ MemcacheSetResponse_STORED MemcacheSetResponse_SetStatusCode = 1
+ MemcacheSetResponse_NOT_STORED MemcacheSetResponse_SetStatusCode = 2
+ MemcacheSetResponse_ERROR MemcacheSetResponse_SetStatusCode = 3
+ MemcacheSetResponse_EXISTS MemcacheSetResponse_SetStatusCode = 4
+)
+
+var MemcacheSetResponse_SetStatusCode_name = map[int32]string{
+ 1: "STORED",
+ 2: "NOT_STORED",
+ 3: "ERROR",
+ 4: "EXISTS",
+}
+var MemcacheSetResponse_SetStatusCode_value = map[string]int32{
+ "STORED": 1,
+ "NOT_STORED": 2,
+ "ERROR": 3,
+ "EXISTS": 4,
+}
+
+func (x MemcacheSetResponse_SetStatusCode) Enum() *MemcacheSetResponse_SetStatusCode {
+ p := new(MemcacheSetResponse_SetStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheSetResponse_SetStatusCode) String() string {
+ return proto.EnumName(MemcacheSetResponse_SetStatusCode_name, int32(x))
+}
+func (x *MemcacheSetResponse_SetStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheSetResponse_SetStatusCode_value, data, "MemcacheSetResponse_SetStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheSetResponse_SetStatusCode(value)
+ return nil
+}
+
+type MemcacheDeleteResponse_DeleteStatusCode int32
+
+const (
+ MemcacheDeleteResponse_DELETED MemcacheDeleteResponse_DeleteStatusCode = 1
+ MemcacheDeleteResponse_NOT_FOUND MemcacheDeleteResponse_DeleteStatusCode = 2
+)
+
+var MemcacheDeleteResponse_DeleteStatusCode_name = map[int32]string{
+ 1: "DELETED",
+ 2: "NOT_FOUND",
+}
+var MemcacheDeleteResponse_DeleteStatusCode_value = map[string]int32{
+ "DELETED": 1,
+ "NOT_FOUND": 2,
+}
+
+func (x MemcacheDeleteResponse_DeleteStatusCode) Enum() *MemcacheDeleteResponse_DeleteStatusCode {
+ p := new(MemcacheDeleteResponse_DeleteStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheDeleteResponse_DeleteStatusCode) String() string {
+ return proto.EnumName(MemcacheDeleteResponse_DeleteStatusCode_name, int32(x))
+}
+func (x *MemcacheDeleteResponse_DeleteStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheDeleteResponse_DeleteStatusCode_value, data, "MemcacheDeleteResponse_DeleteStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheDeleteResponse_DeleteStatusCode(value)
+ return nil
+}
+
+type MemcacheIncrementRequest_Direction int32
+
+const (
+ MemcacheIncrementRequest_INCREMENT MemcacheIncrementRequest_Direction = 1
+ MemcacheIncrementRequest_DECREMENT MemcacheIncrementRequest_Direction = 2
+)
+
+var MemcacheIncrementRequest_Direction_name = map[int32]string{
+ 1: "INCREMENT",
+ 2: "DECREMENT",
+}
+var MemcacheIncrementRequest_Direction_value = map[string]int32{
+ "INCREMENT": 1,
+ "DECREMENT": 2,
+}
+
+func (x MemcacheIncrementRequest_Direction) Enum() *MemcacheIncrementRequest_Direction {
+ p := new(MemcacheIncrementRequest_Direction)
+ *p = x
+ return p
+}
+func (x MemcacheIncrementRequest_Direction) String() string {
+ return proto.EnumName(MemcacheIncrementRequest_Direction_name, int32(x))
+}
+func (x *MemcacheIncrementRequest_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheIncrementRequest_Direction_value, data, "MemcacheIncrementRequest_Direction")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheIncrementRequest_Direction(value)
+ return nil
+}
+
+type MemcacheIncrementResponse_IncrementStatusCode int32
+
+const (
+ MemcacheIncrementResponse_OK MemcacheIncrementResponse_IncrementStatusCode = 1
+ MemcacheIncrementResponse_NOT_CHANGED MemcacheIncrementResponse_IncrementStatusCode = 2
+ MemcacheIncrementResponse_ERROR MemcacheIncrementResponse_IncrementStatusCode = 3
+)
+
+var MemcacheIncrementResponse_IncrementStatusCode_name = map[int32]string{
+ 1: "OK",
+ 2: "NOT_CHANGED",
+ 3: "ERROR",
+}
+var MemcacheIncrementResponse_IncrementStatusCode_value = map[string]int32{
+ "OK": 1,
+ "NOT_CHANGED": 2,
+ "ERROR": 3,
+}
+
+func (x MemcacheIncrementResponse_IncrementStatusCode) Enum() *MemcacheIncrementResponse_IncrementStatusCode {
+ p := new(MemcacheIncrementResponse_IncrementStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheIncrementResponse_IncrementStatusCode) String() string {
+ return proto.EnumName(MemcacheIncrementResponse_IncrementStatusCode_name, int32(x))
+}
+func (x *MemcacheIncrementResponse_IncrementStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheIncrementResponse_IncrementStatusCode_value, data, "MemcacheIncrementResponse_IncrementStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheIncrementResponse_IncrementStatusCode(value)
+ return nil
+}
+
+type MemcacheServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheServiceError) Reset() { *m = MemcacheServiceError{} }
+func (m *MemcacheServiceError) String() string { return proto.CompactTextString(m) }
+func (*MemcacheServiceError) ProtoMessage() {}
+
+type AppOverride struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ NumMemcachegBackends *int32 `protobuf:"varint,2,opt,name=num_memcacheg_backends" json:"num_memcacheg_backends,omitempty"`
+ IgnoreShardlock *bool `protobuf:"varint,3,opt,name=ignore_shardlock" json:"ignore_shardlock,omitempty"`
+ MemcachePoolHint *string `protobuf:"bytes,4,opt,name=memcache_pool_hint" json:"memcache_pool_hint,omitempty"`
+ MemcacheShardingStrategy []byte `protobuf:"bytes,5,opt,name=memcache_sharding_strategy" json:"memcache_sharding_strategy,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppOverride) Reset() { *m = AppOverride{} }
+func (m *AppOverride) String() string { return proto.CompactTextString(m) }
+func (*AppOverride) ProtoMessage() {}
+
+func (m *AppOverride) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *AppOverride) GetNumMemcachegBackends() int32 {
+ if m != nil && m.NumMemcachegBackends != nil {
+ return *m.NumMemcachegBackends
+ }
+ return 0
+}
+
+func (m *AppOverride) GetIgnoreShardlock() bool {
+ if m != nil && m.IgnoreShardlock != nil {
+ return *m.IgnoreShardlock
+ }
+ return false
+}
+
+func (m *AppOverride) GetMemcachePoolHint() string {
+ if m != nil && m.MemcachePoolHint != nil {
+ return *m.MemcachePoolHint
+ }
+ return ""
+}
+
+func (m *AppOverride) GetMemcacheShardingStrategy() []byte {
+ if m != nil {
+ return m.MemcacheShardingStrategy
+ }
+ return nil
+}
+
+type MemcacheGetRequest struct {
+ Key [][]byte `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+ ForCas *bool `protobuf:"varint,4,opt,name=for_cas" json:"for_cas,omitempty"`
+ Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetRequest) Reset() { *m = MemcacheGetRequest{} }
+func (m *MemcacheGetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetRequest) ProtoMessage() {}
+
+func (m *MemcacheGetRequest) GetKey() [][]byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheGetRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheGetRequest) GetForCas() bool {
+ if m != nil && m.ForCas != nil {
+ return *m.ForCas
+ }
+ return false
+}
+
+func (m *MemcacheGetRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheGetResponse struct {
+ Item []*MemcacheGetResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetResponse) Reset() { *m = MemcacheGetResponse{} }
+func (m *MemcacheGetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse) ProtoMessage() {}
+
+func (m *MemcacheGetResponse) GetItem() []*MemcacheGetResponse_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheGetResponse_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+ CasId *uint64 `protobuf:"fixed64,5,opt,name=cas_id" json:"cas_id,omitempty"`
+ ExpiresInSeconds *int32 `protobuf:"varint,6,opt,name=expires_in_seconds" json:"expires_in_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetResponse_Item) Reset() { *m = MemcacheGetResponse_Item{} }
+func (m *MemcacheGetResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse_Item) ProtoMessage() {}
+
+func (m *MemcacheGetResponse_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetCasId() uint64 {
+ if m != nil && m.CasId != nil {
+ return *m.CasId
+ }
+ return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetExpiresInSeconds() int32 {
+ if m != nil && m.ExpiresInSeconds != nil {
+ return *m.ExpiresInSeconds
+ }
+ return 0
+}
+
+type MemcacheSetRequest struct {
+ Item []*MemcacheSetRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ NameSpace *string `protobuf:"bytes,7,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,10,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetRequest) Reset() { *m = MemcacheSetRequest{} }
+func (m *MemcacheSetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest) ProtoMessage() {}
+
+func (m *MemcacheSetRequest) GetItem() []*MemcacheSetRequest_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheSetRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheSetRequest_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+ SetPolicy *MemcacheSetRequest_SetPolicy `protobuf:"varint,5,opt,name=set_policy,enum=appengine.MemcacheSetRequest_SetPolicy,def=1" json:"set_policy,omitempty"`
+ ExpirationTime *uint32 `protobuf:"fixed32,6,opt,name=expiration_time,def=0" json:"expiration_time,omitempty"`
+ CasId *uint64 `protobuf:"fixed64,8,opt,name=cas_id" json:"cas_id,omitempty"`
+ ForCas *bool `protobuf:"varint,9,opt,name=for_cas" json:"for_cas,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetRequest_Item) Reset() { *m = MemcacheSetRequest_Item{} }
+func (m *MemcacheSetRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest_Item) ProtoMessage() {}
+
+const Default_MemcacheSetRequest_Item_SetPolicy MemcacheSetRequest_SetPolicy = MemcacheSetRequest_SET
+const Default_MemcacheSetRequest_Item_ExpirationTime uint32 = 0
+
+func (m *MemcacheSetRequest_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetSetPolicy() MemcacheSetRequest_SetPolicy {
+ if m != nil && m.SetPolicy != nil {
+ return *m.SetPolicy
+ }
+ return Default_MemcacheSetRequest_Item_SetPolicy
+}
+
+func (m *MemcacheSetRequest_Item) GetExpirationTime() uint32 {
+ if m != nil && m.ExpirationTime != nil {
+ return *m.ExpirationTime
+ }
+ return Default_MemcacheSetRequest_Item_ExpirationTime
+}
+
+func (m *MemcacheSetRequest_Item) GetCasId() uint64 {
+ if m != nil && m.CasId != nil {
+ return *m.CasId
+ }
+ return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetForCas() bool {
+ if m != nil && m.ForCas != nil {
+ return *m.ForCas
+ }
+ return false
+}
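+
+// Hedged compare-and-swap sketch (caller code; key, newValue and casID are
+// placeholders): a CAS write echoes back the cas_id obtained from a get that
+// requested it, e.g.
+//
+//	getReq := &MemcacheGetRequest{Key: [][]byte{key}, ForCas: proto.Bool(true)}
+//	// ... issue getReq, read item.GetCasId() from the response into casID ...
+//	setReq := &MemcacheSetRequest{Item: []*MemcacheSetRequest_Item{{
+//		Key:       key,
+//		Value:     newValue,
+//		SetPolicy: MemcacheSetRequest_CAS.Enum(),
+//		CasId:     proto.Uint64(casID),
+//		ForCas:    proto.Bool(true),
+//	}}}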
+
+type MemcacheSetResponse struct {
+ SetStatus []MemcacheSetResponse_SetStatusCode `protobuf:"varint,1,rep,name=set_status,enum=appengine.MemcacheSetResponse_SetStatusCode" json:"set_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetResponse) Reset() { *m = MemcacheSetResponse{} }
+func (m *MemcacheSetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetResponse) ProtoMessage() {}
+
+func (m *MemcacheSetResponse) GetSetStatus() []MemcacheSetResponse_SetStatusCode {
+ if m != nil {
+ return m.SetStatus
+ }
+ return nil
+}
+
+type MemcacheDeleteRequest struct {
+ Item []*MemcacheDeleteRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest) Reset() { *m = MemcacheDeleteRequest{} }
+func (m *MemcacheDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest) ProtoMessage() {}
+
+func (m *MemcacheDeleteRequest) GetItem() []*MemcacheDeleteRequest_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheDeleteRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheDeleteRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheDeleteRequest_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ DeleteTime *uint32 `protobuf:"fixed32,3,opt,name=delete_time,def=0" json:"delete_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest_Item) Reset() { *m = MemcacheDeleteRequest_Item{} }
+func (m *MemcacheDeleteRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest_Item) ProtoMessage() {}
+
+const Default_MemcacheDeleteRequest_Item_DeleteTime uint32 = 0
+
+func (m *MemcacheDeleteRequest_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheDeleteRequest_Item) GetDeleteTime() uint32 {
+ if m != nil && m.DeleteTime != nil {
+ return *m.DeleteTime
+ }
+ return Default_MemcacheDeleteRequest_Item_DeleteTime
+}
+
+type MemcacheDeleteResponse struct {
+ DeleteStatus []MemcacheDeleteResponse_DeleteStatusCode `protobuf:"varint,1,rep,name=delete_status,enum=appengine.MemcacheDeleteResponse_DeleteStatusCode" json:"delete_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteResponse) Reset() { *m = MemcacheDeleteResponse{} }
+func (m *MemcacheDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteResponse) ProtoMessage() {}
+
+func (m *MemcacheDeleteResponse) GetDeleteStatus() []MemcacheDeleteResponse_DeleteStatusCode {
+ if m != nil {
+ return m.DeleteStatus
+ }
+ return nil
+}
+
+type MemcacheIncrementRequest struct {
+ Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Delta *uint64 `protobuf:"varint,2,opt,name=delta,def=1" json:"delta,omitempty"`
+ Direction *MemcacheIncrementRequest_Direction `protobuf:"varint,3,opt,name=direction,enum=appengine.MemcacheIncrementRequest_Direction,def=1" json:"direction,omitempty"`
+ InitialValue *uint64 `protobuf:"varint,5,opt,name=initial_value" json:"initial_value,omitempty"`
+ InitialFlags *uint32 `protobuf:"fixed32,6,opt,name=initial_flags" json:"initial_flags,omitempty"`
+ Override *AppOverride `protobuf:"bytes,7,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheIncrementRequest) Reset() { *m = MemcacheIncrementRequest{} }
+func (m *MemcacheIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementRequest) ProtoMessage() {}
+
+const Default_MemcacheIncrementRequest_Delta uint64 = 1
+const Default_MemcacheIncrementRequest_Direction MemcacheIncrementRequest_Direction = MemcacheIncrementRequest_INCREMENT
+
+func (m *MemcacheIncrementRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheIncrementRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheIncrementRequest) GetDelta() uint64 {
+ if m != nil && m.Delta != nil {
+ return *m.Delta
+ }
+ return Default_MemcacheIncrementRequest_Delta
+}
+
+func (m *MemcacheIncrementRequest) GetDirection() MemcacheIncrementRequest_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_MemcacheIncrementRequest_Direction
+}
+
+func (m *MemcacheIncrementRequest) GetInitialValue() uint64 {
+ if m != nil && m.InitialValue != nil {
+ return *m.InitialValue
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementRequest) GetInitialFlags() uint32 {
+ if m != nil && m.InitialFlags != nil {
+ return *m.InitialFlags
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheIncrementResponse struct {
+ NewValue *uint64 `protobuf:"varint,1,opt,name=new_value" json:"new_value,omitempty"`
+ IncrementStatus *MemcacheIncrementResponse_IncrementStatusCode `protobuf:"varint,2,opt,name=increment_status,enum=appengine.MemcacheIncrementResponse_IncrementStatusCode" json:"increment_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheIncrementResponse) Reset() { *m = MemcacheIncrementResponse{} }
+func (m *MemcacheIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementResponse) ProtoMessage() {}
+
+func (m *MemcacheIncrementResponse) GetNewValue() uint64 {
+ if m != nil && m.NewValue != nil {
+ return *m.NewValue
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementResponse) GetIncrementStatus() MemcacheIncrementResponse_IncrementStatusCode {
+ if m != nil && m.IncrementStatus != nil {
+ return *m.IncrementStatus
+ }
+ return MemcacheIncrementResponse_OK
+}
+
+type MemcacheBatchIncrementRequest struct {
+ NameSpace *string `protobuf:"bytes,1,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Item []*MemcacheIncrementRequest `protobuf:"bytes,2,rep,name=item" json:"item,omitempty"`
+ Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementRequest) Reset() { *m = MemcacheBatchIncrementRequest{} }
+func (m *MemcacheBatchIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementRequest) ProtoMessage() {}
+
+func (m *MemcacheBatchIncrementRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheBatchIncrementRequest) GetItem() []*MemcacheIncrementRequest {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheBatchIncrementRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheBatchIncrementResponse struct {
+ Item []*MemcacheIncrementResponse `protobuf:"bytes,1,rep,name=item" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementResponse) Reset() { *m = MemcacheBatchIncrementResponse{} }
+func (m *MemcacheBatchIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementResponse) ProtoMessage() {}
+
+func (m *MemcacheBatchIncrementResponse) GetItem() []*MemcacheIncrementResponse {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheFlushRequest struct {
+ Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheFlushRequest) Reset() { *m = MemcacheFlushRequest{} }
+func (m *MemcacheFlushRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushRequest) ProtoMessage() {}
+
+func (m *MemcacheFlushRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheFlushResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheFlushResponse) Reset() { *m = MemcacheFlushResponse{} }
+func (m *MemcacheFlushResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushResponse) ProtoMessage() {}
+
+type MemcacheStatsRequest struct {
+ Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheStatsRequest) Reset() { *m = MemcacheStatsRequest{} }
+func (m *MemcacheStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsRequest) ProtoMessage() {}
+
+func (m *MemcacheStatsRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MergedNamespaceStats struct {
+ Hits *uint64 `protobuf:"varint,1,req,name=hits" json:"hits,omitempty"`
+ Misses *uint64 `protobuf:"varint,2,req,name=misses" json:"misses,omitempty"`
+ ByteHits *uint64 `protobuf:"varint,3,req,name=byte_hits" json:"byte_hits,omitempty"`
+ Items *uint64 `protobuf:"varint,4,req,name=items" json:"items,omitempty"`
+ Bytes *uint64 `protobuf:"varint,5,req,name=bytes" json:"bytes,omitempty"`
+ OldestItemAge *uint32 `protobuf:"fixed32,6,req,name=oldest_item_age" json:"oldest_item_age,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MergedNamespaceStats) Reset() { *m = MergedNamespaceStats{} }
+func (m *MergedNamespaceStats) String() string { return proto.CompactTextString(m) }
+func (*MergedNamespaceStats) ProtoMessage() {}
+
+func (m *MergedNamespaceStats) GetHits() uint64 {
+ if m != nil && m.Hits != nil {
+ return *m.Hits
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetMisses() uint64 {
+ if m != nil && m.Misses != nil {
+ return *m.Misses
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetByteHits() uint64 {
+ if m != nil && m.ByteHits != nil {
+ return *m.ByteHits
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetItems() uint64 {
+ if m != nil && m.Items != nil {
+ return *m.Items
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetBytes() uint64 {
+ if m != nil && m.Bytes != nil {
+ return *m.Bytes
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetOldestItemAge() uint32 {
+ if m != nil && m.OldestItemAge != nil {
+ return *m.OldestItemAge
+ }
+ return 0
+}
+
+type MemcacheStatsResponse struct {
+ Stats *MergedNamespaceStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheStatsResponse) Reset() { *m = MemcacheStatsResponse{} }
+func (m *MemcacheStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsResponse) ProtoMessage() {}
+
+func (m *MemcacheStatsResponse) GetStats() *MergedNamespaceStats {
+ if m != nil {
+ return m.Stats
+ }
+ return nil
+}
+
+type MemcacheGrabTailRequest struct {
+ ItemCount *int32 `protobuf:"varint,1,req,name=item_count" json:"item_count,omitempty"`
+ NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailRequest) Reset() { *m = MemcacheGrabTailRequest{} }
+func (m *MemcacheGrabTailRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailRequest) ProtoMessage() {}
+
+func (m *MemcacheGrabTailRequest) GetItemCount() int32 {
+ if m != nil && m.ItemCount != nil {
+ return *m.ItemCount
+ }
+ return 0
+}
+
+func (m *MemcacheGrabTailRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheGrabTailRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheGrabTailResponse struct {
+ Item []*MemcacheGrabTailResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse) Reset() { *m = MemcacheGrabTailResponse{} }
+func (m *MemcacheGrabTailResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse) ProtoMessage() {}
+
+func (m *MemcacheGrabTailResponse) GetItem() []*MemcacheGrabTailResponse_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheGrabTailResponse_Item struct {
+ Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,3,opt,name=flags" json:"flags,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse_Item) Reset() { *m = MemcacheGrabTailResponse_Item{} }
+func (m *MemcacheGrabTailResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse_Item) ProtoMessage() {}
+
+func (m *MemcacheGrabTailResponse_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheGrabTailResponse_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func init() {
+}
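
Every accessor in this generated file follows the same nil-safe getter pattern: dereference a field only after checking the receiver and, for scalar fields, the field pointer, otherwise return the proto2 default. A minimal usage sketch (not part of the vendored file; the package alias is an assumption for illustration):

    package main

    import (
        "fmt"

        pb "google.golang.org/appengine/internal/memcache"
    )

    func main() {
        // Generated getters are safe to call even on a nil message;
        // this returns the proto2 default "".
        var req *pb.MemcacheSetRequest
        fmt.Println(req.GetNameSpace())

        // Unset optional fields with a declared default fall back to it:
        // an empty Item reports the SET policy.
        item := &pb.MemcacheSetRequest_Item{}
        fmt.Println(item.GetSetPolicy() == pb.Default_MemcacheSetRequest_Item_SetPolicy) // true
    }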
diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto
new file mode 100644
index 000000000..5f0edcdc7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto
@@ -0,0 +1,165 @@
+syntax = "proto2";
+option go_package = "memcache";
+
+package appengine;
+
+message MemcacheServiceError {
+ enum ErrorCode {
+ OK = 0;
+ UNSPECIFIED_ERROR = 1;
+ NAMESPACE_NOT_SET = 2;
+ PERMISSION_DENIED = 3;
+ INVALID_VALUE = 6;
+ }
+}
+
+message AppOverride {
+ required string app_id = 1;
+
+ optional int32 num_memcacheg_backends = 2 [deprecated=true];
+ optional bool ignore_shardlock = 3 [deprecated=true];
+ optional string memcache_pool_hint = 4 [deprecated=true];
+ optional bytes memcache_sharding_strategy = 5 [deprecated=true];
+}
+
+message MemcacheGetRequest {
+ repeated bytes key = 1;
+ optional string name_space = 2 [default = ""];
+ optional bool for_cas = 4;
+ optional AppOverride override = 5;
+}
+
+message MemcacheGetResponse {
+ repeated group Item = 1 {
+ required bytes key = 2;
+ required bytes value = 3;
+ optional fixed32 flags = 4;
+ optional fixed64 cas_id = 5;
+ optional int32 expires_in_seconds = 6;
+ }
+}
+
+message MemcacheSetRequest {
+ enum SetPolicy {
+ SET = 1;
+ ADD = 2;
+ REPLACE = 3;
+ CAS = 4;
+ }
+ repeated group Item = 1 {
+ required bytes key = 2;
+ required bytes value = 3;
+
+ optional fixed32 flags = 4;
+ optional SetPolicy set_policy = 5 [default = SET];
+ optional fixed32 expiration_time = 6 [default = 0];
+
+ optional fixed64 cas_id = 8;
+ optional bool for_cas = 9;
+ }
+ optional string name_space = 7 [default = ""];
+ optional AppOverride override = 10;
+}
+
+message MemcacheSetResponse {
+ enum SetStatusCode {
+ STORED = 1;
+ NOT_STORED = 2;
+ ERROR = 3;
+ EXISTS = 4;
+ }
+ repeated SetStatusCode set_status = 1;
+}
+
+message MemcacheDeleteRequest {
+ repeated group Item = 1 {
+ required bytes key = 2;
+ optional fixed32 delete_time = 3 [default = 0];
+ }
+ optional string name_space = 4 [default = ""];
+ optional AppOverride override = 5;
+}
+
+message MemcacheDeleteResponse {
+ enum DeleteStatusCode {
+ DELETED = 1;
+ NOT_FOUND = 2;
+ }
+ repeated DeleteStatusCode delete_status = 1;
+}
+
+message MemcacheIncrementRequest {
+ enum Direction {
+ INCREMENT = 1;
+ DECREMENT = 2;
+ }
+ required bytes key = 1;
+ optional string name_space = 4 [default = ""];
+
+ optional uint64 delta = 2 [default = 1];
+ optional Direction direction = 3 [default = INCREMENT];
+
+ optional uint64 initial_value = 5;
+ optional fixed32 initial_flags = 6;
+ optional AppOverride override = 7;
+}
+
+message MemcacheIncrementResponse {
+ enum IncrementStatusCode {
+ OK = 1;
+ NOT_CHANGED = 2;
+ ERROR = 3;
+ }
+
+ optional uint64 new_value = 1;
+ optional IncrementStatusCode increment_status = 2;
+}
+
+message MemcacheBatchIncrementRequest {
+ optional string name_space = 1 [default = ""];
+ repeated MemcacheIncrementRequest item = 2;
+ optional AppOverride override = 3;
+}
+
+message MemcacheBatchIncrementResponse {
+ repeated MemcacheIncrementResponse item = 1;
+}
+
+message MemcacheFlushRequest {
+ optional AppOverride override = 1;
+}
+
+message MemcacheFlushResponse {
+}
+
+message MemcacheStatsRequest {
+ optional AppOverride override = 1;
+}
+
+message MergedNamespaceStats {
+ required uint64 hits = 1;
+ required uint64 misses = 2;
+ required uint64 byte_hits = 3;
+
+ required uint64 items = 4;
+ required uint64 bytes = 5;
+
+ required fixed32 oldest_item_age = 6;
+}
+
+message MemcacheStatsResponse {
+ optional MergedNamespaceStats stats = 1;
+}
+
+message MemcacheGrabTailRequest {
+ required int32 item_count = 1;
+ optional string name_space = 2 [default = ""];
+ optional AppOverride override = 3;
+}
+
+message MemcacheGrabTailResponse {
+ repeated group Item = 1 {
+ required bytes value = 2;
+ optional fixed32 flags = 3;
+ }
+}
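
As a sketch of this schema from the Go side (not part of the diff): proto2 optional scalars map to pointers, hence the proto.String/proto.Uint32 helpers and the generated Enum() allocator; the helper function and literal values below are assumptions.

    import (
        proto "github.com/golang/protobuf/proto"
        pb "google.golang.org/appengine/internal/memcache"
    )

    // buildSetRequest is a hypothetical helper showing the pointer-heavy proto2 style.
    func buildSetRequest() *pb.MemcacheSetRequest {
        item := &pb.MemcacheSetRequest_Item{
            Key:            []byte("greeting"),
            Value:          []byte("hello"),
            SetPolicy:      pb.MemcacheSetRequest_ADD.Enum(), // overrides the SET default
            ExpirationTime: proto.Uint32(300),                // default is 0
        }
        return &pb.MemcacheSetRequest{
            Item:      []*pb.MemcacheSetRequest_Item{item},
            NameSpace: proto.String("demo"),
        }
    }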
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 000000000..9cc1f71d1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,61 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file has code for accessing metadata.
+//
+// References:
+// https://cloud.google.com/compute/docs/metadata
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+)
+
+const (
+ metadataHost = "metadata"
+ metadataPath = "/computeMetadata/v1/"
+)
+
+var (
+ metadataRequestHeaders = http.Header{
+ "Metadata-Flavor": []string{"Google"},
+ }
+)
+
+// TODO(dsymonds): Do we need to support default values, like Python?
+func mustGetMetadata(key string) []byte {
+ b, err := getMetadata(key)
+ if err != nil {
+ log.Fatalf("Metadata fetch failed: %v", err)
+ }
+ return b
+}
+
+func getMetadata(key string) ([]byte, error) {
+ // TODO(dsymonds): May need to use url.Parse to support keys with query args.
+ req := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: metadataHost,
+ Path: metadataPath + key,
+ },
+ Header: metadataRequestHeaders,
+ Host: metadataHost,
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
+ }
+ return ioutil.ReadAll(resp.Body)
+}
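
A brief sketch of how this helper is typically called from inside the package; the key is a standard Compute Engine metadata path, but the wrapper function itself is hypothetical:

    // projectID is an illustrative caller. Prefer getMetadata over
    // mustGetMetadata when the process should survive a metadata failure.
    func projectID() (string, error) {
        b, err := getMetadata("project/project-id") // resolves to /computeMetadata/v1/project/project-id
        if err != nil {
            return "", err
        }
        return string(b), nil
    }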
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
new file mode 100644
index 000000000..a0145ed31
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
@@ -0,0 +1,375 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/modules/modules_service.proto
+// DO NOT EDIT!
+
+/*
+Package modules is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/modules/modules_service.proto
+
+It has these top-level messages:
+ ModulesServiceError
+ GetModulesRequest
+ GetModulesResponse
+ GetVersionsRequest
+ GetVersionsResponse
+ GetDefaultVersionRequest
+ GetDefaultVersionResponse
+ GetNumInstancesRequest
+ GetNumInstancesResponse
+ SetNumInstancesRequest
+ SetNumInstancesResponse
+ StartModuleRequest
+ StartModuleResponse
+ StopModuleRequest
+ StopModuleResponse
+ GetHostnameRequest
+ GetHostnameResponse
+*/
+package modules
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ModulesServiceError_ErrorCode int32
+
+const (
+ ModulesServiceError_OK ModulesServiceError_ErrorCode = 0
+ ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1
+ ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2
+ ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
+ ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4
+ ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5
+)
+
+var ModulesServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_MODULE",
+ 2: "INVALID_VERSION",
+ 3: "INVALID_INSTANCES",
+ 4: "TRANSIENT_ERROR",
+ 5: "UNEXPECTED_STATE",
+}
+var ModulesServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_MODULE": 1,
+ "INVALID_VERSION": 2,
+ "INVALID_INSTANCES": 3,
+ "TRANSIENT_ERROR": 4,
+ "UNEXPECTED_STATE": 5,
+}
+
+func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
+ p := new(ModulesServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ModulesServiceError_ErrorCode) String() string {
+ return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ModulesServiceError_ErrorCode(value)
+ return nil
+}
+
+type ModulesServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
+func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ModulesServiceError) ProtoMessage() {}
+
+type GetModulesRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
+func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetModulesRequest) ProtoMessage() {}
+
+type GetModulesResponse struct {
+ Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
+func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetModulesResponse) ProtoMessage() {}
+
+func (m *GetModulesResponse) GetModule() []string {
+ if m != nil {
+ return m.Module
+ }
+ return nil
+}
+
+type GetVersionsRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
+func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsRequest) ProtoMessage() {}
+
+func (m *GetVersionsRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetVersionsResponse struct {
+ Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
+func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsResponse) ProtoMessage() {}
+
+func (m *GetVersionsResponse) GetVersion() []string {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type GetDefaultVersionRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
+func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionRequest) ProtoMessage() {}
+
+func (m *GetDefaultVersionRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetDefaultVersionResponse struct {
+ Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
+func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionResponse) ProtoMessage() {}
+
+func (m *GetDefaultVersionResponse) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} }
+func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesRequest) ProtoMessage() {}
+
+func (m *GetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesResponse struct {
+ Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} }
+func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesResponse) ProtoMessage() {}
+
+func (m *GetNumInstancesResponse) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} }
+func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesRequest) ProtoMessage() {}
+
+func (m *SetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} }
+func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesResponse) ProtoMessage() {}
+
+type StartModuleRequest struct {
+ Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} }
+func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StartModuleRequest) ProtoMessage() {}
+
+func (m *StartModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StartModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StartModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} }
+func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StartModuleResponse) ProtoMessage() {}
+
+type StopModuleRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} }
+func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StopModuleRequest) ProtoMessage() {}
+
+func (m *StopModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StopModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StopModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} }
+func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StopModuleResponse) ProtoMessage() {}
+
+type GetHostnameRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} }
+func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameRequest) ProtoMessage() {}
+
+func (m *GetHostnameRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetInstance() string {
+ if m != nil && m.Instance != nil {
+ return *m.Instance
+ }
+ return ""
+}
+
+type GetHostnameResponse struct {
+ Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} }
+func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameResponse) ProtoMessage() {}
+
+func (m *GetHostnameResponse) GetHostname() string {
+ if m != nil && m.Hostname != nil {
+ return *m.Hostname
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
new file mode 100644
index 000000000..d29f0065a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
@@ -0,0 +1,80 @@
+syntax = "proto2";
+option go_package = "modules";
+
+package appengine;
+
+message ModulesServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_MODULE = 1;
+ INVALID_VERSION = 2;
+ INVALID_INSTANCES = 3;
+ TRANSIENT_ERROR = 4;
+ UNEXPECTED_STATE = 5;
+ }
+}
+
+message GetModulesRequest {
+}
+
+message GetModulesResponse {
+ repeated string module = 1;
+}
+
+message GetVersionsRequest {
+ optional string module = 1;
+}
+
+message GetVersionsResponse {
+ repeated string version = 1;
+}
+
+message GetDefaultVersionRequest {
+ optional string module = 1;
+}
+
+message GetDefaultVersionResponse {
+ required string version = 1;
+}
+
+message GetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message GetNumInstancesResponse {
+ required int64 instances = 1;
+}
+
+message SetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+ required int64 instances = 3;
+}
+
+message SetNumInstancesResponse {}
+
+message StartModuleRequest {
+ required string module = 1;
+ required string version = 2;
+}
+
+message StartModuleResponse {}
+
+message StopModuleRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message StopModuleResponse {}
+
+message GetHostnameRequest {
+ optional string module = 1;
+ optional string version = 2;
+ optional string instance = 3;
+}
+
+message GetHostnameResponse {
+ required string hostname = 1;
+}
+
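A sketch of marshalling StartModuleRequest, whose two fields are required in proto2 and must therefore be set before encoding (the helper below is an assumption for illustration):

    import (
        "log"

        proto "github.com/golang/protobuf/proto"
        pb "google.golang.org/appengine/internal/modules"
    )

    func marshalStart() []byte {
        req := &pb.StartModuleRequest{
            Module:  proto.String("default"),
            Version: proto.String("v1"),
        }
        data, err := proto.Marshal(req)
        if err != nil {
            // leaving a required field unset would surface as an error here
            log.Fatal(err)
        }
        return data
    }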
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
new file mode 100644
index 000000000..3b94cf0c6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements a network dialer that limits the number of concurrent connections.
+// It is only used for API calls.
+
+import (
+ "log"
+ "net"
+ "runtime"
+ "sync"
+ "time"
+)
+
+var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
+
+func limitRelease() {
+ // non-blocking
+ select {
+ case <-limitSem:
+ default:
+ // This should not normally happen.
+ log.Print("appengine: unbalanced limitSem release!")
+ }
+}
+
+func limitDial(network, addr string) (net.Conn, error) {
+ limitSem <- 1
+
+ // Dial with a timeout in case the API host is MIA.
+ // The connection should normally be very fast.
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
+ if err != nil {
+ limitRelease()
+ return nil, err
+ }
+ lc := &limitConn{Conn: conn}
+ runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
+ return lc, nil
+}
+
+type limitConn struct {
+ close sync.Once
+ net.Conn
+}
+
+func (lc *limitConn) Close() error {
+ defer lc.close.Do(func() {
+ limitRelease()
+ runtime.SetFinalizer(lc, nil)
+ })
+ return lc.Conn.Close()
+}
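
To illustrate where limitDial fits: its signature matches http.Transport's Dial hook, so wiring it into a client makes every new connection acquire limitSem first. A sketch in the same package (the client variable is an assumption, and net/http would need to be imported):

    // apiClient is a hypothetical client whose connections share the
    // cap(limitSem) budget; limitConn releases a slot when closed.
    var apiClient = &http.Client{
        Transport: &http.Transport{
            Dial: limitDial,
        },
    }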
diff --git a/vendor/google.golang.org/appengine/internal/net_test.go b/vendor/google.golang.org/appengine/internal/net_test.go
new file mode 100644
index 000000000..24da8bb2b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net_test.go
@@ -0,0 +1,58 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "sync"
+ "testing"
+ "time"
+
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+)
+
+func TestDialLimit(t *testing.T) {
+ // Fill up semaphore with false acquisitions to permit only two TCP connections at a time.
+ // We don't replace limitSem because that results in a data race when net/http lazily closes connections.
+ nFake := cap(limitSem) - 2
+ for i := 0; i < nFake; i++ {
+ limitSem <- 1
+ }
+ defer func() {
+ for i := 0; i < nFake; i++ {
+ <-limitSem
+ }
+ }()
+
+ f, c, cleanup := setup() // setup is in api_test.go
+ defer cleanup()
+ f.hang = make(chan int)
+
+ // If we make two RunSlowly RPCs (which will wait for f.hang to be strobed),
+ // then the simple Non200 RPC should hang.
+ var wg sync.WaitGroup
+ wg.Add(2)
+ for i := 0; i < 2; i++ {
+ go func() {
+ defer wg.Done()
+ Call(toContext(c), "errors", "RunSlowly", &basepb.VoidProto{}, &basepb.VoidProto{})
+ }()
+ }
+ time.Sleep(50 * time.Millisecond) // let those two RPCs start
+
+ ctx, _ := netcontext.WithTimeout(toContext(c), 50*time.Millisecond)
+ err := Call(ctx, "errors", "Non200", &basepb.VoidProto{}, &basepb.VoidProto{})
+ if err != errTimeout {
+ t.Errorf("Non200 RPC returned with err %v, want errTimeout", err)
+ }
+
+ // Drain the two RunSlowly calls.
+ f.hang <- 1
+ f.hang <- 1
+ wg.Wait()
+}
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
new file mode 100755
index 000000000..2fdb546a6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+#
+# This script rebuilds the generated code for the protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+
+PKG=google.golang.org/appengine
+
+function die() {
+ echo 1>&2 $*
+ exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go protoc protoc-gen-go; do
+ q=$(which $tool) || die "didn't find $tool"
+ echo 1>&2 "$tool: $q"
+done
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+
+# Run protoc once per package.
+for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
+ echo 1>&2 "* $dir"
+ protoc --go_out=. $dir/*.proto
+done
+
+for f in $(find $PKG/internal -name '*.pb.go'); do
+ # Remove proto.RegisterEnum calls.
+ # These cause duplicate registration panics when these packages
+ # are used on classic App Engine. proto.RegisterEnum only affects
+ # parsing the text format; we don't care about that.
+ # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
+ sed -i '/proto.RegisterEnum/d' $f
+done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
new file mode 100644
index 000000000..526bd39e6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -0,0 +1,231 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
+// DO NOT EDIT!
+
+/*
+Package remote_api is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/remote_api/remote_api.proto
+
+It has these top-level messages:
+ Request
+ ApplicationError
+ RpcError
+ Response
+*/
+package remote_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type RpcError_ErrorCode int32
+
+const (
+ RpcError_UNKNOWN RpcError_ErrorCode = 0
+ RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
+ RpcError_PARSE_ERROR RpcError_ErrorCode = 2
+ RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
+ RpcError_OVER_QUOTA RpcError_ErrorCode = 4
+ RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
+ RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
+ RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
+ RpcError_BAD_REQUEST RpcError_ErrorCode = 8
+ RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
+ RpcError_CANCELLED RpcError_ErrorCode = 10
+ RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
+ RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
+)
+
+var RpcError_ErrorCode_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "CALL_NOT_FOUND",
+ 2: "PARSE_ERROR",
+ 3: "SECURITY_VIOLATION",
+ 4: "OVER_QUOTA",
+ 5: "REQUEST_TOO_LARGE",
+ 6: "CAPABILITY_DISABLED",
+ 7: "FEATURE_DISABLED",
+ 8: "BAD_REQUEST",
+ 9: "RESPONSE_TOO_LARGE",
+ 10: "CANCELLED",
+ 11: "REPLAY_ERROR",
+ 12: "DEADLINE_EXCEEDED",
+}
+var RpcError_ErrorCode_value = map[string]int32{
+ "UNKNOWN": 0,
+ "CALL_NOT_FOUND": 1,
+ "PARSE_ERROR": 2,
+ "SECURITY_VIOLATION": 3,
+ "OVER_QUOTA": 4,
+ "REQUEST_TOO_LARGE": 5,
+ "CAPABILITY_DISABLED": 6,
+ "FEATURE_DISABLED": 7,
+ "BAD_REQUEST": 8,
+ "RESPONSE_TOO_LARGE": 9,
+ "CANCELLED": 10,
+ "REPLAY_ERROR": 11,
+ "DEADLINE_EXCEEDED": 12,
+}
+
+func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
+ p := new(RpcError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RpcError_ErrorCode) String() string {
+ return proto.EnumName(RpcError_ErrorCode_name, int32(x))
+}
+func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RpcError_ErrorCode(value)
+ return nil
+}
+
+type Request struct {
+ ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"`
+ Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
+ Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
+ RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+
+func (m *Request) GetServiceName() string {
+ if m != nil && m.ServiceName != nil {
+ return *m.ServiceName
+ }
+ return ""
+}
+
+func (m *Request) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *Request) GetRequest() []byte {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *Request) GetRequestId() string {
+ if m != nil && m.RequestId != nil {
+ return *m.RequestId
+ }
+ return ""
+}
+
+type ApplicationError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ApplicationError) Reset() { *m = ApplicationError{} }
+func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
+func (*ApplicationError) ProtoMessage() {}
+
+func (m *ApplicationError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *ApplicationError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type RpcError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RpcError) Reset() { *m = RpcError{} }
+func (m *RpcError) String() string { return proto.CompactTextString(m) }
+func (*RpcError) ProtoMessage() {}
+
+func (m *RpcError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *RpcError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type Response struct {
+ Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
+ Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
+ ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"`
+ JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"`
+ RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Response) Reset() { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage() {}
+
+func (m *Response) GetResponse() []byte {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *Response) GetException() []byte {
+ if m != nil {
+ return m.Exception
+ }
+ return nil
+}
+
+func (m *Response) GetApplicationError() *ApplicationError {
+ if m != nil {
+ return m.ApplicationError
+ }
+ return nil
+}
+
+func (m *Response) GetJavaException() []byte {
+ if m != nil {
+ return m.JavaException
+ }
+ return nil
+}
+
+func (m *Response) GetRpcError() *RpcError {
+ if m != nil {
+ return m.RpcError
+ }
+ return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 000000000..f21763a4e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
+syntax = "proto2";
+option go_package = "remote_api";
+
+package remote_api;
+
+message Request {
+ required string service_name = 2;
+ required string method = 3;
+ required bytes request = 4;
+ optional string request_id = 5;
+}
+
+message ApplicationError {
+ required int32 code = 1;
+ required string detail = 2;
+}
+
+message RpcError {
+ enum ErrorCode {
+ UNKNOWN = 0;
+ CALL_NOT_FOUND = 1;
+ PARSE_ERROR = 2;
+ SECURITY_VIOLATION = 3;
+ OVER_QUOTA = 4;
+ REQUEST_TOO_LARGE = 5;
+ CAPABILITY_DISABLED = 6;
+ FEATURE_DISABLED = 7;
+ BAD_REQUEST = 8;
+ RESPONSE_TOO_LARGE = 9;
+ CANCELLED = 10;
+ REPLAY_ERROR = 11;
+ DEADLINE_EXCEEDED = 12;
+ }
+ required int32 code = 1;
+ optional string detail = 2;
+}
+
+message Response {
+ optional bytes response = 1;
+ optional bytes exception = 2;
+ optional ApplicationError application_error = 3;
+ optional bytes java_exception = 4;
+ optional RpcError rpc_error = 5;
+}
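
A sketch of the envelope in use: the inner payload is an already-marshalled service message, and the wrapper only names the target service and method (the helper and its arguments are assumptions for illustration):

    import (
        proto "github.com/golang/protobuf/proto"
        pb "google.golang.org/appengine/internal/remote_api"
    )

    // wrap is a hypothetical helper building the remote_api envelope.
    func wrap(service, method string, payload []byte) *pb.Request {
        return &pb.Request{
            ServiceName: proto.String(service),
            Method:      proto.String(method),
            Request:     payload, // e.g. a marshalled MemcacheGetRequest
        }
    }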
diff --git a/vendor/google.golang.org/appengine/internal/search/search.pb.go b/vendor/google.golang.org/appengine/internal/search/search.pb.go
new file mode 100644
index 000000000..3b280e4a1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/search/search.pb.go
@@ -0,0 +1,2488 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/search/search.proto
+// DO NOT EDIT!
+
+/*
+Package search is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/search/search.proto
+
+It has these top-level messages:
+ Scope
+ Entry
+ AccessControlList
+ FieldValue
+ Field
+ FieldTypes
+ IndexShardSettings
+ FacetValue
+ Facet
+ DocumentMetadata
+ Document
+ SearchServiceError
+ RequestStatus
+ IndexSpec
+ IndexMetadata
+ IndexDocumentParams
+ IndexDocumentRequest
+ IndexDocumentResponse
+ DeleteDocumentParams
+ DeleteDocumentRequest
+ DeleteDocumentResponse
+ ListDocumentsParams
+ ListDocumentsRequest
+ ListDocumentsResponse
+ ListIndexesParams
+ ListIndexesRequest
+ ListIndexesResponse
+ DeleteSchemaParams
+ DeleteSchemaRequest
+ DeleteSchemaResponse
+ SortSpec
+ ScorerSpec
+ FieldSpec
+ FacetRange
+ FacetRequestParam
+ FacetAutoDetectParam
+ FacetRequest
+ FacetRefinement
+ SearchParams
+ SearchRequest
+ FacetResultValue
+ FacetResult
+ SearchResult
+ SearchResponse
+*/
+package search
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Scope_Type int32
+
+const (
+ Scope_USER_BY_CANONICAL_ID Scope_Type = 1
+ Scope_USER_BY_EMAIL Scope_Type = 2
+ Scope_GROUP_BY_CANONICAL_ID Scope_Type = 3
+ Scope_GROUP_BY_EMAIL Scope_Type = 4
+ Scope_GROUP_BY_DOMAIN Scope_Type = 5
+ Scope_ALL_USERS Scope_Type = 6
+ Scope_ALL_AUTHENTICATED_USERS Scope_Type = 7
+)
+
+var Scope_Type_name = map[int32]string{
+ 1: "USER_BY_CANONICAL_ID",
+ 2: "USER_BY_EMAIL",
+ 3: "GROUP_BY_CANONICAL_ID",
+ 4: "GROUP_BY_EMAIL",
+ 5: "GROUP_BY_DOMAIN",
+ 6: "ALL_USERS",
+ 7: "ALL_AUTHENTICATED_USERS",
+}
+var Scope_Type_value = map[string]int32{
+ "USER_BY_CANONICAL_ID": 1,
+ "USER_BY_EMAIL": 2,
+ "GROUP_BY_CANONICAL_ID": 3,
+ "GROUP_BY_EMAIL": 4,
+ "GROUP_BY_DOMAIN": 5,
+ "ALL_USERS": 6,
+ "ALL_AUTHENTICATED_USERS": 7,
+}
+
+func (x Scope_Type) Enum() *Scope_Type {
+ p := new(Scope_Type)
+ *p = x
+ return p
+}
+func (x Scope_Type) String() string {
+ return proto.EnumName(Scope_Type_name, int32(x))
+}
+func (x *Scope_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Scope_Type_value, data, "Scope_Type")
+ if err != nil {
+ return err
+ }
+ *x = Scope_Type(value)
+ return nil
+}
+func (Scope_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
+
+type Entry_Permission int32
+
+const (
+ Entry_READ Entry_Permission = 1
+ Entry_WRITE Entry_Permission = 2
+ Entry_FULL_CONTROL Entry_Permission = 3
+)
+
+var Entry_Permission_name = map[int32]string{
+ 1: "READ",
+ 2: "WRITE",
+ 3: "FULL_CONTROL",
+}
+var Entry_Permission_value = map[string]int32{
+ "READ": 1,
+ "WRITE": 2,
+ "FULL_CONTROL": 3,
+}
+
+func (x Entry_Permission) Enum() *Entry_Permission {
+ p := new(Entry_Permission)
+ *p = x
+ return p
+}
+func (x Entry_Permission) String() string {
+ return proto.EnumName(Entry_Permission_name, int32(x))
+}
+func (x *Entry_Permission) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Entry_Permission_value, data, "Entry_Permission")
+ if err != nil {
+ return err
+ }
+ *x = Entry_Permission(value)
+ return nil
+}
+func (Entry_Permission) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
+
+type FieldValue_ContentType int32
+
+const (
+ FieldValue_TEXT FieldValue_ContentType = 0
+ FieldValue_HTML FieldValue_ContentType = 1
+ FieldValue_ATOM FieldValue_ContentType = 2
+ FieldValue_DATE FieldValue_ContentType = 3
+ FieldValue_NUMBER FieldValue_ContentType = 4
+ FieldValue_GEO FieldValue_ContentType = 5
+)
+
+var FieldValue_ContentType_name = map[int32]string{
+ 0: "TEXT",
+ 1: "HTML",
+ 2: "ATOM",
+ 3: "DATE",
+ 4: "NUMBER",
+ 5: "GEO",
+}
+var FieldValue_ContentType_value = map[string]int32{
+ "TEXT": 0,
+ "HTML": 1,
+ "ATOM": 2,
+ "DATE": 3,
+ "NUMBER": 4,
+ "GEO": 5,
+}
+
+func (x FieldValue_ContentType) Enum() *FieldValue_ContentType {
+ p := new(FieldValue_ContentType)
+ *p = x
+ return p
+}
+func (x FieldValue_ContentType) String() string {
+ return proto.EnumName(FieldValue_ContentType_name, int32(x))
+}
+func (x *FieldValue_ContentType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldValue_ContentType_value, data, "FieldValue_ContentType")
+ if err != nil {
+ return err
+ }
+ *x = FieldValue_ContentType(value)
+ return nil
+}
+func (FieldValue_ContentType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
+
+type FacetValue_ContentType int32
+
+const (
+ FacetValue_ATOM FacetValue_ContentType = 2
+ FacetValue_NUMBER FacetValue_ContentType = 4
+)
+
+var FacetValue_ContentType_name = map[int32]string{
+ 2: "ATOM",
+ 4: "NUMBER",
+}
+var FacetValue_ContentType_value = map[string]int32{
+ "ATOM": 2,
+ "NUMBER": 4,
+}
+
+func (x FacetValue_ContentType) Enum() *FacetValue_ContentType {
+ p := new(FacetValue_ContentType)
+ *p = x
+ return p
+}
+func (x FacetValue_ContentType) String() string {
+ return proto.EnumName(FacetValue_ContentType_name, int32(x))
+}
+func (x *FacetValue_ContentType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FacetValue_ContentType_value, data, "FacetValue_ContentType")
+ if err != nil {
+ return err
+ }
+ *x = FacetValue_ContentType(value)
+ return nil
+}
+func (FacetValue_ContentType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} }
+
+type Document_OrderIdSource int32
+
+const (
+ Document_DEFAULTED Document_OrderIdSource = 0
+ Document_SUPPLIED Document_OrderIdSource = 1
+)
+
+var Document_OrderIdSource_name = map[int32]string{
+ 0: "DEFAULTED",
+ 1: "SUPPLIED",
+}
+var Document_OrderIdSource_value = map[string]int32{
+ "DEFAULTED": 0,
+ "SUPPLIED": 1,
+}
+
+func (x Document_OrderIdSource) Enum() *Document_OrderIdSource {
+ p := new(Document_OrderIdSource)
+ *p = x
+ return p
+}
+func (x Document_OrderIdSource) String() string {
+ return proto.EnumName(Document_OrderIdSource_name, int32(x))
+}
+func (x *Document_OrderIdSource) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Document_OrderIdSource_value, data, "Document_OrderIdSource")
+ if err != nil {
+ return err
+ }
+ *x = Document_OrderIdSource(value)
+ return nil
+}
+func (Document_OrderIdSource) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} }
+
+type Document_Storage int32
+
+const (
+ Document_DISK Document_Storage = 0
+)
+
+var Document_Storage_name = map[int32]string{
+ 0: "DISK",
+}
+var Document_Storage_value = map[string]int32{
+ "DISK": 0,
+}
+
+func (x Document_Storage) Enum() *Document_Storage {
+ p := new(Document_Storage)
+ *p = x
+ return p
+}
+func (x Document_Storage) String() string {
+ return proto.EnumName(Document_Storage_name, int32(x))
+}
+func (x *Document_Storage) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Document_Storage_value, data, "Document_Storage")
+ if err != nil {
+ return err
+ }
+ *x = Document_Storage(value)
+ return nil
+}
+func (Document_Storage) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 1} }
+
+type SearchServiceError_ErrorCode int32
+
+const (
+ SearchServiceError_OK SearchServiceError_ErrorCode = 0
+ SearchServiceError_INVALID_REQUEST SearchServiceError_ErrorCode = 1
+ SearchServiceError_TRANSIENT_ERROR SearchServiceError_ErrorCode = 2
+ SearchServiceError_INTERNAL_ERROR SearchServiceError_ErrorCode = 3
+ SearchServiceError_PERMISSION_DENIED SearchServiceError_ErrorCode = 4
+ SearchServiceError_TIMEOUT SearchServiceError_ErrorCode = 5
+ SearchServiceError_CONCURRENT_TRANSACTION SearchServiceError_ErrorCode = 6
+)
+
+var SearchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "TRANSIENT_ERROR",
+ 3: "INTERNAL_ERROR",
+ 4: "PERMISSION_DENIED",
+ 5: "TIMEOUT",
+ 6: "CONCURRENT_TRANSACTION",
+}
+var SearchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "TRANSIENT_ERROR": 2,
+ "INTERNAL_ERROR": 3,
+ "PERMISSION_DENIED": 4,
+ "TIMEOUT": 5,
+ "CONCURRENT_TRANSACTION": 6,
+}
+
+func (x SearchServiceError_ErrorCode) Enum() *SearchServiceError_ErrorCode {
+ p := new(SearchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x SearchServiceError_ErrorCode) String() string {
+ return proto.EnumName(SearchServiceError_ErrorCode_name, int32(x))
+}
+func (x *SearchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchServiceError_ErrorCode_value, data, "SearchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = SearchServiceError_ErrorCode(value)
+ return nil
+}
+func (SearchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{11, 0}
+}
+
+type IndexSpec_Consistency int32
+
+const (
+ IndexSpec_GLOBAL IndexSpec_Consistency = 0
+ IndexSpec_PER_DOCUMENT IndexSpec_Consistency = 1
+)
+
+var IndexSpec_Consistency_name = map[int32]string{
+ 0: "GLOBAL",
+ 1: "PER_DOCUMENT",
+}
+var IndexSpec_Consistency_value = map[string]int32{
+ "GLOBAL": 0,
+ "PER_DOCUMENT": 1,
+}
+
+func (x IndexSpec_Consistency) Enum() *IndexSpec_Consistency {
+ p := new(IndexSpec_Consistency)
+ *p = x
+ return p
+}
+func (x IndexSpec_Consistency) String() string {
+ return proto.EnumName(IndexSpec_Consistency_name, int32(x))
+}
+func (x *IndexSpec_Consistency) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Consistency_value, data, "IndexSpec_Consistency")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Consistency(value)
+ return nil
+}
+func (IndexSpec_Consistency) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} }
+
+type IndexSpec_Source int32
+
+const (
+ IndexSpec_SEARCH IndexSpec_Source = 0
+ IndexSpec_DATASTORE IndexSpec_Source = 1
+ IndexSpec_CLOUD_STORAGE IndexSpec_Source = 2
+)
+
+var IndexSpec_Source_name = map[int32]string{
+ 0: "SEARCH",
+ 1: "DATASTORE",
+ 2: "CLOUD_STORAGE",
+}
+var IndexSpec_Source_value = map[string]int32{
+ "SEARCH": 0,
+ "DATASTORE": 1,
+ "CLOUD_STORAGE": 2,
+}
+
+func (x IndexSpec_Source) Enum() *IndexSpec_Source {
+ p := new(IndexSpec_Source)
+ *p = x
+ return p
+}
+func (x IndexSpec_Source) String() string {
+ return proto.EnumName(IndexSpec_Source_name, int32(x))
+}
+func (x *IndexSpec_Source) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Source_value, data, "IndexSpec_Source")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Source(value)
+ return nil
+}
+func (IndexSpec_Source) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 1} }
+
+type IndexSpec_Mode int32
+
+const (
+ IndexSpec_PRIORITY IndexSpec_Mode = 0
+ IndexSpec_BACKGROUND IndexSpec_Mode = 1
+)
+
+var IndexSpec_Mode_name = map[int32]string{
+ 0: "PRIORITY",
+ 1: "BACKGROUND",
+}
+var IndexSpec_Mode_value = map[string]int32{
+ "PRIORITY": 0,
+ "BACKGROUND": 1,
+}
+
+func (x IndexSpec_Mode) Enum() *IndexSpec_Mode {
+ p := new(IndexSpec_Mode)
+ *p = x
+ return p
+}
+func (x IndexSpec_Mode) String() string {
+ return proto.EnumName(IndexSpec_Mode_name, int32(x))
+}
+func (x *IndexSpec_Mode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Mode_value, data, "IndexSpec_Mode")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Mode(value)
+ return nil
+}
+func (IndexSpec_Mode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 2} }
+
+type IndexDocumentParams_Freshness int32
+
+const (
+ IndexDocumentParams_SYNCHRONOUSLY IndexDocumentParams_Freshness = 0
+ IndexDocumentParams_WHEN_CONVENIENT IndexDocumentParams_Freshness = 1
+)
+
+var IndexDocumentParams_Freshness_name = map[int32]string{
+ 0: "SYNCHRONOUSLY",
+ 1: "WHEN_CONVENIENT",
+}
+var IndexDocumentParams_Freshness_value = map[string]int32{
+ "SYNCHRONOUSLY": 0,
+ "WHEN_CONVENIENT": 1,
+}
+
+func (x IndexDocumentParams_Freshness) Enum() *IndexDocumentParams_Freshness {
+ p := new(IndexDocumentParams_Freshness)
+ *p = x
+ return p
+}
+func (x IndexDocumentParams_Freshness) String() string {
+ return proto.EnumName(IndexDocumentParams_Freshness_name, int32(x))
+}
+func (x *IndexDocumentParams_Freshness) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexDocumentParams_Freshness_value, data, "IndexDocumentParams_Freshness")
+ if err != nil {
+ return err
+ }
+ *x = IndexDocumentParams_Freshness(value)
+ return nil
+}
+func (IndexDocumentParams_Freshness) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{15, 0}
+}
+
+type ScorerSpec_Scorer int32
+
+const (
+ ScorerSpec_RESCORING_MATCH_SCORER ScorerSpec_Scorer = 0
+ ScorerSpec_MATCH_SCORER ScorerSpec_Scorer = 2
+)
+
+var ScorerSpec_Scorer_name = map[int32]string{
+ 0: "RESCORING_MATCH_SCORER",
+ 2: "MATCH_SCORER",
+}
+var ScorerSpec_Scorer_value = map[string]int32{
+ "RESCORING_MATCH_SCORER": 0,
+ "MATCH_SCORER": 2,
+}
+
+func (x ScorerSpec_Scorer) Enum() *ScorerSpec_Scorer {
+ p := new(ScorerSpec_Scorer)
+ *p = x
+ return p
+}
+func (x ScorerSpec_Scorer) String() string {
+ return proto.EnumName(ScorerSpec_Scorer_name, int32(x))
+}
+func (x *ScorerSpec_Scorer) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ScorerSpec_Scorer_value, data, "ScorerSpec_Scorer")
+ if err != nil {
+ return err
+ }
+ *x = ScorerSpec_Scorer(value)
+ return nil
+}
+func (ScorerSpec_Scorer) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{31, 0} }
+
+type SearchParams_CursorType int32
+
+const (
+ SearchParams_NONE SearchParams_CursorType = 0
+ SearchParams_SINGLE SearchParams_CursorType = 1
+ SearchParams_PER_RESULT SearchParams_CursorType = 2
+)
+
+var SearchParams_CursorType_name = map[int32]string{
+ 0: "NONE",
+ 1: "SINGLE",
+ 2: "PER_RESULT",
+}
+var SearchParams_CursorType_value = map[string]int32{
+ "NONE": 0,
+ "SINGLE": 1,
+ "PER_RESULT": 2,
+}
+
+func (x SearchParams_CursorType) Enum() *SearchParams_CursorType {
+ p := new(SearchParams_CursorType)
+ *p = x
+ return p
+}
+func (x SearchParams_CursorType) String() string {
+ return proto.EnumName(SearchParams_CursorType_name, int32(x))
+}
+func (x *SearchParams_CursorType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchParams_CursorType_value, data, "SearchParams_CursorType")
+ if err != nil {
+ return err
+ }
+ *x = SearchParams_CursorType(value)
+ return nil
+}
+func (SearchParams_CursorType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 0} }
+
+type SearchParams_ParsingMode int32
+
+const (
+ SearchParams_STRICT SearchParams_ParsingMode = 0
+ SearchParams_RELAXED SearchParams_ParsingMode = 1
+)
+
+var SearchParams_ParsingMode_name = map[int32]string{
+ 0: "STRICT",
+ 1: "RELAXED",
+}
+var SearchParams_ParsingMode_value = map[string]int32{
+ "STRICT": 0,
+ "RELAXED": 1,
+}
+
+func (x SearchParams_ParsingMode) Enum() *SearchParams_ParsingMode {
+ p := new(SearchParams_ParsingMode)
+ *p = x
+ return p
+}
+func (x SearchParams_ParsingMode) String() string {
+ return proto.EnumName(SearchParams_ParsingMode_name, int32(x))
+}
+func (x *SearchParams_ParsingMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchParams_ParsingMode_value, data, "SearchParams_ParsingMode")
+ if err != nil {
+ return err
+ }
+ *x = SearchParams_ParsingMode(value)
+ return nil
+}
+func (SearchParams_ParsingMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 1} }
+
+type Scope struct {
+ Type *Scope_Type `protobuf:"varint,1,opt,name=type,enum=search.Scope_Type" json:"type,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Scope) Reset() { *m = Scope{} }
+func (m *Scope) String() string { return proto.CompactTextString(m) }
+func (*Scope) ProtoMessage() {}
+func (*Scope) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Scope) GetType() Scope_Type {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Scope_USER_BY_CANONICAL_ID
+}
+
+func (m *Scope) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Entry struct {
+ Scope *Scope `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+ Permission *Entry_Permission `protobuf:"varint,2,opt,name=permission,enum=search.Entry_Permission" json:"permission,omitempty"`
+ DisplayName *string `protobuf:"bytes,3,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Entry) Reset() { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage() {}
+func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Entry) GetScope() *Scope {
+ if m != nil {
+ return m.Scope
+ }
+ return nil
+}
+
+func (m *Entry) GetPermission() Entry_Permission {
+ if m != nil && m.Permission != nil {
+ return *m.Permission
+ }
+ return Entry_READ
+}
+
+func (m *Entry) GetDisplayName() string {
+ if m != nil && m.DisplayName != nil {
+ return *m.DisplayName
+ }
+ return ""
+}
+
+type AccessControlList struct {
+ Owner *string `protobuf:"bytes,1,opt,name=owner" json:"owner,omitempty"`
+ Entries []*Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AccessControlList) Reset() { *m = AccessControlList{} }
+func (m *AccessControlList) String() string { return proto.CompactTextString(m) }
+func (*AccessControlList) ProtoMessage() {}
+func (*AccessControlList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *AccessControlList) GetOwner() string {
+ if m != nil && m.Owner != nil {
+ return *m.Owner
+ }
+ return ""
+}
+
+func (m *AccessControlList) GetEntries() []*Entry {
+ if m != nil {
+ return m.Entries
+ }
+ return nil
+}
+
+type FieldValue struct {
+ Type *FieldValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FieldValue_ContentType,def=0" json:"type,omitempty"`
+ Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+ Geo *FieldValue_Geo `protobuf:"group,4,opt,name=Geo,json=geo" json:"geo,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldValue) Reset() { *m = FieldValue{} }
+func (m *FieldValue) String() string { return proto.CompactTextString(m) }
+func (*FieldValue) ProtoMessage() {}
+func (*FieldValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+const Default_FieldValue_Type FieldValue_ContentType = FieldValue_TEXT
+const Default_FieldValue_Language string = "en"
+
+func (m *FieldValue) GetType() FieldValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_FieldValue_Type
+}
+
+func (m *FieldValue) GetLanguage() string {
+ if m != nil && m.Language != nil {
+ return *m.Language
+ }
+ return Default_FieldValue_Language
+}
+
+func (m *FieldValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *FieldValue) GetGeo() *FieldValue_Geo {
+ if m != nil {
+ return m.Geo
+ }
+ return nil
+}
+
+type FieldValue_Geo struct {
+ Lat *float64 `protobuf:"fixed64,5,req,name=lat" json:"lat,omitempty"`
+ Lng *float64 `protobuf:"fixed64,6,req,name=lng" json:"lng,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldValue_Geo) Reset() { *m = FieldValue_Geo{} }
+func (m *FieldValue_Geo) String() string { return proto.CompactTextString(m) }
+func (*FieldValue_Geo) ProtoMessage() {}
+func (*FieldValue_Geo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
+
+func (m *FieldValue_Geo) GetLat() float64 {
+ if m != nil && m.Lat != nil {
+ return *m.Lat
+ }
+ return 0
+}
+
+func (m *FieldValue_Geo) GetLng() float64 {
+ if m != nil && m.Lng != nil {
+ return *m.Lng
+ }
+ return 0
+}
+
+type Field struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *FieldValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Field) Reset() { *m = Field{} }
+func (m *Field) String() string { return proto.CompactTextString(m) }
+func (*Field) ProtoMessage() {}
+func (*Field) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *Field) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Field) GetValue() *FieldValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type FieldTypes struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Type []FieldValue_ContentType `protobuf:"varint,2,rep,name=type,enum=search.FieldValue_ContentType" json:"type,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldTypes) Reset() { *m = FieldTypes{} }
+func (m *FieldTypes) String() string { return proto.CompactTextString(m) }
+func (*FieldTypes) ProtoMessage() {}
+func (*FieldTypes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *FieldTypes) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldTypes) GetType() []FieldValue_ContentType {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+type IndexShardSettings struct {
+ PrevNumShards []int32 `protobuf:"varint,1,rep,name=prev_num_shards,json=prevNumShards" json:"prev_num_shards,omitempty"`
+ NumShards *int32 `protobuf:"varint,2,req,name=num_shards,json=numShards,def=1" json:"num_shards,omitempty"`
+ PrevNumShardsSearchFalse []int32 `protobuf:"varint,3,rep,name=prev_num_shards_search_false,json=prevNumShardsSearchFalse" json:"prev_num_shards_search_false,omitempty"`
+ LocalReplica *string `protobuf:"bytes,4,opt,name=local_replica,json=localReplica,def=" json:"local_replica,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexShardSettings) Reset() { *m = IndexShardSettings{} }
+func (m *IndexShardSettings) String() string { return proto.CompactTextString(m) }
+func (*IndexShardSettings) ProtoMessage() {}
+func (*IndexShardSettings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+const Default_IndexShardSettings_NumShards int32 = 1
+
+func (m *IndexShardSettings) GetPrevNumShards() []int32 {
+ if m != nil {
+ return m.PrevNumShards
+ }
+ return nil
+}
+
+func (m *IndexShardSettings) GetNumShards() int32 {
+ if m != nil && m.NumShards != nil {
+ return *m.NumShards
+ }
+ return Default_IndexShardSettings_NumShards
+}
+
+func (m *IndexShardSettings) GetPrevNumShardsSearchFalse() []int32 {
+ if m != nil {
+ return m.PrevNumShardsSearchFalse
+ }
+ return nil
+}
+
+func (m *IndexShardSettings) GetLocalReplica() string {
+ if m != nil && m.LocalReplica != nil {
+ return *m.LocalReplica
+ }
+ return ""
+}
+
+type FacetValue struct {
+ Type *FacetValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FacetValue_ContentType,def=2" json:"type,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetValue) Reset() { *m = FacetValue{} }
+func (m *FacetValue) String() string { return proto.CompactTextString(m) }
+func (*FacetValue) ProtoMessage() {}
+func (*FacetValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+const Default_FacetValue_Type FacetValue_ContentType = FacetValue_ATOM
+
+func (m *FacetValue) GetType() FacetValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_FacetValue_Type
+}
+
+func (m *FacetValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+type Facet struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *FacetValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Facet) Reset() { *m = Facet{} }
+func (m *Facet) String() string { return proto.CompactTextString(m) }
+func (*Facet) ProtoMessage() {}
+func (*Facet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *Facet) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Facet) GetValue() *FacetValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type DocumentMetadata struct {
+ Version *int64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
+ CommittedStVersion *int64 `protobuf:"varint,2,opt,name=committed_st_version,json=committedStVersion" json:"committed_st_version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DocumentMetadata) Reset() { *m = DocumentMetadata{} }
+func (m *DocumentMetadata) String() string { return proto.CompactTextString(m) }
+func (*DocumentMetadata) ProtoMessage() {}
+func (*DocumentMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *DocumentMetadata) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func (m *DocumentMetadata) GetCommittedStVersion() int64 {
+ if m != nil && m.CommittedStVersion != nil {
+ return *m.CommittedStVersion
+ }
+ return 0
+}
+
+type Document struct {
+ Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+ Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+ Field []*Field `protobuf:"bytes,3,rep,name=field" json:"field,omitempty"`
+ OrderId *int32 `protobuf:"varint,4,opt,name=order_id,json=orderId" json:"order_id,omitempty"`
+ OrderIdSource *Document_OrderIdSource `protobuf:"varint,6,opt,name=order_id_source,json=orderIdSource,enum=search.Document_OrderIdSource,def=1" json:"order_id_source,omitempty"`
+ Storage *Document_Storage `protobuf:"varint,5,opt,name=storage,enum=search.Document_Storage,def=0" json:"storage,omitempty"`
+ Facet []*Facet `protobuf:"bytes,8,rep,name=facet" json:"facet,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Document) Reset() { *m = Document{} }
+func (m *Document) String() string { return proto.CompactTextString(m) }
+func (*Document) ProtoMessage() {}
+func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+const Default_Document_Language string = "en"
+const Default_Document_OrderIdSource Document_OrderIdSource = Document_SUPPLIED
+const Default_Document_Storage Document_Storage = Document_DISK
+
+func (m *Document) GetId() string {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return ""
+}
+
+func (m *Document) GetLanguage() string {
+ if m != nil && m.Language != nil {
+ return *m.Language
+ }
+ return Default_Document_Language
+}
+
+func (m *Document) GetField() []*Field {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *Document) GetOrderId() int32 {
+ if m != nil && m.OrderId != nil {
+ return *m.OrderId
+ }
+ return 0
+}
+
+func (m *Document) GetOrderIdSource() Document_OrderIdSource {
+ if m != nil && m.OrderIdSource != nil {
+ return *m.OrderIdSource
+ }
+ return Default_Document_OrderIdSource
+}
+
+func (m *Document) GetStorage() Document_Storage {
+ if m != nil && m.Storage != nil {
+ return *m.Storage
+ }
+ return Default_Document_Storage
+}
+
+func (m *Document) GetFacet() []*Facet {
+ if m != nil {
+ return m.Facet
+ }
+ return nil
+}
+
+type SearchServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchServiceError) Reset() { *m = SearchServiceError{} }
+func (m *SearchServiceError) String() string { return proto.CompactTextString(m) }
+func (*SearchServiceError) ProtoMessage() {}
+func (*SearchServiceError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+type RequestStatus struct {
+ Code *SearchServiceError_ErrorCode `protobuf:"varint,1,req,name=code,enum=search.SearchServiceError_ErrorCode" json:"code,omitempty"`
+ ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"`
+ CanonicalCode *int32 `protobuf:"varint,3,opt,name=canonical_code,json=canonicalCode" json:"canonical_code,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequestStatus) Reset() { *m = RequestStatus{} }
+func (m *RequestStatus) String() string { return proto.CompactTextString(m) }
+func (*RequestStatus) ProtoMessage() {}
+func (*RequestStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+func (m *RequestStatus) GetCode() SearchServiceError_ErrorCode {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return SearchServiceError_OK
+}
+
+func (m *RequestStatus) GetErrorDetail() string {
+ if m != nil && m.ErrorDetail != nil {
+ return *m.ErrorDetail
+ }
+ return ""
+}
+
+func (m *RequestStatus) GetCanonicalCode() int32 {
+ if m != nil && m.CanonicalCode != nil {
+ return *m.CanonicalCode
+ }
+ return 0
+}
+
+type IndexSpec struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Consistency *IndexSpec_Consistency `protobuf:"varint,2,opt,name=consistency,enum=search.IndexSpec_Consistency,def=1" json:"consistency,omitempty"`
+ Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+ Version *int32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"`
+ Source *IndexSpec_Source `protobuf:"varint,5,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ Mode *IndexSpec_Mode `protobuf:"varint,6,opt,name=mode,enum=search.IndexSpec_Mode,def=0" json:"mode,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexSpec) Reset() { *m = IndexSpec{} }
+func (m *IndexSpec) String() string { return proto.CompactTextString(m) }
+func (*IndexSpec) ProtoMessage() {}
+func (*IndexSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+const Default_IndexSpec_Consistency IndexSpec_Consistency = IndexSpec_PER_DOCUMENT
+const Default_IndexSpec_Source IndexSpec_Source = IndexSpec_SEARCH
+const Default_IndexSpec_Mode IndexSpec_Mode = IndexSpec_PRIORITY
+
+func (m *IndexSpec) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *IndexSpec) GetConsistency() IndexSpec_Consistency {
+ if m != nil && m.Consistency != nil {
+ return *m.Consistency
+ }
+ return Default_IndexSpec_Consistency
+}
+
+func (m *IndexSpec) GetNamespace() string {
+ if m != nil && m.Namespace != nil {
+ return *m.Namespace
+ }
+ return ""
+}
+
+func (m *IndexSpec) GetVersion() int32 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func (m *IndexSpec) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_IndexSpec_Source
+}
+
+func (m *IndexSpec) GetMode() IndexSpec_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_IndexSpec_Mode
+}
+
+type IndexMetadata struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ Field []*FieldTypes `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+ Storage *IndexMetadata_Storage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexMetadata) Reset() { *m = IndexMetadata{} }
+func (m *IndexMetadata) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata) ProtoMessage() {}
+func (*IndexMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *IndexMetadata) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *IndexMetadata) GetField() []*FieldTypes {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *IndexMetadata) GetStorage() *IndexMetadata_Storage {
+ if m != nil {
+ return m.Storage
+ }
+ return nil
+}
+
+type IndexMetadata_Storage struct {
+ AmountUsed *int64 `protobuf:"varint,1,opt,name=amount_used,json=amountUsed" json:"amount_used,omitempty"`
+ Limit *int64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexMetadata_Storage) Reset() { *m = IndexMetadata_Storage{} }
+func (m *IndexMetadata_Storage) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata_Storage) ProtoMessage() {}
+func (*IndexMetadata_Storage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14, 0} }
+
+func (m *IndexMetadata_Storage) GetAmountUsed() int64 {
+ if m != nil && m.AmountUsed != nil {
+ return *m.AmountUsed
+ }
+ return 0
+}
+
+func (m *IndexMetadata_Storage) GetLimit() int64 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+type IndexDocumentParams struct {
+ Document []*Document `protobuf:"bytes,1,rep,name=document" json:"document,omitempty"`
+ Freshness *IndexDocumentParams_Freshness `protobuf:"varint,2,opt,name=freshness,enum=search.IndexDocumentParams_Freshness,def=0" json:"freshness,omitempty"`
+ IndexSpec *IndexSpec `protobuf:"bytes,3,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentParams) Reset() { *m = IndexDocumentParams{} }
+func (m *IndexDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentParams) ProtoMessage() {}
+func (*IndexDocumentParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+const Default_IndexDocumentParams_Freshness IndexDocumentParams_Freshness = IndexDocumentParams_SYNCHRONOUSLY
+
+func (m *IndexDocumentParams) GetDocument() []*Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+func (m *IndexDocumentParams) GetFreshness() IndexDocumentParams_Freshness {
+ if m != nil && m.Freshness != nil {
+ return *m.Freshness
+ }
+ return Default_IndexDocumentParams_Freshness
+}
+
+func (m *IndexDocumentParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type IndexDocumentRequest struct {
+ Params *IndexDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} }
+func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentRequest) ProtoMessage() {}
+func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+func (m *IndexDocumentRequest) GetParams() *IndexDocumentParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *IndexDocumentRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type IndexDocumentResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ DocId []string `protobuf:"bytes,2,rep,name=doc_id,json=docId" json:"doc_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} }
+func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentResponse) ProtoMessage() {}
+func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+func (m *IndexDocumentResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *IndexDocumentResponse) GetDocId() []string {
+ if m != nil {
+ return m.DocId
+ }
+ return nil
+}
+
+type DeleteDocumentParams struct {
+ DocId []string `protobuf:"bytes,1,rep,name=doc_id,json=docId" json:"doc_id,omitempty"`
+ IndexSpec *IndexSpec `protobuf:"bytes,2,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentParams) Reset() { *m = DeleteDocumentParams{} }
+func (m *DeleteDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentParams) ProtoMessage() {}
+func (*DeleteDocumentParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
+func (m *DeleteDocumentParams) GetDocId() []string {
+ if m != nil {
+ return m.DocId
+ }
+ return nil
+}
+
+func (m *DeleteDocumentParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type DeleteDocumentRequest struct {
+ Params *DeleteDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} }
+func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentRequest) ProtoMessage() {}
+func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+
+func (m *DeleteDocumentRequest) GetParams() *DeleteDocumentParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *DeleteDocumentRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type DeleteDocumentResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} }
+func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentResponse) ProtoMessage() {}
+func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (m *DeleteDocumentResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type ListDocumentsParams struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ StartDocId *string `protobuf:"bytes,2,opt,name=start_doc_id,json=startDocId" json:"start_doc_id,omitempty"`
+ IncludeStartDoc *bool `protobuf:"varint,3,opt,name=include_start_doc,json=includeStartDoc,def=1" json:"include_start_doc,omitempty"`
+ Limit *int32 `protobuf:"varint,4,opt,name=limit,def=100" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,5,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsParams) Reset() { *m = ListDocumentsParams{} }
+func (m *ListDocumentsParams) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsParams) ProtoMessage() {}
+func (*ListDocumentsParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+
+const Default_ListDocumentsParams_IncludeStartDoc bool = true
+const Default_ListDocumentsParams_Limit int32 = 100
+
+func (m *ListDocumentsParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *ListDocumentsParams) GetStartDocId() string {
+ if m != nil && m.StartDocId != nil {
+ return *m.StartDocId
+ }
+ return ""
+}
+
+func (m *ListDocumentsParams) GetIncludeStartDoc() bool {
+ if m != nil && m.IncludeStartDoc != nil {
+ return *m.IncludeStartDoc
+ }
+ return Default_ListDocumentsParams_IncludeStartDoc
+}
+
+func (m *ListDocumentsParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ListDocumentsParams_Limit
+}
+
+func (m *ListDocumentsParams) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+type ListDocumentsRequest struct {
+ Params *ListDocumentsParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,2,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsRequest) Reset() { *m = ListDocumentsRequest{} }
+func (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsRequest) ProtoMessage() {}
+func (*ListDocumentsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+func (m *ListDocumentsRequest) GetParams() *ListDocumentsParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *ListDocumentsRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type ListDocumentsResponse struct {
+ Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ Document []*Document `protobuf:"bytes,2,rep,name=document" json:"document,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsResponse) Reset() { *m = ListDocumentsResponse{} }
+func (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsResponse) ProtoMessage() {}
+func (*ListDocumentsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+func (m *ListDocumentsResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *ListDocumentsResponse) GetDocument() []*Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+type ListIndexesParams struct {
+ FetchSchema *bool `protobuf:"varint,1,opt,name=fetch_schema,json=fetchSchema" json:"fetch_schema,omitempty"`
+ Limit *int32 `protobuf:"varint,2,opt,name=limit,def=20" json:"limit,omitempty"`
+ Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+ StartIndexName *string `protobuf:"bytes,4,opt,name=start_index_name,json=startIndexName" json:"start_index_name,omitempty"`
+ IncludeStartIndex *bool `protobuf:"varint,5,opt,name=include_start_index,json=includeStartIndex,def=1" json:"include_start_index,omitempty"`
+ IndexNamePrefix *string `protobuf:"bytes,6,opt,name=index_name_prefix,json=indexNamePrefix" json:"index_name_prefix,omitempty"`
+ Offset *int32 `protobuf:"varint,7,opt,name=offset" json:"offset,omitempty"`
+ Source *IndexSpec_Source `protobuf:"varint,8,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesParams) Reset() { *m = ListIndexesParams{} }
+func (m *ListIndexesParams) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesParams) ProtoMessage() {}
+func (*ListIndexesParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+
+const Default_ListIndexesParams_Limit int32 = 20
+const Default_ListIndexesParams_IncludeStartIndex bool = true
+const Default_ListIndexesParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *ListIndexesParams) GetFetchSchema() bool {
+ if m != nil && m.FetchSchema != nil {
+ return *m.FetchSchema
+ }
+ return false
+}
+
+func (m *ListIndexesParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ListIndexesParams_Limit
+}
+
+func (m *ListIndexesParams) GetNamespace() string {
+ if m != nil && m.Namespace != nil {
+ return *m.Namespace
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetStartIndexName() string {
+ if m != nil && m.StartIndexName != nil {
+ return *m.StartIndexName
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetIncludeStartIndex() bool {
+ if m != nil && m.IncludeStartIndex != nil {
+ return *m.IncludeStartIndex
+ }
+ return Default_ListIndexesParams_IncludeStartIndex
+}
+
+func (m *ListIndexesParams) GetIndexNamePrefix() string {
+ if m != nil && m.IndexNamePrefix != nil {
+ return *m.IndexNamePrefix
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return 0
+}
+
+func (m *ListIndexesParams) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_ListIndexesParams_Source
+}
+
+type ListIndexesRequest struct {
+ Params *ListIndexesParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesRequest) Reset() { *m = ListIndexesRequest{} }
+func (m *ListIndexesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesRequest) ProtoMessage() {}
+func (*ListIndexesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+
+func (m *ListIndexesRequest) GetParams() *ListIndexesParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *ListIndexesRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type ListIndexesResponse struct {
+ Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ IndexMetadata []*IndexMetadata `protobuf:"bytes,2,rep,name=index_metadata,json=indexMetadata" json:"index_metadata,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesResponse) Reset() { *m = ListIndexesResponse{} }
+func (m *ListIndexesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesResponse) ProtoMessage() {}
+func (*ListIndexesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+
+func (m *ListIndexesResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *ListIndexesResponse) GetIndexMetadata() []*IndexMetadata {
+ if m != nil {
+ return m.IndexMetadata
+ }
+ return nil
+}
+
+type DeleteSchemaParams struct {
+ Source *IndexSpec_Source `protobuf:"varint,1,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ IndexSpec []*IndexSpec `protobuf:"bytes,2,rep,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaParams) Reset() { *m = DeleteSchemaParams{} }
+func (m *DeleteSchemaParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaParams) ProtoMessage() {}
+func (*DeleteSchemaParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+
+const Default_DeleteSchemaParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *DeleteSchemaParams) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_DeleteSchemaParams_Source
+}
+
+func (m *DeleteSchemaParams) GetIndexSpec() []*IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+type DeleteSchemaRequest struct {
+ Params *DeleteSchemaParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaRequest) Reset() { *m = DeleteSchemaRequest{} }
+func (m *DeleteSchemaRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaRequest) ProtoMessage() {}
+func (*DeleteSchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+
+func (m *DeleteSchemaRequest) GetParams() *DeleteSchemaParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *DeleteSchemaRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type DeleteSchemaResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaResponse) Reset() { *m = DeleteSchemaResponse{} }
+func (m *DeleteSchemaResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaResponse) ProtoMessage() {}
+func (*DeleteSchemaResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+
+func (m *DeleteSchemaResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type SortSpec struct {
+ SortExpression *string `protobuf:"bytes,1,req,name=sort_expression,json=sortExpression" json:"sort_expression,omitempty"`
+ SortDescending *bool `protobuf:"varint,2,opt,name=sort_descending,json=sortDescending,def=1" json:"sort_descending,omitempty"`
+ DefaultValueText *string `protobuf:"bytes,4,opt,name=default_value_text,json=defaultValueText" json:"default_value_text,omitempty"`
+ DefaultValueNumeric *float64 `protobuf:"fixed64,5,opt,name=default_value_numeric,json=defaultValueNumeric" json:"default_value_numeric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SortSpec) Reset() { *m = SortSpec{} }
+func (m *SortSpec) String() string { return proto.CompactTextString(m) }
+func (*SortSpec) ProtoMessage() {}
+func (*SortSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+
+const Default_SortSpec_SortDescending bool = true
+
+func (m *SortSpec) GetSortExpression() string {
+ if m != nil && m.SortExpression != nil {
+ return *m.SortExpression
+ }
+ return ""
+}
+
+func (m *SortSpec) GetSortDescending() bool {
+ if m != nil && m.SortDescending != nil {
+ return *m.SortDescending
+ }
+ return Default_SortSpec_SortDescending
+}
+
+func (m *SortSpec) GetDefaultValueText() string {
+ if m != nil && m.DefaultValueText != nil {
+ return *m.DefaultValueText
+ }
+ return ""
+}
+
+func (m *SortSpec) GetDefaultValueNumeric() float64 {
+ if m != nil && m.DefaultValueNumeric != nil {
+ return *m.DefaultValueNumeric
+ }
+ return 0
+}
+
+type ScorerSpec struct {
+ Scorer *ScorerSpec_Scorer `protobuf:"varint,1,opt,name=scorer,enum=search.ScorerSpec_Scorer,def=2" json:"scorer,omitempty"`
+ Limit *int32 `protobuf:"varint,2,opt,name=limit,def=1000" json:"limit,omitempty"`
+ MatchScorerParameters *string `protobuf:"bytes,9,opt,name=match_scorer_parameters,json=matchScorerParameters" json:"match_scorer_parameters,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ScorerSpec) Reset() { *m = ScorerSpec{} }
+func (m *ScorerSpec) String() string { return proto.CompactTextString(m) }
+func (*ScorerSpec) ProtoMessage() {}
+func (*ScorerSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+
+const Default_ScorerSpec_Scorer ScorerSpec_Scorer = ScorerSpec_MATCH_SCORER
+const Default_ScorerSpec_Limit int32 = 1000
+
+func (m *ScorerSpec) GetScorer() ScorerSpec_Scorer {
+ if m != nil && m.Scorer != nil {
+ return *m.Scorer
+ }
+ return Default_ScorerSpec_Scorer
+}
+
+func (m *ScorerSpec) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ScorerSpec_Limit
+}
+
+func (m *ScorerSpec) GetMatchScorerParameters() string {
+ if m != nil && m.MatchScorerParameters != nil {
+ return *m.MatchScorerParameters
+ }
+ return ""
+}
+
+type FieldSpec struct {
+ Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"`
+ Expression []*FieldSpec_Expression `protobuf:"group,2,rep,name=Expression,json=expression" json:"expression,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldSpec) Reset() { *m = FieldSpec{} }
+func (m *FieldSpec) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec) ProtoMessage() {}
+func (*FieldSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
+
+func (m *FieldSpec) GetName() []string {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *FieldSpec) GetExpression() []*FieldSpec_Expression {
+ if m != nil {
+ return m.Expression
+ }
+ return nil
+}
+
+type FieldSpec_Expression struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Expression *string `protobuf:"bytes,4,req,name=expression" json:"expression,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldSpec_Expression) Reset() { *m = FieldSpec_Expression{} }
+func (m *FieldSpec_Expression) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec_Expression) ProtoMessage() {}
+func (*FieldSpec_Expression) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32, 0} }
+
+func (m *FieldSpec_Expression) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldSpec_Expression) GetExpression() string {
+ if m != nil && m.Expression != nil {
+ return *m.Expression
+ }
+ return ""
+}
+
+type FacetRange struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Start *string `protobuf:"bytes,2,opt,name=start" json:"start,omitempty"`
+ End *string `protobuf:"bytes,3,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRange) Reset() { *m = FacetRange{} }
+func (m *FacetRange) String() string { return proto.CompactTextString(m) }
+func (*FacetRange) ProtoMessage() {}
+func (*FacetRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
+
+func (m *FacetRange) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRange) GetStart() string {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return ""
+}
+
+func (m *FacetRange) GetEnd() string {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return ""
+}
+
+type FacetRequestParam struct {
+ ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit,json=valueLimit" json:"value_limit,omitempty"`
+ Range []*FacetRange `protobuf:"bytes,2,rep,name=range" json:"range,omitempty"`
+ ValueConstraint []string `protobuf:"bytes,3,rep,name=value_constraint,json=valueConstraint" json:"value_constraint,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRequestParam) Reset() { *m = FacetRequestParam{} }
+func (m *FacetRequestParam) String() string { return proto.CompactTextString(m) }
+func (*FacetRequestParam) ProtoMessage() {}
+func (*FacetRequestParam) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
+
+func (m *FacetRequestParam) GetValueLimit() int32 {
+ if m != nil && m.ValueLimit != nil {
+ return *m.ValueLimit
+ }
+ return 0
+}
+
+func (m *FacetRequestParam) GetRange() []*FacetRange {
+ if m != nil {
+ return m.Range
+ }
+ return nil
+}
+
+func (m *FacetRequestParam) GetValueConstraint() []string {
+ if m != nil {
+ return m.ValueConstraint
+ }
+ return nil
+}
+
+type FacetAutoDetectParam struct {
+ ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit,json=valueLimit,def=10" json:"value_limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetAutoDetectParam) Reset() { *m = FacetAutoDetectParam{} }
+func (m *FacetAutoDetectParam) String() string { return proto.CompactTextString(m) }
+func (*FacetAutoDetectParam) ProtoMessage() {}
+func (*FacetAutoDetectParam) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
+
+const Default_FacetAutoDetectParam_ValueLimit int32 = 10
+
+func (m *FacetAutoDetectParam) GetValueLimit() int32 {
+ if m != nil && m.ValueLimit != nil {
+ return *m.ValueLimit
+ }
+ return Default_FacetAutoDetectParam_ValueLimit
+}
+
+type FacetRequest struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Params *FacetRequestParam `protobuf:"bytes,2,opt,name=params" json:"params,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRequest) Reset() { *m = FacetRequest{} }
+func (m *FacetRequest) String() string { return proto.CompactTextString(m) }
+func (*FacetRequest) ProtoMessage() {}
+func (*FacetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
+
+func (m *FacetRequest) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRequest) GetParams() *FacetRequestParam {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+type FacetRefinement struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Range *FacetRefinement_Range `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRefinement) Reset() { *m = FacetRefinement{} }
+func (m *FacetRefinement) String() string { return proto.CompactTextString(m) }
+func (*FacetRefinement) ProtoMessage() {}
+func (*FacetRefinement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
+
+func (m *FacetRefinement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRefinement) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func (m *FacetRefinement) GetRange() *FacetRefinement_Range {
+ if m != nil {
+ return m.Range
+ }
+ return nil
+}
+
+type FacetRefinement_Range struct {
+ Start *string `protobuf:"bytes,1,opt,name=start" json:"start,omitempty"`
+ End *string `protobuf:"bytes,2,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRefinement_Range) Reset() { *m = FacetRefinement_Range{} }
+func (m *FacetRefinement_Range) String() string { return proto.CompactTextString(m) }
+func (*FacetRefinement_Range) ProtoMessage() {}
+func (*FacetRefinement_Range) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37, 0} }
+
+func (m *FacetRefinement_Range) GetStart() string {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return ""
+}
+
+func (m *FacetRefinement_Range) GetEnd() string {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return ""
+}
+
+type SearchParams struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec,json=indexSpec" json:"index_spec,omitempty"`
+ Query *string `protobuf:"bytes,2,req,name=query" json:"query,omitempty"`
+ Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+ Offset *int32 `protobuf:"varint,11,opt,name=offset" json:"offset,omitempty"`
+ CursorType *SearchParams_CursorType `protobuf:"varint,5,opt,name=cursor_type,json=cursorType,enum=search.SearchParams_CursorType,def=0" json:"cursor_type,omitempty"`
+ Limit *int32 `protobuf:"varint,6,opt,name=limit,def=20" json:"limit,omitempty"`
+ MatchedCountAccuracy *int32 `protobuf:"varint,7,opt,name=matched_count_accuracy,json=matchedCountAccuracy" json:"matched_count_accuracy,omitempty"`
+ SortSpec []*SortSpec `protobuf:"bytes,8,rep,name=sort_spec,json=sortSpec" json:"sort_spec,omitempty"`
+ ScorerSpec *ScorerSpec `protobuf:"bytes,9,opt,name=scorer_spec,json=scorerSpec" json:"scorer_spec,omitempty"`
+ FieldSpec *FieldSpec `protobuf:"bytes,10,opt,name=field_spec,json=fieldSpec" json:"field_spec,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
+ ParsingMode *SearchParams_ParsingMode `protobuf:"varint,13,opt,name=parsing_mode,json=parsingMode,enum=search.SearchParams_ParsingMode,def=0" json:"parsing_mode,omitempty"`
+ AutoDiscoverFacetCount *int32 `protobuf:"varint,15,opt,name=auto_discover_facet_count,json=autoDiscoverFacetCount,def=0" json:"auto_discover_facet_count,omitempty"`
+ IncludeFacet []*FacetRequest `protobuf:"bytes,16,rep,name=include_facet,json=includeFacet" json:"include_facet,omitempty"`
+ FacetRefinement []*FacetRefinement `protobuf:"bytes,17,rep,name=facet_refinement,json=facetRefinement" json:"facet_refinement,omitempty"`
+ FacetAutoDetectParam *FacetAutoDetectParam `protobuf:"bytes,18,opt,name=facet_auto_detect_param,json=facetAutoDetectParam" json:"facet_auto_detect_param,omitempty"`
+ FacetDepth *int32 `protobuf:"varint,19,opt,name=facet_depth,json=facetDepth,def=1000" json:"facet_depth,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchParams) Reset() { *m = SearchParams{} }
+func (m *SearchParams) String() string { return proto.CompactTextString(m) }
+func (*SearchParams) ProtoMessage() {}
+func (*SearchParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
+
+const Default_SearchParams_CursorType SearchParams_CursorType = SearchParams_NONE
+const Default_SearchParams_Limit int32 = 20
+const Default_SearchParams_ParsingMode SearchParams_ParsingMode = SearchParams_STRICT
+const Default_SearchParams_AutoDiscoverFacetCount int32 = 0
+const Default_SearchParams_FacetDepth int32 = 1000
+
+func (m *SearchParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetQuery() string {
+ if m != nil && m.Query != nil {
+ return *m.Query
+ }
+ return ""
+}
+
+func (m *SearchParams) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+func (m *SearchParams) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return 0
+}
+
+func (m *SearchParams) GetCursorType() SearchParams_CursorType {
+ if m != nil && m.CursorType != nil {
+ return *m.CursorType
+ }
+ return Default_SearchParams_CursorType
+}
+
+func (m *SearchParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_SearchParams_Limit
+}
+
+func (m *SearchParams) GetMatchedCountAccuracy() int32 {
+ if m != nil && m.MatchedCountAccuracy != nil {
+ return *m.MatchedCountAccuracy
+ }
+ return 0
+}
+
+func (m *SearchParams) GetSortSpec() []*SortSpec {
+ if m != nil {
+ return m.SortSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetScorerSpec() *ScorerSpec {
+ if m != nil {
+ return m.ScorerSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFieldSpec() *FieldSpec {
+ if m != nil {
+ return m.FieldSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *SearchParams) GetParsingMode() SearchParams_ParsingMode {
+ if m != nil && m.ParsingMode != nil {
+ return *m.ParsingMode
+ }
+ return Default_SearchParams_ParsingMode
+}
+
+func (m *SearchParams) GetAutoDiscoverFacetCount() int32 {
+ if m != nil && m.AutoDiscoverFacetCount != nil {
+ return *m.AutoDiscoverFacetCount
+ }
+ return Default_SearchParams_AutoDiscoverFacetCount
+}
+
+func (m *SearchParams) GetIncludeFacet() []*FacetRequest {
+ if m != nil {
+ return m.IncludeFacet
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetRefinement() []*FacetRefinement {
+ if m != nil {
+ return m.FacetRefinement
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetAutoDetectParam() *FacetAutoDetectParam {
+ if m != nil {
+ return m.FacetAutoDetectParam
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetDepth() int32 {
+ if m != nil && m.FacetDepth != nil {
+ return *m.FacetDepth
+ }
+ return Default_SearchParams_FacetDepth
+}
+
+type SearchRequest struct {
+ Params *SearchParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id,json=appId" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchRequest) Reset() { *m = SearchRequest{} }
+func (m *SearchRequest) String() string { return proto.CompactTextString(m) }
+func (*SearchRequest) ProtoMessage() {}
+func (*SearchRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} }
+
+func (m *SearchRequest) GetParams() *SearchParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *SearchRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type FacetResultValue struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Count *int32 `protobuf:"varint,2,req,name=count" json:"count,omitempty"`
+ Refinement *FacetRefinement `protobuf:"bytes,3,req,name=refinement" json:"refinement,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetResultValue) Reset() { *m = FacetResultValue{} }
+func (m *FacetResultValue) String() string { return proto.CompactTextString(m) }
+func (*FacetResultValue) ProtoMessage() {}
+func (*FacetResultValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} }
+
+func (m *FacetResultValue) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetResultValue) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *FacetResultValue) GetRefinement() *FacetRefinement {
+ if m != nil {
+ return m.Refinement
+ }
+ return nil
+}
+
+type FacetResult struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value []*FacetResultValue `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetResult) Reset() { *m = FacetResult{} }
+func (m *FacetResult) String() string { return proto.CompactTextString(m) }
+func (*FacetResult) ProtoMessage() {}
+func (*FacetResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} }
+
+func (m *FacetResult) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetResult) GetValue() []*FacetResultValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type SearchResult struct {
+ Document *Document `protobuf:"bytes,1,req,name=document" json:"document,omitempty"`
+ Expression []*Field `protobuf:"bytes,4,rep,name=expression" json:"expression,omitempty"`
+ Score []float64 `protobuf:"fixed64,2,rep,name=score" json:"score,omitempty"`
+ Cursor *string `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchResult) Reset() { *m = SearchResult{} }
+func (m *SearchResult) String() string { return proto.CompactTextString(m) }
+func (*SearchResult) ProtoMessage() {}
+func (*SearchResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} }
+
+func (m *SearchResult) GetDocument() *Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+func (m *SearchResult) GetExpression() []*Field {
+ if m != nil {
+ return m.Expression
+ }
+ return nil
+}
+
+func (m *SearchResult) GetScore() []float64 {
+ if m != nil {
+ return m.Score
+ }
+ return nil
+}
+
+func (m *SearchResult) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+type SearchResponse struct {
+ Result []*SearchResult `protobuf:"bytes,1,rep,name=result" json:"result,omitempty"`
+ MatchedCount *int64 `protobuf:"varint,2,req,name=matched_count,json=matchedCount" json:"matched_count,omitempty"`
+ Status *RequestStatus `protobuf:"bytes,3,req,name=status" json:"status,omitempty"`
+ Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+ FacetResult []*FacetResult `protobuf:"bytes,5,rep,name=facet_result,json=facetResult" json:"facet_result,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchResponse) Reset() { *m = SearchResponse{} }
+func (m *SearchResponse) String() string { return proto.CompactTextString(m) }
+func (*SearchResponse) ProtoMessage() {}
+func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} }
+
+var extRange_SearchResponse = []proto.ExtensionRange{
+ {1000, 9999},
+}
+
+func (*SearchResponse) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_SearchResponse
+}
+
+func (m *SearchResponse) GetResult() []*SearchResult {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *SearchResponse) GetMatchedCount() int64 {
+ if m != nil && m.MatchedCount != nil {
+ return *m.MatchedCount
+ }
+ return 0
+}
+
+func (m *SearchResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *SearchResponse) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+func (m *SearchResponse) GetFacetResult() []*FacetResult {
+ if m != nil {
+ return m.FacetResult
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Scope)(nil), "search.Scope")
+ proto.RegisterType((*Entry)(nil), "search.Entry")
+ proto.RegisterType((*AccessControlList)(nil), "search.AccessControlList")
+ proto.RegisterType((*FieldValue)(nil), "search.FieldValue")
+ proto.RegisterType((*FieldValue_Geo)(nil), "search.FieldValue.Geo")
+ proto.RegisterType((*Field)(nil), "search.Field")
+ proto.RegisterType((*FieldTypes)(nil), "search.FieldTypes")
+ proto.RegisterType((*IndexShardSettings)(nil), "search.IndexShardSettings")
+ proto.RegisterType((*FacetValue)(nil), "search.FacetValue")
+ proto.RegisterType((*Facet)(nil), "search.Facet")
+ proto.RegisterType((*DocumentMetadata)(nil), "search.DocumentMetadata")
+ proto.RegisterType((*Document)(nil), "search.Document")
+ proto.RegisterType((*SearchServiceError)(nil), "search.SearchServiceError")
+ proto.RegisterType((*RequestStatus)(nil), "search.RequestStatus")
+ proto.RegisterType((*IndexSpec)(nil), "search.IndexSpec")
+ proto.RegisterType((*IndexMetadata)(nil), "search.IndexMetadata")
+ proto.RegisterType((*IndexMetadata_Storage)(nil), "search.IndexMetadata.Storage")
+ proto.RegisterType((*IndexDocumentParams)(nil), "search.IndexDocumentParams")
+ proto.RegisterType((*IndexDocumentRequest)(nil), "search.IndexDocumentRequest")
+ proto.RegisterType((*IndexDocumentResponse)(nil), "search.IndexDocumentResponse")
+ proto.RegisterType((*DeleteDocumentParams)(nil), "search.DeleteDocumentParams")
+ proto.RegisterType((*DeleteDocumentRequest)(nil), "search.DeleteDocumentRequest")
+ proto.RegisterType((*DeleteDocumentResponse)(nil), "search.DeleteDocumentResponse")
+ proto.RegisterType((*ListDocumentsParams)(nil), "search.ListDocumentsParams")
+ proto.RegisterType((*ListDocumentsRequest)(nil), "search.ListDocumentsRequest")
+ proto.RegisterType((*ListDocumentsResponse)(nil), "search.ListDocumentsResponse")
+ proto.RegisterType((*ListIndexesParams)(nil), "search.ListIndexesParams")
+ proto.RegisterType((*ListIndexesRequest)(nil), "search.ListIndexesRequest")
+ proto.RegisterType((*ListIndexesResponse)(nil), "search.ListIndexesResponse")
+ proto.RegisterType((*DeleteSchemaParams)(nil), "search.DeleteSchemaParams")
+ proto.RegisterType((*DeleteSchemaRequest)(nil), "search.DeleteSchemaRequest")
+ proto.RegisterType((*DeleteSchemaResponse)(nil), "search.DeleteSchemaResponse")
+ proto.RegisterType((*SortSpec)(nil), "search.SortSpec")
+ proto.RegisterType((*ScorerSpec)(nil), "search.ScorerSpec")
+ proto.RegisterType((*FieldSpec)(nil), "search.FieldSpec")
+ proto.RegisterType((*FieldSpec_Expression)(nil), "search.FieldSpec.Expression")
+ proto.RegisterType((*FacetRange)(nil), "search.FacetRange")
+ proto.RegisterType((*FacetRequestParam)(nil), "search.FacetRequestParam")
+ proto.RegisterType((*FacetAutoDetectParam)(nil), "search.FacetAutoDetectParam")
+ proto.RegisterType((*FacetRequest)(nil), "search.FacetRequest")
+ proto.RegisterType((*FacetRefinement)(nil), "search.FacetRefinement")
+ proto.RegisterType((*FacetRefinement_Range)(nil), "search.FacetRefinement.Range")
+ proto.RegisterType((*SearchParams)(nil), "search.SearchParams")
+ proto.RegisterType((*SearchRequest)(nil), "search.SearchRequest")
+ proto.RegisterType((*FacetResultValue)(nil), "search.FacetResultValue")
+ proto.RegisterType((*FacetResult)(nil), "search.FacetResult")
+ proto.RegisterType((*SearchResult)(nil), "search.SearchResult")
+ proto.RegisterType((*SearchResponse)(nil), "search.SearchResponse")
+ proto.RegisterEnum("search.Scope_Type", Scope_Type_name, Scope_Type_value)
+ proto.RegisterEnum("search.Entry_Permission", Entry_Permission_name, Entry_Permission_value)
+ proto.RegisterEnum("search.FieldValue_ContentType", FieldValue_ContentType_name, FieldValue_ContentType_value)
+ proto.RegisterEnum("search.FacetValue_ContentType", FacetValue_ContentType_name, FacetValue_ContentType_value)
+ proto.RegisterEnum("search.Document_OrderIdSource", Document_OrderIdSource_name, Document_OrderIdSource_value)
+ proto.RegisterEnum("search.Document_Storage", Document_Storage_name, Document_Storage_value)
+ proto.RegisterEnum("search.SearchServiceError_ErrorCode", SearchServiceError_ErrorCode_name, SearchServiceError_ErrorCode_value)
+ proto.RegisterEnum("search.IndexSpec_Consistency", IndexSpec_Consistency_name, IndexSpec_Consistency_value)
+ proto.RegisterEnum("search.IndexSpec_Source", IndexSpec_Source_name, IndexSpec_Source_value)
+ proto.RegisterEnum("search.IndexSpec_Mode", IndexSpec_Mode_name, IndexSpec_Mode_value)
+ proto.RegisterEnum("search.IndexDocumentParams_Freshness", IndexDocumentParams_Freshness_name, IndexDocumentParams_Freshness_value)
+ proto.RegisterEnum("search.ScorerSpec_Scorer", ScorerSpec_Scorer_name, ScorerSpec_Scorer_value)
+ proto.RegisterEnum("search.SearchParams_CursorType", SearchParams_CursorType_name, SearchParams_CursorType_value)
+ proto.RegisterEnum("search.SearchParams_ParsingMode", SearchParams_ParsingMode_name, SearchParams_ParsingMode_value)
+}
+
+func init() { proto.RegisterFile("search.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 2960 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x59, 0xcf, 0x73, 0xdb, 0xc6,
+ 0xf5, 0x17, 0x40, 0x91, 0x22, 0x1f, 0x49, 0x09, 0x5a, 0xfd, 0x30, 0xed, 0xf8, 0x9b, 0x28, 0x70,
+ 0x9c, 0x28, 0xf9, 0xda, 0xfa, 0xca, 0xb2, 0x27, 0xf1, 0x57, 0xcd, 0xb4, 0xa1, 0x49, 0x58, 0x66,
+ 0x4d, 0x91, 0xca, 0x12, 0x74, 0xe2, 0xce, 0x34, 0x28, 0x0a, 0xac, 0x64, 0x4c, 0x49, 0x80, 0x01,
+ 0x40, 0xd7, 0xba, 0x75, 0x72, 0xeb, 0xa5, 0xd3, 0x4e, 0x4f, 0x3d, 0x75, 0x32, 0xbd, 0x74, 0x7a,
+ 0xed, 0xbd, 0xa7, 0xf6, 0xd6, 0x5b, 0x4f, 0xfd, 0x07, 0x3a, 0x9d, 0x76, 0xa6, 0x7f, 0x43, 0x67,
+ 0xdf, 0x2e, 0x40, 0x80, 0xa2, 0xa3, 0xd8, 0x37, 0xe2, 0xed, 0xdb, 0xb7, 0x6f, 0xdf, 0xe7, 0xbd,
+ 0xcf, 0xbe, 0x5d, 0x42, 0x2d, 0x62, 0x76, 0xe8, 0x3c, 0xdb, 0x9b, 0x84, 0x41, 0x1c, 0x90, 0x92,
+ 0xf8, 0xd2, 0xff, 0xad, 0x40, 0x71, 0xe0, 0x04, 0x13, 0x46, 0xde, 0x85, 0xe5, 0xf8, 0x7c, 0xc2,
+ 0x1a, 0xca, 0x8e, 0xb2, 0xbb, 0x7a, 0x40, 0xf6, 0xa4, 0x3a, 0x0e, 0xee, 0x99, 0xe7, 0x13, 0x46,
+ 0x71, 0x9c, 0x6c, 0x42, 0xf1, 0xb9, 0x3d, 0x9a, 0xb2, 0x86, 0xba, 0xa3, 0xec, 0x56, 0xa8, 0xf8,
+ 0xd0, 0x7f, 0xa7, 0xc0, 0x32, 0x57, 0x22, 0x0d, 0xd8, 0x1c, 0x0e, 0x0c, 0x6a, 0x3d, 0x78, 0x6a,
+ 0xb5, 0x9a, 0xbd, 0x7e, 0xaf, 0xd3, 0x6a, 0x76, 0xad, 0x4e, 0x5b, 0x53, 0xc8, 0x3a, 0xd4, 0x93,
+ 0x11, 0xe3, 0xb8, 0xd9, 0xe9, 0x6a, 0x2a, 0xb9, 0x0a, 0x5b, 0x47, 0xb4, 0x3f, 0x3c, 0xb9, 0xa0,
+ 0x5d, 0x20, 0x04, 0x56, 0xd3, 0x21, 0xa1, 0xbe, 0x4c, 0x36, 0x60, 0x2d, 0x95, 0xb5, 0xfb, 0xc7,
+ 0xcd, 0x4e, 0x4f, 0x2b, 0x92, 0x3a, 0x54, 0x9a, 0xdd, 0xae, 0xc5, 0x4d, 0x0f, 0xb4, 0x12, 0x79,
+ 0x03, 0xae, 0xf0, 0xcf, 0xe6, 0xd0, 0x7c, 0x64, 0xf4, 0xcc, 0x4e, 0xab, 0x69, 0x1a, 0x6d, 0x39,
+ 0xb8, 0xa2, 0xff, 0x49, 0x81, 0xa2, 0xe1, 0xc7, 0xe1, 0x39, 0xb9, 0x01, 0xc5, 0x88, 0xef, 0x0c,
+ 0xb7, 0x5b, 0x3d, 0xa8, 0xe7, 0xb6, 0x4b, 0xc5, 0x18, 0xb9, 0x0f, 0x30, 0x61, 0xe1, 0xd8, 0x8b,
+ 0x22, 0x2f, 0xf0, 0x71, 0xbf, 0xab, 0x07, 0x8d, 0x44, 0x13, 0xed, 0xec, 0x9d, 0xa4, 0xe3, 0x34,
+ 0xa3, 0x4b, 0xde, 0x86, 0x9a, 0xeb, 0x45, 0x93, 0x91, 0x7d, 0x6e, 0xf9, 0xf6, 0x98, 0x35, 0x0a,
+ 0x18, 0xab, 0xaa, 0x94, 0xf5, 0xec, 0x31, 0xd3, 0xef, 0x02, 0xcc, 0x26, 0x93, 0x32, 0x2c, 0x53,
+ 0xa3, 0xc9, 0xc3, 0x54, 0x81, 0xe2, 0x67, 0xb4, 0x63, 0x1a, 0x9a, 0x4a, 0x34, 0xa8, 0x3d, 0x1c,
+ 0x76, 0xbb, 0x56, 0xab, 0xdf, 0x33, 0x69, 0xbf, 0xab, 0x15, 0x74, 0x0a, 0xeb, 0x4d, 0xc7, 0x61,
+ 0x51, 0xd4, 0x0a, 0xfc, 0x38, 0x0c, 0x46, 0x5d, 0x2f, 0x8a, 0x39, 0x22, 0xc1, 0x4f, 0x7d, 0x16,
+ 0xe2, 0x5e, 0x2a, 0x54, 0x7c, 0x90, 0xf7, 0x60, 0x85, 0xf9, 0x71, 0xe8, 0xb1, 0xa8, 0xa1, 0xee,
+ 0x14, 0xb2, 0x7b, 0x44, 0xcf, 0x69, 0x32, 0xaa, 0xff, 0x41, 0x05, 0x78, 0xe8, 0xb1, 0x91, 0xfb,
+ 0x84, 0x23, 0x49, 0xee, 0xe7, 0xf2, 0xe0, 0xcd, 0x64, 0xd2, 0x4c, 0x63, 0x8f, 0xaf, 0xcd, 0xfc,
+ 0x98, 0xc3, 0x7d, 0xb8, 0x6c, 0x1a, 0x9f, 0x9b, 0x32, 0x33, 0xde, 0x84, 0xf2, 0xc8, 0xf6, 0xcf,
+ 0xa6, 0xf6, 0x99, 0x4c, 0x8e, 0x43, 0x95, 0xf9, 0x34, 0x95, 0xf1, 0xa0, 0x44, 0x71, 0xe8, 0xf9,
+ 0x67, 0x96, 0x48, 0x20, 0x19, 0x14, 0x21, 0x13, 0x8b, 0xef, 0x42, 0xe1, 0x8c, 0x05, 0x8d, 0xe5,
+ 0x1d, 0x65, 0x17, 0x0e, 0xb6, 0x17, 0xac, 0x7d, 0xc4, 0x02, 0xca, 0x55, 0xae, 0xbd, 0x0f, 0x85,
+ 0x23, 0x16, 0x10, 0x0d, 0x0a, 0x23, 0x3b, 0x6e, 0x14, 0x77, 0xd4, 0x5d, 0x85, 0xf2, 0x9f, 0x28,
+ 0xf1, 0xcf, 0x1a, 0x25, 0x29, 0xf1, 0xcf, 0xf4, 0xef, 0x43, 0x35, 0xe3, 0x32, 0x0f, 0x35, 0x77,
+ 0x5a, 0x5b, 0xe2, 0xbf, 0x1e, 0x99, 0xc7, 0x5d, 0x4d, 0xe1, 0xbf, 0x9a, 0x66, 0xff, 0x58, 0x53,
+ 0xf9, 0xaf, 0x76, 0xd3, 0x34, 0xb4, 0x02, 0x01, 0x28, 0xf5, 0x86, 0xc7, 0x0f, 0x0c, 0xaa, 0x2d,
+ 0x93, 0x15, 0x28, 0x1c, 0x19, 0x7d, 0xad, 0xa8, 0x1b, 0x50, 0x44, 0x6f, 0x08, 0x81, 0x65, 0x44,
+ 0x56, 0xd9, 0x51, 0x77, 0x2b, 0x14, 0x7f, 0x93, 0xdd, 0x59, 0x69, 0xa8, 0xbb, 0xd5, 0x59, 0x0d,
+ 0xcd, 0xfc, 0x4f, 0xca, 0xc5, 0x94, 0x21, 0xe7, 0x0e, 0x45, 0x0b, 0x6d, 0x1d, 0x48, 0x18, 0x38,
+ 0x76, 0x97, 0xc2, 0x20, 0x00, 0xd0, 0xff, 0xa2, 0x00, 0xe9, 0xf8, 0x2e, 0x7b, 0x31, 0x78, 0x66,
+ 0x87, 0xee, 0x80, 0xc5, 0xb1, 0xe7, 0x9f, 0x45, 0xe4, 0x5d, 0x58, 0x9b, 0x84, 0xec, 0xb9, 0xe5,
+ 0x4f, 0xc7, 0x56, 0xc4, 0x47, 0xa2, 0x86, 0xb2, 0x53, 0xd8, 0x2d, 0xd2, 0x3a, 0x17, 0xf7, 0xa6,
+ 0x63, 0x54, 0x8f, 0xc8, 0x0e, 0x40, 0x46, 0x85, 0xef, 0xa1, 0x78, 0xa8, 0xdc, 0xa1, 0x15, 0x3f,
+ 0xd5, 0xf8, 0x2e, 0x5c, 0x9f, 0xb3, 0x64, 0x09, 0xbf, 0xac, 0x53, 0x7b, 0x14, 0x71, 0x44, 0xb9,
+ 0xd9, 0x46, 0xce, 0xec, 0x00, 0x15, 0x1e, 0xf2, 0x71, 0x72, 0x13, 0xea, 0xa3, 0xc0, 0xb1, 0x47,
+ 0x56, 0xc8, 0x26, 0x23, 0xcf, 0xb1, 0x11, 0xe8, 0xca, 0xe1, 0x12, 0xad, 0xa1, 0x98, 0x0a, 0xa9,
+ 0xfe, 0x0b, 0x05, 0xe0, 0xa1, 0xed, 0xb0, 0xf8, 0x9b, 0x33, 0x32, 0xd5, 0xc8, 0x67, 0x24, 0x07,
+ 0x52, 0x66, 0xe4, 0xe5, 0x19, 0xa7, 0xdf, 0xb8, 0x90, 0x1c, 0x32, 0x11, 0x32, 0xf0, 0x23, 0xea,
+ 0x7c, 0xb5, 0x57, 0x43, 0x3d, 0xf5, 0x2f, 0x41, 0xfd, 0x0b, 0xd0, 0xda, 0x81, 0x33, 0x1d, 0x33,
+ 0x3f, 0x3e, 0x66, 0xb1, 0xed, 0xda, 0xb1, 0x4d, 0x1a, 0xb0, 0xf2, 0x9c, 0x85, 0x48, 0x30, 0x7c,
+ 0x7f, 0x05, 0x9a, 0x7c, 0x92, 0x7d, 0xd8, 0x74, 0x82, 0xf1, 0xd8, 0x8b, 0x63, 0xe6, 0x5a, 0x51,
+ 0x6c, 0x25, 0x6a, 0x2a, 0xaa, 0x91, 0x74, 0x6c, 0x10, 0x3f, 0x11, 0x23, 0xfa, 0x7f, 0x54, 0x28,
+ 0x27, 0x0b, 0x90, 0x55, 0x50, 0x3d, 0x57, 0x52, 0x82, 0xea, 0xb9, 0x97, 0x56, 0xe7, 0x0d, 0x28,
+ 0x9e, 0xf2, 0xe4, 0x42, 0x10, 0x33, 0x6c, 0x81, 0x19, 0x47, 0xc5, 0x18, 0xb9, 0x0a, 0xe5, 0x20,
+ 0x74, 0x59, 0x68, 0x79, 0x2e, 0x62, 0x57, 0xa4, 0x2b, 0xf8, 0xdd, 0x71, 0xc9, 0x09, 0xac, 0x25,
+ 0x43, 0x56, 0x14, 0x4c, 0x43, 0x87, 0x35, 0x4a, 0x79, 0xc0, 0x12, 0xd7, 0xf6, 0xfa, 0x62, 0xca,
+ 0x00, 0xb5, 0x0e, 0xcb, 0x83, 0xe1, 0xc9, 0x49, 0xb7, 0x63, 0xb4, 0x69, 0x3d, 0xc8, 0x0e, 0x90,
+ 0xfb, 0xb0, 0x12, 0xc5, 0x41, 0xc8, 0x1d, 0x2e, 0xe6, 0xb9, 0x37, 0xb5, 0x34, 0x10, 0xe3, 0x87,
+ 0xcb, 0xed, 0xce, 0xe0, 0x31, 0x4d, 0xd4, 0x71, 0x2f, 0x3c, 0xfa, 0x8d, 0xf2, 0xdc, 0x5e, 0xb8,
+ 0x90, 0x8a, 0x31, 0xfd, 0x16, 0xd4, 0x73, 0x8e, 0xf0, 0x93, 0xa4, 0x6d, 0x3c, 0x6c, 0x0e, 0xbb,
+ 0xa6, 0xd1, 0xd6, 0x96, 0x48, 0x0d, 0x52, 0xcf, 0x34, 0x45, 0xdf, 0x80, 0x15, 0xb9, 0x18, 0x52,
+ 0x44, 0x67, 0xf0, 0x58, 0x5b, 0xd2, 0x7f, 0xaf, 0x00, 0x11, 0xf9, 0x3d, 0x60, 0xe1, 0x73, 0xcf,
+ 0x61, 0x46, 0x18, 0x06, 0xa1, 0xfe, 0x2b, 0x05, 0x2a, 0xf8, 0xab, 0x15, 0xb8, 0x8c, 0x94, 0x40,
+ 0xed, 0x3f, 0xd6, 0x96, 0xf8, 0xe9, 0xd5, 0xe9, 0x3d, 0x69, 0x76, 0x3b, 0x6d, 0x8b, 0x1a, 0x9f,
+ 0x0e, 0x8d, 0x81, 0xa9, 0x29, 0x5c, 0x68, 0xd2, 0x66, 0x6f, 0xd0, 0x31, 0x7a, 0xa6, 0x65, 0x50,
+ 0xda, 0xa7, 0x9a, 0xca, 0xcf, 0xbe, 0x4e, 0xcf, 0x34, 0x68, 0xaf, 0xd9, 0x95, 0xb2, 0x02, 0xd9,
+ 0x82, 0xf5, 0x13, 0x83, 0x1e, 0x77, 0x06, 0x83, 0x4e, 0xbf, 0x67, 0xb5, 0x8d, 0x1e, 0x77, 0x6b,
+ 0x99, 0x54, 0x61, 0xc5, 0xec, 0x1c, 0x1b, 0xfd, 0xa1, 0xa9, 0x15, 0xc9, 0x35, 0xd8, 0x6e, 0xf5,
+ 0x7b, 0xad, 0x21, 0xa5, 0xdc, 0x1a, 0xda, 0x6d, 0xb6, 0xcc, 0x4e, 0xbf, 0xa7, 0x95, 0xf4, 0x5f,
+ 0x2b, 0x50, 0xa7, 0xec, 0xcb, 0x29, 0x8b, 0xe2, 0x41, 0x6c, 0xc7, 0xd3, 0x88, 0x97, 0x95, 0x13,
+ 0xb8, 0x22, 0x97, 0x57, 0x0f, 0xde, 0x49, 0x4f, 0xc0, 0x0b, 0xfb, 0xd9, 0x4b, 0xf7, 0x42, 0x71,
+ 0x06, 0x2f, 0x2b, 0xc6, 0x45, 0x96, 0xcb, 0x62, 0xdb, 0x1b, 0xc9, 0x4e, 0xa0, 0x8a, 0xb2, 0x36,
+ 0x8a, 0xc8, 0x4d, 0x58, 0x75, 0x6c, 0x3f, 0xf0, 0x3d, 0x5e, 0xed, 0xb8, 0x4c, 0x01, 0xd3, 0xa5,
+ 0x9e, 0x4a, 0xb9, 0x3d, 0xfd, 0xeb, 0x02, 0x54, 0x04, 0x63, 0x4d, 0x98, 0xb3, 0xb0, 0xba, 0x8e,
+ 0xa1, 0xea, 0x04, 0x7e, 0xe4, 0x45, 0x31, 0xf3, 0x9d, 0x73, 0x79, 0x08, 0xff, 0x4f, 0xe2, 0x6c,
+ 0x3a, 0x97, 0x53, 0x40, 0xa2, 0x74, 0x58, 0x3b, 0x31, 0xa8, 0xd5, 0xee, 0xb7, 0x86, 0xc7, 0x46,
+ 0xcf, 0xa4, 0xd9, 0xf9, 0xe4, 0x3a, 0x54, 0xb8, 0xd9, 0x68, 0x62, 0x3b, 0x09, 0x1d, 0xcc, 0x04,
+ 0xd9, 0x62, 0x94, 0xd9, 0x9d, 0x14, 0xe3, 0x7d, 0x28, 0xc9, 0xa4, 0x9e, 0x4b, 0xc5, 0x99, 0x07,
+ 0x32, 0x9d, 0x4b, 0x03, 0xa3, 0x49, 0x5b, 0x8f, 0xa8, 0xd4, 0x27, 0xf7, 0x60, 0x79, 0xcc, 0xf7,
+ 0x2f, 0x8a, 0x61, 0xfb, 0xe2, 0xbc, 0xe3, 0xc0, 0x65, 0x87, 0xe5, 0x13, 0xda, 0xe9, 0xd3, 0x8e,
+ 0xf9, 0x94, 0xa2, 0xb6, 0xfe, 0xbf, 0x48, 0x4b, 0xa9, 0xdb, 0x00, 0xa5, 0xa3, 0x6e, 0xff, 0x41,
+ 0xb3, 0xab, 0x2d, 0xf1, 0xae, 0x20, 0xbb, 0x3f, 0x4d, 0xd1, 0x3f, 0x84, 0x92, 0x4c, 0x61, 0x00,
+ 0xb9, 0xbc, 0xb6, 0x84, 0xe9, 0xdc, 0x34, 0x9b, 0x03, 0xb3, 0x4f, 0x0d, 0xd1, 0x7e, 0xb5, 0xba,
+ 0xfd, 0x61, 0xdb, 0xe2, 0x82, 0xe6, 0x91, 0xa1, 0xa9, 0xfa, 0x3b, 0xb0, 0xcc, 0x17, 0xe7, 0x99,
+ 0x9e, 0x2c, 0xaf, 0x2d, 0x91, 0x55, 0x80, 0x07, 0xcd, 0xd6, 0x63, 0xde, 0x69, 0xf5, 0x78, 0xe6,
+ 0xff, 0x43, 0x81, 0x3a, 0x7a, 0x9b, 0x72, 0xd6, 0x3e, 0x80, 0xc7, 0x05, 0x56, 0x34, 0x61, 0x0e,
+ 0xa2, 0x55, 0x3d, 0x58, 0xbf, 0xb0, 0x31, 0x5a, 0xf1, 0x52, 0x64, 0x77, 0x13, 0x72, 0x11, 0xad,
+ 0x48, 0xfe, 0x64, 0xc4, 0x43, 0x30, 0x61, 0x98, 0x8f, 0x66, 0x45, 0x5f, 0xc0, 0xd6, 0x2c, 0x8f,
+ 0x75, 0xe2, 0x43, 0x52, 0xf9, 0x69, 0xcd, 0x5f, 0xfb, 0x64, 0x56, 0xa0, 0x6f, 0x41, 0xd5, 0x1e,
+ 0x07, 0x53, 0x3f, 0xb6, 0xa6, 0x11, 0x73, 0x25, 0xaf, 0x82, 0x10, 0x0d, 0x23, 0xe6, 0xf2, 0x8e,
+ 0x69, 0xe4, 0x8d, 0xbd, 0x58, 0x72, 0xa9, 0xf8, 0xd0, 0xbf, 0x52, 0x61, 0x03, 0x17, 0x49, 0xe8,
+ 0xe5, 0xc4, 0x0e, 0xed, 0x71, 0x44, 0x6e, 0x41, 0xd9, 0x95, 0x12, 0x3c, 0x38, 0xab, 0x07, 0xda,
+ 0x3c, 0x11, 0xd1, 0x54, 0x83, 0x3c, 0x81, 0xca, 0x69, 0xc8, 0xa2, 0x67, 0x3e, 0x8b, 0x22, 0x99,
+ 0xae, 0x37, 0x73, 0x5b, 0xc8, 0x5b, 0xdf, 0x7b, 0x98, 0x28, 0x1f, 0xd6, 0x07, 0x4f, 0x7b, 0xad,
+ 0x47, 0xb4, 0xdf, 0xeb, 0x0f, 0x07, 0xdd, 0xa7, 0x0f, 0xd4, 0x86, 0x42, 0x67, 0xa6, 0xe6, 0x82,
+ 0x5e, 0xb8, 0x3c, 0xe8, 0xfa, 0x5d, 0xa8, 0xa4, 0xc6, 0x39, 0xfc, 0x39, 0xf3, 0x82, 0x90, 0x3e,
+ 0x7b, 0x64, 0xf4, 0x78, 0x7b, 0xf9, 0x84, 0xf3, 0x09, 0xe6, 0xd2, 0x8f, 0x61, 0x33, 0xe7, 0xa5,
+ 0xe4, 0x0c, 0x72, 0x17, 0x4a, 0x13, 0x74, 0x58, 0xe2, 0xfd, 0xc6, 0x37, 0xec, 0x89, 0x4a, 0x55,
+ 0xb2, 0x05, 0x25, 0x7b, 0x32, 0xe1, 0x87, 0x05, 0xc7, 0xb2, 0x46, 0x8b, 0xf6, 0x64, 0xd2, 0x71,
+ 0xf5, 0x1f, 0xc2, 0xd6, 0xdc, 0x1a, 0xd1, 0x24, 0xf0, 0x23, 0x46, 0x6e, 0x43, 0x29, 0x42, 0x72,
+ 0x92, 0x71, 0xde, 0x4a, 0x16, 0xc9, 0x31, 0x17, 0x95, 0x4a, 0xdc, 0xbc, 0x1b, 0x38, 0xdc, 0x3c,
+ 0x4f, 0xab, 0x0a, 0x2d, 0xba, 0x81, 0xd3, 0x71, 0x75, 0x0b, 0x36, 0xdb, 0x6c, 0xc4, 0x62, 0x36,
+ 0x87, 0xe3, 0x4c, 0x5d, 0xc9, 0xa8, 0xcf, 0x05, 0x56, 0xfd, 0x16, 0x81, 0x75, 0x61, 0x2b, 0xbf,
+ 0x40, 0x12, 0xa4, 0x7b, 0x73, 0x41, 0xba, 0x9e, 0xe6, 0xc9, 0x02, 0x7f, 0x2e, 0x8b, 0xd2, 0x11,
+ 0x6c, 0xcf, 0xaf, 0xf2, 0x5a, 0x61, 0xd2, 0xff, 0xa6, 0xc0, 0x06, 0xbf, 0x28, 0x24, 0x76, 0x22,
+ 0x19, 0x8f, 0x57, 0x2f, 0xe3, 0x1d, 0xde, 0x4f, 0xd9, 0x61, 0x6c, 0xa5, 0x61, 0xe7, 0x04, 0x0a,
+ 0x28, 0x6b, 0xcb, 0x60, 0xae, 0x7b, 0xbe, 0x33, 0x9a, 0xba, 0xcc, 0x4a, 0x35, 0x71, 0x5b, 0xe5,
+ 0xc3, 0xe5, 0x38, 0x9c, 0x32, 0xba, 0x26, 0x87, 0x07, 0x72, 0x0e, 0xb9, 0x9a, 0xd4, 0x22, 0x32,
+ 0xee, 0x61, 0xe1, 0xce, 0xfe, 0xbe, 0x2c, 0x48, 0xf2, 0x06, 0x54, 0x7e, 0xc2, 0xce, 0x23, 0x2b,
+ 0xf0, 0x47, 0xe7, 0xc8, 0xbb, 0x65, 0x5a, 0xe6, 0x82, 0xbe, 0x3f, 0x3a, 0xe7, 0x89, 0x9a, 0xdb,
+ 0xd4, 0xa5, 0x89, 0xba, 0x20, 0x04, 0x0b, 0x20, 0x50, 0xb3, 0x10, 0xc4, 0xb0, 0x35, 0xb7, 0xc6,
+ 0x02, 0x04, 0xd4, 0xcb, 0x13, 0x35, 0xcb, 0x20, 0xea, 0x65, 0x0c, 0xa2, 0xff, 0x55, 0x85, 0x75,
+ 0xbe, 0x2c, 0x42, 0xc0, 0x12, 0xb4, 0xde, 0x86, 0xda, 0x29, 0x8b, 0x9d, 0x67, 0x56, 0xe4, 0x3c,
+ 0x63, 0x63, 0x1b, 0x59, 0xad, 0x4c, 0xab, 0x28, 0x1b, 0xa0, 0x88, 0x34, 0xb2, 0xb4, 0x56, 0x3c,
+ 0x54, 0x0f, 0xd2, 0x48, 0x7e, 0xf3, 0xb1, 0xb7, 0x0b, 0x9a, 0x00, 0x4b, 0xa4, 0x03, 0x9e, 0xc1,
+ 0xd8, 0x99, 0xd3, 0x55, 0x94, 0xa3, 0x23, 0xfc, 0xd2, 0x4a, 0xee, 0xc1, 0x46, 0x1e, 0x5e, 0x9c,
+ 0x21, 0xb0, 0x91, 0x00, 0xaf, 0x67, 0x01, 0xc6, 0x99, 0xe4, 0x03, 0x9e, 0x14, 0x89, 0x65, 0x6b,
+ 0x12, 0xb2, 0x53, 0xef, 0x05, 0x9e, 0x87, 0x15, 0x9e, 0x0e, 0xd2, 0xf6, 0x09, 0x8a, 0xc9, 0x36,
+ 0x94, 0x82, 0xd3, 0xd3, 0x88, 0xc5, 0x8d, 0x15, 0x3c, 0x81, 0xe5, 0x57, 0xe6, 0x00, 0x2e, 0xbf,
+ 0xda, 0x01, 0xac, 0x7f, 0x01, 0x24, 0x13, 0xcd, 0x24, 0x4d, 0xee, 0xcc, 0xa5, 0xc9, 0xd5, 0x6c,
+ 0x9a, 0xe4, 0x22, 0x7f, 0x59, 0x9d, 0x7e, 0x25, 0xcb, 0x2b, 0x5d, 0xe0, 0xf5, 0x72, 0xe4, 0x63,
+ 0x58, 0x15, 0x41, 0x1a, 0xcb, 0x23, 0x4e, 0x66, 0xca, 0xd6, 0xc2, 0xf3, 0x8f, 0xd6, 0xbd, 0xec,
+ 0xa7, 0xfe, 0x33, 0x05, 0x88, 0x60, 0x0b, 0x91, 0x0b, 0x32, 0x69, 0x66, 0x51, 0x53, 0x5e, 0xb1,
+ 0x6d, 0x99, 0x67, 0xc5, 0xc2, 0xa5, 0xac, 0xf8, 0x23, 0xd8, 0xc8, 0x7a, 0x90, 0x04, 0xfa, 0x60,
+ 0x2e, 0xd0, 0xd7, 0xf2, 0x9c, 0x98, 0x75, 0xf7, 0xb2, 0x48, 0x1b, 0x09, 0xb1, 0x27, 0x2b, 0xbc,
+ 0x1e, 0x1f, 0xfe, 0x59, 0x81, 0xf2, 0x20, 0x08, 0x63, 0xa4, 0xb4, 0xf7, 0x60, 0x2d, 0x0a, 0xc2,
+ 0xd8, 0x62, 0x2f, 0x26, 0x21, 0x8b, 0xe4, 0x3d, 0x4c, 0xc5, 0xd4, 0x0f, 0xc2, 0xd8, 0x48, 0xa5,
+ 0xe4, 0xb6, 0x54, 0x74, 0x59, 0xe4, 0x30, 0xdf, 0xf5, 0xfc, 0x33, 0x2c, 0xb3, 0x24, 0xed, 0x51,
+ 0xbd, 0x9d, 0x8e, 0x91, 0x5b, 0x40, 0x5c, 0x76, 0x6a, 0x4f, 0x47, 0xb1, 0xb8, 0x7b, 0x5a, 0x31,
+ 0x7b, 0x11, 0xcb, 0xaa, 0xd2, 0xe4, 0x08, 0x5e, 0x0e, 0x4d, 0xf6, 0x82, 0x07, 0x69, 0x2b, 0xaf,
+ 0xed, 0x4f, 0xc7, 0x2c, 0xf4, 0x1c, 0xac, 0x2c, 0x85, 0x6e, 0x64, 0x27, 0xf4, 0xc4, 0x90, 0xfe,
+ 0x77, 0x05, 0x60, 0xe0, 0x04, 0x21, 0x0b, 0x71, 0x23, 0xdf, 0x83, 0x52, 0x84, 0x5f, 0x12, 0xea,
+ 0xab, 0x99, 0x27, 0x2d, 0xa9, 0x23, 0x7f, 0x1e, 0xd6, 0x8e, 0x9b, 0x66, 0xeb, 0x91, 0x35, 0x68,
+ 0xf5, 0xa9, 0x41, 0xa9, 0x9c, 0x46, 0xae, 0xe5, 0xd9, 0x63, 0xf9, 0xce, 0xfe, 0x8c, 0x89, 0x3f,
+ 0x84, 0x2b, 0x63, 0x5b, 0x90, 0x0f, 0xd7, 0xb5, 0x10, 0x27, 0x16, 0xb3, 0x30, 0x6a, 0x54, 0x70,
+ 0x4b, 0x5b, 0x38, 0x2c, 0xec, 0x9f, 0xa4, 0x83, 0xd8, 0x99, 0x26, 0xd6, 0xb7, 0xa9, 0xc1, 0x57,
+ 0xec, 0xf4, 0x8e, 0xac, 0xec, 0xfa, 0xa2, 0xa3, 0xcd, 0x49, 0x54, 0xfd, 0xb7, 0x0a, 0x54, 0xb0,
+ 0x37, 0x9c, 0xbb, 0x17, 0x14, 0xd2, 0x7b, 0xc1, 0xc7, 0x00, 0x19, 0xc8, 0x78, 0x7e, 0xc2, 0xec,
+ 0xb8, 0x4d, 0xa7, 0xee, 0xcd, 0x00, 0xa4, 0x19, 0xfd, 0x6b, 0x9f, 0x00, 0x64, 0xa0, 0x4d, 0xec,
+ 0x17, 0x32, 0xf7, 0x8e, 0x37, 0x73, 0xf6, 0x97, 0x71, 0x24, 0x23, 0xd1, 0x1f, 0xc9, 0x27, 0x0a,
+ 0x6a, 0xfb, 0x67, 0x2c, 0xe3, 0xa1, 0x92, 0x5a, 0xd8, 0x84, 0x22, 0x72, 0x64, 0xf2, 0x50, 0x8a,
+ 0x1f, 0x44, 0x83, 0x02, 0xf3, 0x5d, 0xc9, 0xc1, 0xfc, 0xa7, 0xfe, 0x73, 0x05, 0xd6, 0x85, 0x29,
+ 0x91, 0xad, 0x18, 0x3e, 0xde, 0xc3, 0x8a, 0x4c, 0x10, 0x98, 0x28, 0x48, 0x86, 0x80, 0xa2, 0x2e,
+ 0x42, 0xb2, 0x0b, 0xc5, 0x90, 0xaf, 0x7d, 0xa1, 0xa5, 0x4e, 0xbd, 0xa2, 0x42, 0x81, 0xbc, 0x0f,
+ 0x9a, 0x30, 0xc5, 0x2f, 0x42, 0x71, 0x68, 0x7b, 0x7e, 0x8c, 0x97, 0xfc, 0x0a, 0x5d, 0x43, 0x79,
+ 0x2b, 0x15, 0xeb, 0xdf, 0x81, 0x4d, 0x9c, 0xdf, 0x9c, 0xc6, 0x41, 0x9b, 0xc5, 0xcc, 0x91, 0xde,
+ 0xdc, 0x58, 0xe0, 0xcd, 0xa1, 0x7a, 0x67, 0x3f, 0xeb, 0x91, 0x3e, 0x84, 0x5a, 0x76, 0x1f, 0x0b,
+ 0xaf, 0x73, 0x33, 0xda, 0x55, 0xb1, 0xbb, 0xbf, 0x9a, 0x77, 0x3b, 0x13, 0x81, 0x84, 0x0c, 0xf4,
+ 0xaf, 0x15, 0x58, 0x93, 0xa3, 0xa7, 0x9e, 0xcf, 0xb0, 0xc9, 0x5e, 0x64, 0x7a, 0xe1, 0xc3, 0x34,
+ 0xb9, 0x9b, 0x84, 0x69, 0xee, 0x36, 0x31, 0x67, 0x71, 0x2f, 0x1b, 0xb1, 0x6b, 0xff, 0x07, 0x45,
+ 0x81, 0x6b, 0x8a, 0xa1, 0xb2, 0x00, 0x43, 0x75, 0x86, 0xe1, 0x1f, 0x57, 0xa0, 0x26, 0x2e, 0xce,
+ 0xaf, 0xdd, 0x5b, 0x6d, 0x42, 0xf1, 0xcb, 0x29, 0x0b, 0xcf, 0xb1, 0x03, 0xad, 0x50, 0xf1, 0xc1,
+ 0x8f, 0x43, 0x67, 0x1a, 0x46, 0x41, 0x28, 0xa9, 0x43, 0x7e, 0x65, 0x8e, 0xc9, 0x6a, 0xee, 0x98,
+ 0x7c, 0x08, 0x55, 0xa1, 0x61, 0xe1, 0x93, 0x99, 0xb8, 0xac, 0xbe, 0x95, 0xbf, 0xdb, 0xcb, 0x8b,
+ 0x47, 0x0b, 0xf5, 0xc4, 0x9b, 0x59, 0xaf, 0xdf, 0x33, 0x28, 0x38, 0xa9, 0x64, 0xd6, 0x4a, 0x94,
+ 0xe6, 0x5b, 0x89, 0x7b, 0xb0, 0x8d, 0xb5, 0xce, 0x5c, 0xcb, 0xc1, 0x3b, 0x96, 0xed, 0x38, 0xd3,
+ 0xd0, 0x76, 0xce, 0xe5, 0x81, 0xbd, 0x29, 0x47, 0x5b, 0x7c, 0xb0, 0x29, 0xc7, 0xc8, 0x6d, 0xa8,
+ 0x20, 0x7b, 0x62, 0x38, 0xca, 0xf9, 0x16, 0x28, 0xe1, 0x62, 0x5a, 0x8e, 0x12, 0x56, 0xbe, 0x0b,
+ 0x55, 0xc9, 0x34, 0x38, 0xa1, 0x82, 0xd8, 0x91, 0x8b, 0x8c, 0x46, 0x21, 0x9a, 0x31, 0xe0, 0x3e,
+ 0x00, 0xde, 0x21, 0xc5, 0x1c, 0xc0, 0x39, 0xeb, 0x17, 0x28, 0x81, 0x56, 0x4e, 0x53, 0x62, 0xc9,
+ 0x35, 0x98, 0xb5, 0x7c, 0x83, 0x49, 0x1e, 0x43, 0x6d, 0x62, 0x87, 0x91, 0xe7, 0x9f, 0x59, 0x78,
+ 0x81, 0xaf, 0x63, 0x2c, 0x77, 0x16, 0xc6, 0xf2, 0x44, 0x28, 0xe2, 0x55, 0xbe, 0x34, 0x30, 0x69,
+ 0xa7, 0x65, 0xd2, 0xea, 0x64, 0x26, 0x24, 0x1f, 0xc3, 0x55, 0x7b, 0x1a, 0x07, 0x96, 0xeb, 0x45,
+ 0x4e, 0xf0, 0x9c, 0x85, 0x16, 0xbe, 0x41, 0x89, 0x08, 0x36, 0xd6, 0x30, 0xc6, 0xca, 0x3e, 0xdd,
+ 0xe6, 0x3a, 0x6d, 0xa9, 0x82, 0x19, 0x8a, 0x51, 0x24, 0xff, 0x0f, 0xf5, 0xa4, 0xed, 0x12, 0xef,
+ 0x5a, 0x1a, 0x46, 0x70, 0x73, 0x51, 0xf1, 0xd0, 0x9a, 0x54, 0x15, 0x2f, 0x96, 0x0f, 0x40, 0x13,
+ 0x4b, 0x85, 0x69, 0xae, 0x37, 0xd6, 0x71, 0xf6, 0x95, 0x97, 0x94, 0x02, 0x5d, 0x3b, 0x9d, 0xab,
+ 0xb6, 0x01, 0x5c, 0x11, 0x36, 0xc4, 0x16, 0x90, 0x17, 0xc4, 0x11, 0xd0, 0x20, 0x18, 0xe5, 0xeb,
+ 0x39, 0x53, 0x73, 0xe4, 0x41, 0x37, 0x4f, 0x17, 0x51, 0xca, 0x4d, 0xa8, 0x0a, 0xa3, 0x2e, 0x9b,
+ 0xc4, 0xcf, 0x1a, 0x1b, 0x99, 0x43, 0x07, 0x70, 0xa0, 0xcd, 0xe5, 0xfa, 0x01, 0xc0, 0x2c, 0x51,
+ 0x49, 0x19, 0x30, 0x55, 0xb5, 0x25, 0x7c, 0xe9, 0xe8, 0xf4, 0x8e, 0xba, 0x86, 0xa6, 0x90, 0x55,
+ 0x80, 0x13, 0x83, 0x5a, 0xd4, 0x18, 0x0c, 0xbb, 0xa6, 0xa6, 0xea, 0xef, 0x42, 0x35, 0x03, 0x08,
+ 0xaa, 0x22, 0x24, 0xda, 0x12, 0xa9, 0xc2, 0x0a, 0x35, 0xba, 0xcd, 0xcf, 0xf1, 0x4d, 0xcf, 0x84,
+ 0xba, 0x40, 0x31, 0x61, 0xac, 0x5b, 0x73, 0xbd, 0xca, 0xe6, 0x22, 0xb0, 0x2f, 0xeb, 0x52, 0xa6,
+ 0xa0, 0xc9, 0x88, 0x46, 0xc9, 0x91, 0xfd, 0x32, 0xbe, 0x12, 0xf0, 0xe3, 0x4b, 0x3b, 0x15, 0x1f,
+ 0xe4, 0x23, 0x80, 0x0c, 0x52, 0xe2, 0x9a, 0xff, 0x52, 0xa4, 0x32, 0xaa, 0xfa, 0xa7, 0x50, 0xcd,
+ 0x2c, 0xbb, 0x70, 0xc5, 0xbd, 0x19, 0x43, 0xf2, 0x04, 0x68, 0xcc, 0x99, 0x4d, 0xdd, 0x4d, 0xde,
+ 0xab, 0x7f, 0xa3, 0x24, 0xac, 0x26, 0x8d, 0xe6, 0x5f, 0x42, 0xd4, 0x4b, 0x5e, 0x42, 0x6e, 0xcf,
+ 0x1d, 0xa1, 0x0b, 0x9e, 0x95, 0x33, 0x0a, 0xc8, 0xb5, 0xbc, 0x98, 0xd1, 0x3b, 0x85, 0x8a, 0x8f,
+ 0x0c, 0x01, 0x16, 0xb2, 0x04, 0xa8, 0xff, 0x4b, 0x81, 0xd5, 0xd4, 0x37, 0xd1, 0x06, 0xde, 0x82,
+ 0x52, 0x88, 0x7e, 0xca, 0x36, 0x70, 0x0e, 0x3d, 0xb1, 0x07, 0x2a, 0x75, 0xc8, 0x0d, 0xa8, 0xe7,
+ 0x78, 0x0c, 0x61, 0x28, 0xd0, 0x5a, 0x96, 0xbe, 0x32, 0x9d, 0x65, 0xe1, 0xdb, 0xf4, 0xf0, 0x2f,
+ 0x63, 0xeb, 0x0f, 0xa1, 0x96, 0x14, 0x21, 0xfa, 0x57, 0x44, 0xff, 0x36, 0x16, 0xc4, 0x9f, 0x56,
+ 0x4f, 0x67, 0x1f, 0x1f, 0x94, 0xca, 0xff, 0x5c, 0xd1, 0x7e, 0xd9, 0x7b, 0x50, 0xfe, 0x81, 0xfc,
+ 0xbf, 0xf6, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x12, 0xcb, 0x31, 0xc6, 0x1d, 0x00, 0x00,
+}
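The getters in the generated file above all follow protoc-gen-go's nil-safe convention for proto2 fields: each one guards both a nil receiver and an unset optional pointer, so chained reads such as resp.GetStatus().GetCode() never panic. A minimal standalone sketch of that pattern, using a hypothetical type rather than the vendored package (whose internal import path is not importable from outside the appengine module):

package main

import "fmt"

// searchResponseLike mirrors the nil-safe getter shape emitted by
// protoc-gen-go for proto2 optional fields; hypothetical type, not
// the vendored search.SearchResponse.
type searchResponseLike struct {
	Cursor *string
}

// GetCursor tolerates a nil receiver and an unset field, returning
// the zero value instead of panicking, like the generated getters
// in the diff above.
func (m *searchResponseLike) GetCursor() string {
	if m != nil && m.Cursor != nil {
		return *m.Cursor
	}
	return ""
}

func main() {
	var missing *searchResponseLike        // nil message
	fmt.Println(missing.GetCursor() == "") // true: no panic

	cursor := "next-page"
	set := &searchResponseLike{Cursor: &cursor}
	fmt.Println(set.GetCursor()) // next-page
}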
diff --git a/vendor/google.golang.org/appengine/internal/search/search.proto b/vendor/google.golang.org/appengine/internal/search/search.proto
new file mode 100644
index 000000000..61df6508b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/search/search.proto
@@ -0,0 +1,394 @@
+syntax = "proto2";
+option go_package = "search";
+
+package search;
+
+message Scope {
+ enum Type {
+ USER_BY_CANONICAL_ID = 1;
+ USER_BY_EMAIL = 2;
+ GROUP_BY_CANONICAL_ID = 3;
+ GROUP_BY_EMAIL = 4;
+ GROUP_BY_DOMAIN = 5;
+ ALL_USERS = 6;
+ ALL_AUTHENTICATED_USERS = 7;
+ }
+
+ optional Type type = 1;
+ optional string value = 2;
+}
+
+message Entry {
+ enum Permission {
+ READ = 1;
+ WRITE = 2;
+ FULL_CONTROL = 3;
+ }
+
+ optional Scope scope = 1;
+ optional Permission permission = 2;
+ optional string display_name = 3;
+}
+
+message AccessControlList {
+ optional string owner = 1;
+ repeated Entry entries = 2;
+}
+
+message FieldValue {
+ enum ContentType {
+ TEXT = 0;
+ HTML = 1;
+ ATOM = 2;
+ DATE = 3;
+ NUMBER = 4;
+ GEO = 5;
+ }
+
+ optional ContentType type = 1 [default = TEXT];
+
+ optional string language = 2 [default = "en"];
+
+ optional string string_value = 3;
+
+ optional group Geo = 4 {
+ required double lat = 5;
+ required double lng = 6;
+ }
+}
+
+message Field {
+ required string name = 1;
+ required FieldValue value = 2;
+}
+
+message FieldTypes {
+ required string name = 1;
+ repeated FieldValue.ContentType type = 2;
+}
+
+message IndexShardSettings {
+ repeated int32 prev_num_shards = 1;
+ required int32 num_shards = 2 [default = 1];
+ repeated int32 prev_num_shards_search_false = 3;
+ optional string local_replica = 4 [default = ""];
+}
+
+message FacetValue {
+ enum ContentType {
+ ATOM = 2;
+ NUMBER = 4;
+ }
+
+ optional ContentType type = 1 [default = ATOM];
+ optional string string_value = 3;
+}
+
+message Facet {
+ required string name = 1;
+ required FacetValue value = 2;
+}
+
+message DocumentMetadata {
+ optional int64 version = 1;
+ optional int64 committed_st_version = 2;
+}
+
+message Document {
+ optional string id = 1;
+ optional string language = 2 [default = "en"];
+ repeated Field field = 3;
+ optional int32 order_id = 4;
+ optional OrderIdSource order_id_source = 6 [default = SUPPLIED];
+
+ enum OrderIdSource {
+ DEFAULTED = 0;
+ SUPPLIED = 1;
+ }
+
+ enum Storage {
+ DISK = 0;
+ }
+
+ optional Storage storage = 5 [default = DISK];
+ repeated Facet facet = 8;
+}
+
+message SearchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ TRANSIENT_ERROR = 2;
+ INTERNAL_ERROR = 3;
+ PERMISSION_DENIED = 4;
+ TIMEOUT = 5;
+ CONCURRENT_TRANSACTION = 6;
+ }
+}
+
+message RequestStatus {
+ required SearchServiceError.ErrorCode code = 1;
+ optional string error_detail = 2;
+ optional int32 canonical_code = 3;
+}
+
+message IndexSpec {
+ required string name = 1;
+
+ enum Consistency {
+ GLOBAL = 0;
+ PER_DOCUMENT = 1;
+ }
+ optional Consistency consistency = 2 [default = PER_DOCUMENT];
+
+ optional string namespace = 3;
+ optional int32 version = 4;
+
+ enum Source {
+ SEARCH = 0;
+ DATASTORE = 1;
+ CLOUD_STORAGE = 2;
+ }
+ optional Source source = 5 [default = SEARCH];
+
+ enum Mode {
+ PRIORITY = 0;
+ BACKGROUND = 1;
+ }
+ optional Mode mode = 6 [default = PRIORITY];
+}
+
+message IndexMetadata {
+ required IndexSpec index_spec = 1;
+
+ repeated FieldTypes field = 2;
+
+ message Storage {
+ optional int64 amount_used = 1;
+ optional int64 limit = 2;
+ }
+ optional Storage storage = 3;
+}
+
+message IndexDocumentParams {
+ repeated Document document = 1;
+
+ enum Freshness {
+ SYNCHRONOUSLY = 0;
+ WHEN_CONVENIENT = 1;
+ }
+ optional Freshness freshness = 2 [default = SYNCHRONOUSLY, deprecated = true];
+
+ required IndexSpec index_spec = 3;
+}
+
+message IndexDocumentRequest {
+ required IndexDocumentParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message IndexDocumentResponse {
+ repeated RequestStatus status = 1;
+
+ repeated string doc_id = 2;
+}
+
+message DeleteDocumentParams {
+ repeated string doc_id = 1;
+
+ required IndexSpec index_spec = 2;
+}
+
+message DeleteDocumentRequest {
+ required DeleteDocumentParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message DeleteDocumentResponse {
+ repeated RequestStatus status = 1;
+}
+
+message ListDocumentsParams {
+ required IndexSpec index_spec = 1;
+ optional string start_doc_id = 2;
+ optional bool include_start_doc = 3 [default = true];
+ optional int32 limit = 4 [default = 100];
+ optional bool keys_only = 5;
+}
+
+message ListDocumentsRequest {
+ required ListDocumentsParams params = 1;
+
+ optional bytes app_id = 2;
+}
+
+message ListDocumentsResponse {
+ required RequestStatus status = 1;
+
+ repeated Document document = 2;
+}
+
+message ListIndexesParams {
+ optional bool fetch_schema = 1;
+ optional int32 limit = 2 [default = 20];
+ optional string namespace = 3;
+ optional string start_index_name = 4;
+ optional bool include_start_index = 5 [default = true];
+ optional string index_name_prefix = 6;
+ optional int32 offset = 7;
+ optional IndexSpec.Source source = 8 [default = SEARCH];
+}
+
+message ListIndexesRequest {
+ required ListIndexesParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message ListIndexesResponse {
+ required RequestStatus status = 1;
+ repeated IndexMetadata index_metadata = 2;
+}
+
+message DeleteSchemaParams {
+ optional IndexSpec.Source source = 1 [default = SEARCH];
+ repeated IndexSpec index_spec = 2;
+}
+
+message DeleteSchemaRequest {
+ required DeleteSchemaParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message DeleteSchemaResponse {
+ repeated RequestStatus status = 1;
+}
+
+message SortSpec {
+ required string sort_expression = 1;
+ optional bool sort_descending = 2 [default = true];
+ optional string default_value_text = 4;
+ optional double default_value_numeric = 5;
+}
+
+message ScorerSpec {
+ enum Scorer {
+ RESCORING_MATCH_SCORER = 0;
+ MATCH_SCORER = 2;
+ }
+ optional Scorer scorer = 1 [default = MATCH_SCORER];
+
+ optional int32 limit = 2 [default = 1000];
+ optional string match_scorer_parameters = 9;
+}
+
+message FieldSpec {
+ repeated string name = 1;
+
+ repeated group Expression = 2 {
+ required string name = 3;
+ required string expression = 4;
+ }
+}
+
+message FacetRange {
+ optional string name = 1;
+ optional string start = 2;
+ optional string end = 3;
+}
+
+message FacetRequestParam {
+ optional int32 value_limit = 1;
+ repeated FacetRange range = 2;
+ repeated string value_constraint = 3;
+}
+
+message FacetAutoDetectParam {
+ optional int32 value_limit = 1 [default = 10];
+}
+
+message FacetRequest {
+ required string name = 1;
+ optional FacetRequestParam params = 2;
+}
+
+message FacetRefinement {
+ required string name = 1;
+ optional string value = 2;
+
+ message Range {
+ optional string start = 1;
+ optional string end = 2;
+ }
+ optional Range range = 3;
+}
+
+message SearchParams {
+ required IndexSpec index_spec = 1;
+ required string query = 2;
+ optional string cursor = 4;
+ optional int32 offset = 11;
+
+ enum CursorType {
+ NONE = 0;
+ SINGLE = 1;
+ PER_RESULT = 2;
+ }
+ optional CursorType cursor_type = 5 [default = NONE];
+
+ optional int32 limit = 6 [default = 20];
+ optional int32 matched_count_accuracy = 7;
+ repeated SortSpec sort_spec = 8;
+ optional ScorerSpec scorer_spec = 9;
+ optional FieldSpec field_spec = 10;
+ optional bool keys_only = 12;
+
+ enum ParsingMode {
+ STRICT = 0;
+ RELAXED = 1;
+ }
+ optional ParsingMode parsing_mode = 13 [default = STRICT];
+
+ optional int32 auto_discover_facet_count = 15 [default = 0];
+ repeated FacetRequest include_facet = 16;
+ repeated FacetRefinement facet_refinement = 17;
+ optional FacetAutoDetectParam facet_auto_detect_param = 18;
+ optional int32 facet_depth = 19 [default = 1000];
+}
+
+message SearchRequest {
+ required SearchParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message FacetResultValue {
+ required string name = 1;
+ required int32 count = 2;
+ required FacetRefinement refinement = 3;
+}
+
+message FacetResult {
+ required string name = 1;
+ repeated FacetResultValue value = 2;
+}
+
+message SearchResult {
+ required Document document = 1;
+ repeated Field expression = 4;
+ repeated double score = 2;
+ optional string cursor = 3;
+}
+
+message SearchResponse {
+ repeated SearchResult result = 1;
+ required int64 matched_count = 2;
+ required RequestStatus status = 3;
+ optional string cursor = 4;
+ repeated FacetResult facet_result = 5;
+
+ extensions 1000 to 9999;
+}
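Several fields in search.proto carry proto2 defaults, for example optional int32 limit = 6 [default = 20] in SearchParams. protoc-gen-go turns each such default into a package-level constant that the field's getter falls back to when the field is unset, so callers see 20 rather than the zero value. A standalone sketch of that fallback, under hypothetical names rather than the vendored ones:

package main

import "fmt"

// defaultSearchLimit stands in for the Default_SearchParams_Limit-style
// constant that protoc-gen-go emits for `[default = 20]`; the names in
// this sketch are hypothetical, not the vendored ones.
const defaultSearchLimit int32 = 20

type searchParamsLike struct {
	Limit *int32 // proto2 optional field, hence a pointer
}

// GetLimit reports the declared default when the field was never set.
func (m *searchParamsLike) GetLimit() int32 {
	if m != nil && m.Limit != nil {
		return *m.Limit
	}
	return defaultSearchLimit
}

func main() {
	var p searchParamsLike
	fmt.Println(p.GetLimit()) // 20: the declared default, though Limit is unset

	ten := int32(10)
	p.Limit = &ten
	fmt.Println(p.GetLimit()) // 10
}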
diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go
new file mode 100644
index 000000000..60628ec9b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go
@@ -0,0 +1,1858 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/socket/socket_service.proto
+// DO NOT EDIT!
+
+/*
+Package socket is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/socket/socket_service.proto
+
+It has these top-level messages:
+ RemoteSocketServiceError
+ AddressPort
+ CreateSocketRequest
+ CreateSocketReply
+ BindRequest
+ BindReply
+ GetSocketNameRequest
+ GetSocketNameReply
+ GetPeerNameRequest
+ GetPeerNameReply
+ SocketOption
+ SetSocketOptionsRequest
+ SetSocketOptionsReply
+ GetSocketOptionsRequest
+ GetSocketOptionsReply
+ ConnectRequest
+ ConnectReply
+ ListenRequest
+ ListenReply
+ AcceptRequest
+ AcceptReply
+ ShutDownRequest
+ ShutDownReply
+ CloseRequest
+ CloseReply
+ SendRequest
+ SendReply
+ ReceiveRequest
+ ReceiveReply
+ PollEvent
+ PollRequest
+ PollReply
+ ResolveRequest
+ ResolveReply
+*/
+package socket
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type RemoteSocketServiceError_ErrorCode int32
+
+const (
+ RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1
+ RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2
+ RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4
+ RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5
+ RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6
+ RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7
+)
+
+var RemoteSocketServiceError_ErrorCode_name = map[int32]string{
+ 1: "SYSTEM_ERROR",
+ 2: "GAI_ERROR",
+ 4: "FAILURE",
+ 5: "PERMISSION_DENIED",
+ 6: "INVALID_REQUEST",
+ 7: "SOCKET_CLOSED",
+}
+var RemoteSocketServiceError_ErrorCode_value = map[string]int32{
+ "SYSTEM_ERROR": 1,
+ "GAI_ERROR": 2,
+ "FAILURE": 4,
+ "PERMISSION_DENIED": 5,
+ "INVALID_REQUEST": 6,
+ "SOCKET_CLOSED": 7,
+}
+
+func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode {
+ p := new(RemoteSocketServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RemoteSocketServiceError_ErrorCode) String() string {
+ return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x))
+}
+func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RemoteSocketServiceError_ErrorCode(value)
+ return nil
+}
+
+type RemoteSocketServiceError_SystemError int32
+
+const (
+ RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0
+ RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1
+ RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2
+ RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3
+ RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4
+ RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5
+ RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6
+ RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7
+ RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8
+ RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9
+ RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10
+ RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11
+ RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11
+ RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12
+ RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13
+ RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14
+ RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15
+ RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16
+ RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17
+ RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18
+ RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19
+ RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20
+ RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21
+ RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22
+ RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23
+ RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24
+ RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25
+ RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26
+ RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27
+ RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28
+ RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29
+ RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30
+ RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31
+ RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32
+ RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33
+ RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34
+ RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35
+ RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35
+ RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36
+ RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37
+ RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38
+ RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39
+ RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40
+ RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42
+ RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43
+ RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44
+ RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45
+ RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46
+ RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47
+ RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48
+ RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49
+ RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50
+ RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51
+ RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52
+ RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53
+ RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54
+ RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55
+ RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56
+ RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57
+ RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59
+ RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60
+ RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61
+ RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62
+ RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63
+ RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64
+ RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65
+ RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66
+ RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67
+ RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68
+ RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69
+ RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70
+ RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71
+ RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72
+ RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73
+ RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74
+ RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75
+ RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76
+ RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77
+ RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78
+ RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79
+ RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80
+ RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81
+ RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82
+ RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83
+ RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84
+ RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85
+ RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86
+ RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87
+ RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88
+ RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89
+ RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90
+ RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91
+ RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92
+ RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93
+ RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94
+ RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95
+ RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95
+ RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96
+ RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97
+ RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98
+ RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99
+ RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100
+ RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101
+ RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102
+ RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103
+ RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104
+ RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105
+ RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106
+ RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107
+ RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108
+ RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109
+ RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110
+ RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111
+ RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112
+ RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113
+ RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114
+ RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115
+ RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116
+ RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117
+ RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118
+ RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119
+ RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120
+ RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121
+ RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122
+ RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123
+ RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124
+ RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125
+ RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126
+ RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127
+ RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128
+ RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129
+ RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130
+ RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131
+ RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132
+)
+
+var RemoteSocketServiceError_SystemError_name = map[int32]string{
+ 0: "SYS_SUCCESS",
+ 1: "SYS_EPERM",
+ 2: "SYS_ENOENT",
+ 3: "SYS_ESRCH",
+ 4: "SYS_EINTR",
+ 5: "SYS_EIO",
+ 6: "SYS_ENXIO",
+ 7: "SYS_E2BIG",
+ 8: "SYS_ENOEXEC",
+ 9: "SYS_EBADF",
+ 10: "SYS_ECHILD",
+ 11: "SYS_EAGAIN",
+ // Duplicate value: 11: "SYS_EWOULDBLOCK",
+ 12: "SYS_ENOMEM",
+ 13: "SYS_EACCES",
+ 14: "SYS_EFAULT",
+ 15: "SYS_ENOTBLK",
+ 16: "SYS_EBUSY",
+ 17: "SYS_EEXIST",
+ 18: "SYS_EXDEV",
+ 19: "SYS_ENODEV",
+ 20: "SYS_ENOTDIR",
+ 21: "SYS_EISDIR",
+ 22: "SYS_EINVAL",
+ 23: "SYS_ENFILE",
+ 24: "SYS_EMFILE",
+ 25: "SYS_ENOTTY",
+ 26: "SYS_ETXTBSY",
+ 27: "SYS_EFBIG",
+ 28: "SYS_ENOSPC",
+ 29: "SYS_ESPIPE",
+ 30: "SYS_EROFS",
+ 31: "SYS_EMLINK",
+ 32: "SYS_EPIPE",
+ 33: "SYS_EDOM",
+ 34: "SYS_ERANGE",
+ 35: "SYS_EDEADLK",
+ // Duplicate value: 35: "SYS_EDEADLOCK",
+ 36: "SYS_ENAMETOOLONG",
+ 37: "SYS_ENOLCK",
+ 38: "SYS_ENOSYS",
+ 39: "SYS_ENOTEMPTY",
+ 40: "SYS_ELOOP",
+ 42: "SYS_ENOMSG",
+ 43: "SYS_EIDRM",
+ 44: "SYS_ECHRNG",
+ 45: "SYS_EL2NSYNC",
+ 46: "SYS_EL3HLT",
+ 47: "SYS_EL3RST",
+ 48: "SYS_ELNRNG",
+ 49: "SYS_EUNATCH",
+ 50: "SYS_ENOCSI",
+ 51: "SYS_EL2HLT",
+ 52: "SYS_EBADE",
+ 53: "SYS_EBADR",
+ 54: "SYS_EXFULL",
+ 55: "SYS_ENOANO",
+ 56: "SYS_EBADRQC",
+ 57: "SYS_EBADSLT",
+ 59: "SYS_EBFONT",
+ 60: "SYS_ENOSTR",
+ 61: "SYS_ENODATA",
+ 62: "SYS_ETIME",
+ 63: "SYS_ENOSR",
+ 64: "SYS_ENONET",
+ 65: "SYS_ENOPKG",
+ 66: "SYS_EREMOTE",
+ 67: "SYS_ENOLINK",
+ 68: "SYS_EADV",
+ 69: "SYS_ESRMNT",
+ 70: "SYS_ECOMM",
+ 71: "SYS_EPROTO",
+ 72: "SYS_EMULTIHOP",
+ 73: "SYS_EDOTDOT",
+ 74: "SYS_EBADMSG",
+ 75: "SYS_EOVERFLOW",
+ 76: "SYS_ENOTUNIQ",
+ 77: "SYS_EBADFD",
+ 78: "SYS_EREMCHG",
+ 79: "SYS_ELIBACC",
+ 80: "SYS_ELIBBAD",
+ 81: "SYS_ELIBSCN",
+ 82: "SYS_ELIBMAX",
+ 83: "SYS_ELIBEXEC",
+ 84: "SYS_EILSEQ",
+ 85: "SYS_ERESTART",
+ 86: "SYS_ESTRPIPE",
+ 87: "SYS_EUSERS",
+ 88: "SYS_ENOTSOCK",
+ 89: "SYS_EDESTADDRREQ",
+ 90: "SYS_EMSGSIZE",
+ 91: "SYS_EPROTOTYPE",
+ 92: "SYS_ENOPROTOOPT",
+ 93: "SYS_EPROTONOSUPPORT",
+ 94: "SYS_ESOCKTNOSUPPORT",
+ 95: "SYS_EOPNOTSUPP",
+ // Duplicate value: 95: "SYS_ENOTSUP",
+ 96: "SYS_EPFNOSUPPORT",
+ 97: "SYS_EAFNOSUPPORT",
+ 98: "SYS_EADDRINUSE",
+ 99: "SYS_EADDRNOTAVAIL",
+ 100: "SYS_ENETDOWN",
+ 101: "SYS_ENETUNREACH",
+ 102: "SYS_ENETRESET",
+ 103: "SYS_ECONNABORTED",
+ 104: "SYS_ECONNRESET",
+ 105: "SYS_ENOBUFS",
+ 106: "SYS_EISCONN",
+ 107: "SYS_ENOTCONN",
+ 108: "SYS_ESHUTDOWN",
+ 109: "SYS_ETOOMANYREFS",
+ 110: "SYS_ETIMEDOUT",
+ 111: "SYS_ECONNREFUSED",
+ 112: "SYS_EHOSTDOWN",
+ 113: "SYS_EHOSTUNREACH",
+ 114: "SYS_EALREADY",
+ 115: "SYS_EINPROGRESS",
+ 116: "SYS_ESTALE",
+ 117: "SYS_EUCLEAN",
+ 118: "SYS_ENOTNAM",
+ 119: "SYS_ENAVAIL",
+ 120: "SYS_EISNAM",
+ 121: "SYS_EREMOTEIO",
+ 122: "SYS_EDQUOT",
+ 123: "SYS_ENOMEDIUM",
+ 124: "SYS_EMEDIUMTYPE",
+ 125: "SYS_ECANCELED",
+ 126: "SYS_ENOKEY",
+ 127: "SYS_EKEYEXPIRED",
+ 128: "SYS_EKEYREVOKED",
+ 129: "SYS_EKEYREJECTED",
+ 130: "SYS_EOWNERDEAD",
+ 131: "SYS_ENOTRECOVERABLE",
+ 132: "SYS_ERFKILL",
+}
+var RemoteSocketServiceError_SystemError_value = map[string]int32{
+ "SYS_SUCCESS": 0,
+ "SYS_EPERM": 1,
+ "SYS_ENOENT": 2,
+ "SYS_ESRCH": 3,
+ "SYS_EINTR": 4,
+ "SYS_EIO": 5,
+ "SYS_ENXIO": 6,
+ "SYS_E2BIG": 7,
+ "SYS_ENOEXEC": 8,
+ "SYS_EBADF": 9,
+ "SYS_ECHILD": 10,
+ "SYS_EAGAIN": 11,
+ "SYS_EWOULDBLOCK": 11,
+ "SYS_ENOMEM": 12,
+ "SYS_EACCES": 13,
+ "SYS_EFAULT": 14,
+ "SYS_ENOTBLK": 15,
+ "SYS_EBUSY": 16,
+ "SYS_EEXIST": 17,
+ "SYS_EXDEV": 18,
+ "SYS_ENODEV": 19,
+ "SYS_ENOTDIR": 20,
+ "SYS_EISDIR": 21,
+ "SYS_EINVAL": 22,
+ "SYS_ENFILE": 23,
+ "SYS_EMFILE": 24,
+ "SYS_ENOTTY": 25,
+ "SYS_ETXTBSY": 26,
+ "SYS_EFBIG": 27,
+ "SYS_ENOSPC": 28,
+ "SYS_ESPIPE": 29,
+ "SYS_EROFS": 30,
+ "SYS_EMLINK": 31,
+ "SYS_EPIPE": 32,
+ "SYS_EDOM": 33,
+ "SYS_ERANGE": 34,
+ "SYS_EDEADLK": 35,
+ "SYS_EDEADLOCK": 35,
+ "SYS_ENAMETOOLONG": 36,
+ "SYS_ENOLCK": 37,
+ "SYS_ENOSYS": 38,
+ "SYS_ENOTEMPTY": 39,
+ "SYS_ELOOP": 40,
+ "SYS_ENOMSG": 42,
+ "SYS_EIDRM": 43,
+ "SYS_ECHRNG": 44,
+ "SYS_EL2NSYNC": 45,
+ "SYS_EL3HLT": 46,
+ "SYS_EL3RST": 47,
+ "SYS_ELNRNG": 48,
+ "SYS_EUNATCH": 49,
+ "SYS_ENOCSI": 50,
+ "SYS_EL2HLT": 51,
+ "SYS_EBADE": 52,
+ "SYS_EBADR": 53,
+ "SYS_EXFULL": 54,
+ "SYS_ENOANO": 55,
+ "SYS_EBADRQC": 56,
+ "SYS_EBADSLT": 57,
+ "SYS_EBFONT": 59,
+ "SYS_ENOSTR": 60,
+ "SYS_ENODATA": 61,
+ "SYS_ETIME": 62,
+ "SYS_ENOSR": 63,
+ "SYS_ENONET": 64,
+ "SYS_ENOPKG": 65,
+ "SYS_EREMOTE": 66,
+ "SYS_ENOLINK": 67,
+ "SYS_EADV": 68,
+ "SYS_ESRMNT": 69,
+ "SYS_ECOMM": 70,
+ "SYS_EPROTO": 71,
+ "SYS_EMULTIHOP": 72,
+ "SYS_EDOTDOT": 73,
+ "SYS_EBADMSG": 74,
+ "SYS_EOVERFLOW": 75,
+ "SYS_ENOTUNIQ": 76,
+ "SYS_EBADFD": 77,
+ "SYS_EREMCHG": 78,
+ "SYS_ELIBACC": 79,
+ "SYS_ELIBBAD": 80,
+ "SYS_ELIBSCN": 81,
+ "SYS_ELIBMAX": 82,
+ "SYS_ELIBEXEC": 83,
+ "SYS_EILSEQ": 84,
+ "SYS_ERESTART": 85,
+ "SYS_ESTRPIPE": 86,
+ "SYS_EUSERS": 87,
+ "SYS_ENOTSOCK": 88,
+ "SYS_EDESTADDRREQ": 89,
+ "SYS_EMSGSIZE": 90,
+ "SYS_EPROTOTYPE": 91,
+ "SYS_ENOPROTOOPT": 92,
+ "SYS_EPROTONOSUPPORT": 93,
+ "SYS_ESOCKTNOSUPPORT": 94,
+ "SYS_EOPNOTSUPP": 95,
+ "SYS_ENOTSUP": 95,
+ "SYS_EPFNOSUPPORT": 96,
+ "SYS_EAFNOSUPPORT": 97,
+ "SYS_EADDRINUSE": 98,
+ "SYS_EADDRNOTAVAIL": 99,
+ "SYS_ENETDOWN": 100,
+ "SYS_ENETUNREACH": 101,
+ "SYS_ENETRESET": 102,
+ "SYS_ECONNABORTED": 103,
+ "SYS_ECONNRESET": 104,
+ "SYS_ENOBUFS": 105,
+ "SYS_EISCONN": 106,
+ "SYS_ENOTCONN": 107,
+ "SYS_ESHUTDOWN": 108,
+ "SYS_ETOOMANYREFS": 109,
+ "SYS_ETIMEDOUT": 110,
+ "SYS_ECONNREFUSED": 111,
+ "SYS_EHOSTDOWN": 112,
+ "SYS_EHOSTUNREACH": 113,
+ "SYS_EALREADY": 114,
+ "SYS_EINPROGRESS": 115,
+ "SYS_ESTALE": 116,
+ "SYS_EUCLEAN": 117,
+ "SYS_ENOTNAM": 118,
+ "SYS_ENAVAIL": 119,
+ "SYS_EISNAM": 120,
+ "SYS_EREMOTEIO": 121,
+ "SYS_EDQUOT": 122,
+ "SYS_ENOMEDIUM": 123,
+ "SYS_EMEDIUMTYPE": 124,
+ "SYS_ECANCELED": 125,
+ "SYS_ENOKEY": 126,
+ "SYS_EKEYEXPIRED": 127,
+ "SYS_EKEYREVOKED": 128,
+ "SYS_EKEYREJECTED": 129,
+ "SYS_EOWNERDEAD": 130,
+ "SYS_ENOTRECOVERABLE": 131,
+ "SYS_ERFKILL": 132,
+}
+
+func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError {
+ p := new(RemoteSocketServiceError_SystemError)
+ *p = x
+ return p
+}
+func (x RemoteSocketServiceError_SystemError) String() string {
+ return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x))
+}
+func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError")
+ if err != nil {
+ return err
+ }
+ *x = RemoteSocketServiceError_SystemError(value)
+ return nil
+}
+
+type CreateSocketRequest_SocketFamily int32
+
+const (
+ CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1
+ CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2
+)
+
+var CreateSocketRequest_SocketFamily_name = map[int32]string{
+ 1: "IPv4",
+ 2: "IPv6",
+}
+var CreateSocketRequest_SocketFamily_value = map[string]int32{
+ "IPv4": 1,
+ "IPv6": 2,
+}
+
+func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily {
+ p := new(CreateSocketRequest_SocketFamily)
+ *p = x
+ return p
+}
+func (x CreateSocketRequest_SocketFamily) String() string {
+ return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x))
+}
+func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily")
+ if err != nil {
+ return err
+ }
+ *x = CreateSocketRequest_SocketFamily(value)
+ return nil
+}
+
+type CreateSocketRequest_SocketProtocol int32
+
+const (
+ CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1
+ CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2
+)
+
+var CreateSocketRequest_SocketProtocol_name = map[int32]string{
+ 1: "TCP",
+ 2: "UDP",
+}
+var CreateSocketRequest_SocketProtocol_value = map[string]int32{
+ "TCP": 1,
+ "UDP": 2,
+}
+
+func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol {
+ p := new(CreateSocketRequest_SocketProtocol)
+ *p = x
+ return p
+}
+func (x CreateSocketRequest_SocketProtocol) String() string {
+ return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x))
+}
+func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol")
+ if err != nil {
+ return err
+ }
+ *x = CreateSocketRequest_SocketProtocol(value)
+ return nil
+}
+
+type SocketOption_SocketOptionLevel int32
+
+const (
+ SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0
+ SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1
+ SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6
+ SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17
+)
+
+var SocketOption_SocketOptionLevel_name = map[int32]string{
+ 0: "SOCKET_SOL_IP",
+ 1: "SOCKET_SOL_SOCKET",
+ 6: "SOCKET_SOL_TCP",
+ 17: "SOCKET_SOL_UDP",
+}
+var SocketOption_SocketOptionLevel_value = map[string]int32{
+ "SOCKET_SOL_IP": 0,
+ "SOCKET_SOL_SOCKET": 1,
+ "SOCKET_SOL_TCP": 6,
+ "SOCKET_SOL_UDP": 17,
+}
+
+func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel {
+ p := new(SocketOption_SocketOptionLevel)
+ *p = x
+ return p
+}
+func (x SocketOption_SocketOptionLevel) String() string {
+ return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x))
+}
+func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel")
+ if err != nil {
+ return err
+ }
+ *x = SocketOption_SocketOptionLevel(value)
+ return nil
+}
+
+type SocketOption_SocketOptionName int32
+
+const (
+ SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5
+ SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6
+ SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7
+ SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8
+ SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9
+ SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10
+ SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13
+ SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20
+ SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21
+ SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1
+ SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2
+ SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3
+ SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4
+ SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5
+ SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6
+ SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7
+ SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8
+ SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9
+ SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10
+ SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11
+ SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12
+)
+
+var SocketOption_SocketOptionName_name = map[int32]string{
+ 1: "SOCKET_SO_DEBUG",
+ 2: "SOCKET_SO_REUSEADDR",
+ 3: "SOCKET_SO_TYPE",
+ 4: "SOCKET_SO_ERROR",
+ 5: "SOCKET_SO_DONTROUTE",
+ 6: "SOCKET_SO_BROADCAST",
+ 7: "SOCKET_SO_SNDBUF",
+ 8: "SOCKET_SO_RCVBUF",
+ 9: "SOCKET_SO_KEEPALIVE",
+ 10: "SOCKET_SO_OOBINLINE",
+ 13: "SOCKET_SO_LINGER",
+ 20: "SOCKET_SO_RCVTIMEO",
+ 21: "SOCKET_SO_SNDTIMEO",
+ // Duplicate value: 1: "SOCKET_IP_TOS",
+ // Duplicate value: 2: "SOCKET_IP_TTL",
+ // Duplicate value: 3: "SOCKET_IP_HDRINCL",
+ // Duplicate value: 4: "SOCKET_IP_OPTIONS",
+ // Duplicate value: 1: "SOCKET_TCP_NODELAY",
+ // Duplicate value: 2: "SOCKET_TCP_MAXSEG",
+ // Duplicate value: 3: "SOCKET_TCP_CORK",
+ // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE",
+ // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL",
+ // Duplicate value: 6: "SOCKET_TCP_KEEPCNT",
+ // Duplicate value: 7: "SOCKET_TCP_SYNCNT",
+ // Duplicate value: 8: "SOCKET_TCP_LINGER2",
+ // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT",
+ // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP",
+ 11: "SOCKET_TCP_INFO",
+ 12: "SOCKET_TCP_QUICKACK",
+}
+var SocketOption_SocketOptionName_value = map[string]int32{
+ "SOCKET_SO_DEBUG": 1,
+ "SOCKET_SO_REUSEADDR": 2,
+ "SOCKET_SO_TYPE": 3,
+ "SOCKET_SO_ERROR": 4,
+ "SOCKET_SO_DONTROUTE": 5,
+ "SOCKET_SO_BROADCAST": 6,
+ "SOCKET_SO_SNDBUF": 7,
+ "SOCKET_SO_RCVBUF": 8,
+ "SOCKET_SO_KEEPALIVE": 9,
+ "SOCKET_SO_OOBINLINE": 10,
+ "SOCKET_SO_LINGER": 13,
+ "SOCKET_SO_RCVTIMEO": 20,
+ "SOCKET_SO_SNDTIMEO": 21,
+ "SOCKET_IP_TOS": 1,
+ "SOCKET_IP_TTL": 2,
+ "SOCKET_IP_HDRINCL": 3,
+ "SOCKET_IP_OPTIONS": 4,
+ "SOCKET_TCP_NODELAY": 1,
+ "SOCKET_TCP_MAXSEG": 2,
+ "SOCKET_TCP_CORK": 3,
+ "SOCKET_TCP_KEEPIDLE": 4,
+ "SOCKET_TCP_KEEPINTVL": 5,
+ "SOCKET_TCP_KEEPCNT": 6,
+ "SOCKET_TCP_SYNCNT": 7,
+ "SOCKET_TCP_LINGER2": 8,
+ "SOCKET_TCP_DEFER_ACCEPT": 9,
+ "SOCKET_TCP_WINDOW_CLAMP": 10,
+ "SOCKET_TCP_INFO": 11,
+ "SOCKET_TCP_QUICKACK": 12,
+}
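+
+// Note (editorial, not generated): SocketOptionName is declared with
+// allow_alias in the .proto, so the SO_*, IP_* and TCP_* names reuse the
+// same numeric values and are disambiguated by the SocketOptionLevel that
+// accompanies them in a SocketOption. A Go map keeps one string per int32
+// key, which is why protoc-gen-go renders the aliased entries in the name
+// map above as "Duplicate value" comments; String() therefore returns the
+// first name registered for an aliased value.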
+
+func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName {
+ p := new(SocketOption_SocketOptionName)
+ *p = x
+ return p
+}
+func (x SocketOption_SocketOptionName) String() string {
+ return proto.EnumName(SocketOption_SocketOptionName_name, int32(x))
+}
+func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName")
+ if err != nil {
+ return err
+ }
+ *x = SocketOption_SocketOptionName(value)
+ return nil
+}
+
+type ShutDownRequest_How int32
+
+const (
+ ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1
+ ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2
+ ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3
+)
+
+var ShutDownRequest_How_name = map[int32]string{
+ 1: "SOCKET_SHUT_RD",
+ 2: "SOCKET_SHUT_WR",
+ 3: "SOCKET_SHUT_RDWR",
+}
+var ShutDownRequest_How_value = map[string]int32{
+ "SOCKET_SHUT_RD": 1,
+ "SOCKET_SHUT_WR": 2,
+ "SOCKET_SHUT_RDWR": 3,
+}
+
+func (x ShutDownRequest_How) Enum() *ShutDownRequest_How {
+ p := new(ShutDownRequest_How)
+ *p = x
+ return p
+}
+func (x ShutDownRequest_How) String() string {
+ return proto.EnumName(ShutDownRequest_How_name, int32(x))
+}
+func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How")
+ if err != nil {
+ return err
+ }
+ *x = ShutDownRequest_How(value)
+ return nil
+}
+
+type ReceiveRequest_Flags int32
+
+const (
+ ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1
+ ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2
+)
+
+var ReceiveRequest_Flags_name = map[int32]string{
+ 1: "MSG_OOB",
+ 2: "MSG_PEEK",
+}
+var ReceiveRequest_Flags_value = map[string]int32{
+ "MSG_OOB": 1,
+ "MSG_PEEK": 2,
+}
+
+func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags {
+ p := new(ReceiveRequest_Flags)
+ *p = x
+ return p
+}
+func (x ReceiveRequest_Flags) String() string {
+ return proto.EnumName(ReceiveRequest_Flags_name, int32(x))
+}
+func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags")
+ if err != nil {
+ return err
+ }
+ *x = ReceiveRequest_Flags(value)
+ return nil
+}
+
+type PollEvent_PollEventFlag int32
+
+const (
+ PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0
+ PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1
+ PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2
+ PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4
+ PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8
+ PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16
+ PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32
+ PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64
+ PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128
+ PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256
+ PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512
+ PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024
+ PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096
+ PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192
+)
+
+var PollEvent_PollEventFlag_name = map[int32]string{
+ 0: "SOCKET_POLLNONE",
+ 1: "SOCKET_POLLIN",
+ 2: "SOCKET_POLLPRI",
+ 4: "SOCKET_POLLOUT",
+ 8: "SOCKET_POLLERR",
+ 16: "SOCKET_POLLHUP",
+ 32: "SOCKET_POLLNVAL",
+ 64: "SOCKET_POLLRDNORM",
+ 128: "SOCKET_POLLRDBAND",
+ 256: "SOCKET_POLLWRNORM",
+ 512: "SOCKET_POLLWRBAND",
+ 1024: "SOCKET_POLLMSG",
+ 4096: "SOCKET_POLLREMOVE",
+ 8192: "SOCKET_POLLRDHUP",
+}
+var PollEvent_PollEventFlag_value = map[string]int32{
+ "SOCKET_POLLNONE": 0,
+ "SOCKET_POLLIN": 1,
+ "SOCKET_POLLPRI": 2,
+ "SOCKET_POLLOUT": 4,
+ "SOCKET_POLLERR": 8,
+ "SOCKET_POLLHUP": 16,
+ "SOCKET_POLLNVAL": 32,
+ "SOCKET_POLLRDNORM": 64,
+ "SOCKET_POLLRDBAND": 128,
+ "SOCKET_POLLWRNORM": 256,
+ "SOCKET_POLLWRBAND": 512,
+ "SOCKET_POLLMSG": 1024,
+ "SOCKET_POLLREMOVE": 4096,
+ "SOCKET_POLLRDHUP": 8192,
+}
+
+func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag {
+ p := new(PollEvent_PollEventFlag)
+ *p = x
+ return p
+}
+func (x PollEvent_PollEventFlag) String() string {
+ return proto.EnumName(PollEvent_PollEventFlag_name, int32(x))
+}
+func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag")
+ if err != nil {
+ return err
+ }
+ *x = PollEvent_PollEventFlag(value)
+ return nil
+}
+
+type ResolveReply_ErrorCode int32
+
+const (
+ ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1
+ ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2
+ ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3
+ ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4
+ ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5
+ ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6
+ ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7
+ ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8
+ ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9
+ ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10
+ ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11
+ ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12
+ ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13
+ ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14
+ ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15
+)
+
+var ResolveReply_ErrorCode_name = map[int32]string{
+ 1: "SOCKET_EAI_ADDRFAMILY",
+ 2: "SOCKET_EAI_AGAIN",
+ 3: "SOCKET_EAI_BADFLAGS",
+ 4: "SOCKET_EAI_FAIL",
+ 5: "SOCKET_EAI_FAMILY",
+ 6: "SOCKET_EAI_MEMORY",
+ 7: "SOCKET_EAI_NODATA",
+ 8: "SOCKET_EAI_NONAME",
+ 9: "SOCKET_EAI_SERVICE",
+ 10: "SOCKET_EAI_SOCKTYPE",
+ 11: "SOCKET_EAI_SYSTEM",
+ 12: "SOCKET_EAI_BADHINTS",
+ 13: "SOCKET_EAI_PROTOCOL",
+ 14: "SOCKET_EAI_OVERFLOW",
+ 15: "SOCKET_EAI_MAX",
+}
+var ResolveReply_ErrorCode_value = map[string]int32{
+ "SOCKET_EAI_ADDRFAMILY": 1,
+ "SOCKET_EAI_AGAIN": 2,
+ "SOCKET_EAI_BADFLAGS": 3,
+ "SOCKET_EAI_FAIL": 4,
+ "SOCKET_EAI_FAMILY": 5,
+ "SOCKET_EAI_MEMORY": 6,
+ "SOCKET_EAI_NODATA": 7,
+ "SOCKET_EAI_NONAME": 8,
+ "SOCKET_EAI_SERVICE": 9,
+ "SOCKET_EAI_SOCKTYPE": 10,
+ "SOCKET_EAI_SYSTEM": 11,
+ "SOCKET_EAI_BADHINTS": 12,
+ "SOCKET_EAI_PROTOCOL": 13,
+ "SOCKET_EAI_OVERFLOW": 14,
+ "SOCKET_EAI_MAX": 15,
+}
+
+func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode {
+ p := new(ResolveReply_ErrorCode)
+ *p = x
+ return p
+}
+func (x ResolveReply_ErrorCode) String() string {
+ return proto.EnumName(ResolveReply_ErrorCode_name, int32(x))
+}
+func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ResolveReply_ErrorCode(value)
+ return nil
+}
+
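+// Illustrative sketch (editorial, not generated code): proto2 scalar and
+// enum fields are pointers in Go, so the Enum() helpers above exist to put
+// a literal value on the heap before assignment. Hypothetical usage:
+//
+//	how := ShutDownRequest_SOCKET_SHUT_RDWR.Enum() // *ShutDownRequest_How
+//	fmt.Println(how.String())                      // prints "SOCKET_SHUT_RDWR"
+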
+type RemoteSocketServiceError struct {
+ SystemError *int32 `protobuf:"varint,1,opt,name=system_error,def=0" json:"system_error,omitempty"`
+ ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} }
+func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) }
+func (*RemoteSocketServiceError) ProtoMessage() {}
+
+const Default_RemoteSocketServiceError_SystemError int32 = 0
+
+func (m *RemoteSocketServiceError) GetSystemError() int32 {
+ if m != nil && m.SystemError != nil {
+ return *m.SystemError
+ }
+ return Default_RemoteSocketServiceError_SystemError
+}
+
+func (m *RemoteSocketServiceError) GetErrorDetail() string {
+ if m != nil && m.ErrorDetail != nil {
+ return *m.ErrorDetail
+ }
+ return ""
+}
+
+type AddressPort struct {
+ Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"`
+ PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address" json:"packed_address,omitempty"`
+ HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint" json:"hostname_hint,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddressPort) Reset() { *m = AddressPort{} }
+func (m *AddressPort) String() string { return proto.CompactTextString(m) }
+func (*AddressPort) ProtoMessage() {}
+
+func (m *AddressPort) GetPort() int32 {
+ if m != nil && m.Port != nil {
+ return *m.Port
+ }
+ return 0
+}
+
+func (m *AddressPort) GetPackedAddress() []byte {
+ if m != nil {
+ return m.PackedAddress
+ }
+ return nil
+}
+
+func (m *AddressPort) GetHostnameHint() string {
+ if m != nil && m.HostnameHint != nil {
+ return *m.HostnameHint
+ }
+ return ""
+}
+
+type CreateSocketRequest struct {
+ Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"`
+ Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"`
+ SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options" json:"socket_options,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,def=0" json:"listen_backlog,omitempty"`
+ RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip" json:"remote_ip,omitempty"`
+ AppId *string `protobuf:"bytes,9,opt,name=app_id" json:"app_id,omitempty"`
+ ProjectId *int64 `protobuf:"varint,10,opt,name=project_id" json:"project_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} }
+func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateSocketRequest) ProtoMessage() {}
+
+const Default_CreateSocketRequest_ListenBacklog int32 = 0
+
+func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily {
+ if m != nil && m.Family != nil {
+ return *m.Family
+ }
+ return CreateSocketRequest_IPv4
+}
+
+func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol {
+ if m != nil && m.Protocol != nil {
+ return *m.Protocol
+ }
+ return CreateSocketRequest_TCP
+}
+
+func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption {
+ if m != nil {
+ return m.SocketOptions
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetListenBacklog() int32 {
+ if m != nil && m.ListenBacklog != nil {
+ return *m.ListenBacklog
+ }
+ return Default_CreateSocketRequest_ListenBacklog
+}
+
+func (m *CreateSocketRequest) GetRemoteIp() *AddressPort {
+ if m != nil {
+ return m.RemoteIp
+ }
+ return nil
+}
+
+func (m *CreateSocketRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CreateSocketRequest) GetProjectId() int64 {
+ if m != nil && m.ProjectId != nil {
+ return *m.ProjectId
+ }
+ return 0
+}
+
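+// Illustrative sketch (editorial, not generated code): a minimal request
+// assembled with the generated helpers; proto.String and the Enum() methods
+// supply the pointers that proto2 optional/required fields need. The app id
+// below is a hypothetical value:
+//
+//	req := &CreateSocketRequest{
+//		Family:   CreateSocketRequest_IPv4.Enum(),
+//		Protocol: CreateSocketRequest_TCP.Enum(),
+//		AppId:    proto.String("example-app"),
+//	}
+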
+type CreateSocketReply struct {
+ SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address" json:"server_address,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} }
+func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) }
+func (*CreateSocketReply) ProtoMessage() {}
+
+var extRange_CreateSocketReply = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_CreateSocketReply
+}
+func (m *CreateSocketReply) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+func (m *CreateSocketReply) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *CreateSocketReply) GetServerAddress() *AddressPort {
+ if m != nil {
+ return m.ServerAddress
+ }
+ return nil
+}
+
+func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type BindRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BindRequest) Reset() { *m = BindRequest{} }
+func (m *BindRequest) String() string { return proto.CompactTextString(m) }
+func (*BindRequest) ProtoMessage() {}
+
+func (m *BindRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *BindRequest) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type BindReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BindReply) Reset() { *m = BindReply{} }
+func (m *BindReply) String() string { return proto.CompactTextString(m) }
+func (*BindReply) ProtoMessage() {}
+
+func (m *BindReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type GetSocketNameRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} }
+func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetSocketNameRequest) ProtoMessage() {}
+
+func (m *GetSocketNameRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+type GetSocketNameReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} }
+func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) }
+func (*GetSocketNameReply) ProtoMessage() {}
+
+func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type GetPeerNameRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} }
+func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPeerNameRequest) ProtoMessage() {}
+
+func (m *GetPeerNameRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+type GetPeerNameReply struct {
+ PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip" json:"peer_ip,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} }
+func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) }
+func (*GetPeerNameReply) ProtoMessage() {}
+
+func (m *GetPeerNameReply) GetPeerIp() *AddressPort {
+ if m != nil {
+ return m.PeerIp
+ }
+ return nil
+}
+
+type SocketOption struct {
+ Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"`
+ Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SocketOption) Reset() { *m = SocketOption{} }
+func (m *SocketOption) String() string { return proto.CompactTextString(m) }
+func (*SocketOption) ProtoMessage() {}
+
+func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return SocketOption_SOCKET_SOL_IP
+}
+
+func (m *SocketOption) GetOption() SocketOption_SocketOptionName {
+ if m != nil && m.Option != nil {
+ return *m.Option
+ }
+ return SocketOption_SOCKET_SO_DEBUG
+}
+
+func (m *SocketOption) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
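+// Illustrative sketch (editorial, not generated code): enabling TCP_NODELAY
+// through this message. Value is opaque bytes on the wire; a 4-byte
+// little-endian int32, the classic setsockopt boolean, is assumed here:
+//
+//	opt := &SocketOption{
+//		Level:  SocketOption_SOCKET_SOL_TCP.Enum(),
+//		Option: SocketOption_SOCKET_TCP_NODELAY.Enum(),
+//		Value:  []byte{1, 0, 0, 0}, // assumption: little-endian int32(1)
+//	}
+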
+type SetSocketOptionsRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} }
+func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) }
+func (*SetSocketOptionsRequest) ProtoMessage() {}
+
+func (m *SetSocketOptionsRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type SetSocketOptionsReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} }
+func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) }
+func (*SetSocketOptionsReply) ProtoMessage() {}
+
+type GetSocketOptionsRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} }
+func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetSocketOptionsRequest) ProtoMessage() {}
+
+func (m *GetSocketOptionsRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type GetSocketOptionsReply struct {
+ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} }
+func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) }
+func (*GetSocketOptionsReply) ProtoMessage() {}
+
+func (m *GetSocketOptionsReply) GetOptions() []*SocketOption {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+type ConnectRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip" json:"remote_ip,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ConnectRequest) Reset() { *m = ConnectRequest{} }
+func (m *ConnectRequest) String() string { return proto.CompactTextString(m) }
+func (*ConnectRequest) ProtoMessage() {}
+
+const Default_ConnectRequest_TimeoutSeconds float64 = -1
+
+func (m *ConnectRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ConnectRequest) GetRemoteIp() *AddressPort {
+ if m != nil {
+ return m.RemoteIp
+ }
+ return nil
+}
+
+func (m *ConnectRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_ConnectRequest_TimeoutSeconds
+}
+
+type ConnectReply struct {
+ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ConnectReply) Reset() { *m = ConnectReply{} }
+func (m *ConnectReply) String() string { return proto.CompactTextString(m) }
+func (*ConnectReply) ProtoMessage() {}
+
+var extRange_ConnectReply = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ConnectReply
+}
+func (m *ConnectReply) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+func (m *ConnectReply) GetProxyExternalIp() *AddressPort {
+ if m != nil {
+ return m.ProxyExternalIp
+ }
+ return nil
+}
+
+type ListenRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListenRequest) Reset() { *m = ListenRequest{} }
+func (m *ListenRequest) String() string { return proto.CompactTextString(m) }
+func (*ListenRequest) ProtoMessage() {}
+
+func (m *ListenRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ListenRequest) GetBacklog() int32 {
+ if m != nil && m.Backlog != nil {
+ return *m.Backlog
+ }
+ return 0
+}
+
+type ListenReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListenReply) Reset() { *m = ListenReply{} }
+func (m *ListenReply) String() string { return proto.CompactTextString(m) }
+func (*ListenReply) ProtoMessage() {}
+
+type AcceptRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AcceptRequest) Reset() { *m = AcceptRequest{} }
+func (m *AcceptRequest) String() string { return proto.CompactTextString(m) }
+func (*AcceptRequest) ProtoMessage() {}
+
+const Default_AcceptRequest_TimeoutSeconds float64 = -1
+
+func (m *AcceptRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *AcceptRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_AcceptRequest_TimeoutSeconds
+}
+
+type AcceptReply struct {
+ NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor" json:"new_socket_descriptor,omitempty"`
+ RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address" json:"remote_address,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AcceptReply) Reset() { *m = AcceptReply{} }
+func (m *AcceptReply) String() string { return proto.CompactTextString(m) }
+func (*AcceptReply) ProtoMessage() {}
+
+func (m *AcceptReply) GetNewSocketDescriptor() []byte {
+ if m != nil {
+ return m.NewSocketDescriptor
+ }
+ return nil
+}
+
+func (m *AcceptReply) GetRemoteAddress() *AddressPort {
+ if m != nil {
+ return m.RemoteAddress
+ }
+ return nil
+}
+
+type ShutDownRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"`
+ SendOffset *int64 `protobuf:"varint,3,req,name=send_offset" json:"send_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} }
+func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) }
+func (*ShutDownRequest) ProtoMessage() {}
+
+func (m *ShutDownRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ShutDownRequest) GetHow() ShutDownRequest_How {
+ if m != nil && m.How != nil {
+ return *m.How
+ }
+ return ShutDownRequest_SOCKET_SHUT_RD
+}
+
+func (m *ShutDownRequest) GetSendOffset() int64 {
+ if m != nil && m.SendOffset != nil {
+ return *m.SendOffset
+ }
+ return 0
+}
+
+type ShutDownReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ShutDownReply) Reset() { *m = ShutDownReply{} }
+func (m *ShutDownReply) String() string { return proto.CompactTextString(m) }
+func (*ShutDownReply) ProtoMessage() {}
+
+type CloseRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,def=-1" json:"send_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloseRequest) Reset() { *m = CloseRequest{} }
+func (m *CloseRequest) String() string { return proto.CompactTextString(m) }
+func (*CloseRequest) ProtoMessage() {}
+
+const Default_CloseRequest_SendOffset int64 = -1
+
+func (m *CloseRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *CloseRequest) GetSendOffset() int64 {
+ if m != nil && m.SendOffset != nil {
+ return *m.SendOffset
+ }
+ return Default_CloseRequest_SendOffset
+}
+
+type CloseReply struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CloseReply) Reset() { *m = CloseReply{} }
+func (m *CloseReply) String() string { return proto.CompactTextString(m) }
+func (*CloseReply) ProtoMessage() {}
+
+type SendRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"`
+ StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset" json:"stream_offset,omitempty"`
+ Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"`
+ SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to" json:"send_to,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SendRequest) Reset() { *m = SendRequest{} }
+func (m *SendRequest) String() string { return proto.CompactTextString(m) }
+func (*SendRequest) ProtoMessage() {}
+
+const Default_SendRequest_Flags int32 = 0
+const Default_SendRequest_TimeoutSeconds float64 = -1
+
+func (m *SendRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *SendRequest) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *SendRequest) GetStreamOffset() int64 {
+ if m != nil && m.StreamOffset != nil {
+ return *m.StreamOffset
+ }
+ return 0
+}
+
+func (m *SendRequest) GetFlags() int32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return Default_SendRequest_Flags
+}
+
+func (m *SendRequest) GetSendTo() *AddressPort {
+ if m != nil {
+ return m.SendTo
+ }
+ return nil
+}
+
+func (m *SendRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_SendRequest_TimeoutSeconds
+}
+
+type SendReply struct {
+ DataSent *int32 `protobuf:"varint,1,opt,name=data_sent" json:"data_sent,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SendReply) Reset() { *m = SendReply{} }
+func (m *SendReply) String() string { return proto.CompactTextString(m) }
+func (*SendReply) ProtoMessage() {}
+
+func (m *SendReply) GetDataSent() int32 {
+ if m != nil && m.DataSent != nil {
+ return *m.DataSent
+ }
+ return 0
+}
+
+type ReceiveRequest struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ DataSize *int32 `protobuf:"varint,2,req,name=data_size" json:"data_size,omitempty"`
+ Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} }
+func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) }
+func (*ReceiveRequest) ProtoMessage() {}
+
+const Default_ReceiveRequest_Flags int32 = 0
+const Default_ReceiveRequest_TimeoutSeconds float64 = -1
+
+func (m *ReceiveRequest) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *ReceiveRequest) GetDataSize() int32 {
+ if m != nil && m.DataSize != nil {
+ return *m.DataSize
+ }
+ return 0
+}
+
+func (m *ReceiveRequest) GetFlags() int32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return Default_ReceiveRequest_Flags
+}
+
+func (m *ReceiveRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_ReceiveRequest_TimeoutSeconds
+}
+
+type ReceiveReply struct {
+ StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset" json:"stream_offset,omitempty"`
+ Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+ ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from" json:"received_from,omitempty"`
+ BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size" json:"buffer_size,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ReceiveReply) Reset() { *m = ReceiveReply{} }
+func (m *ReceiveReply) String() string { return proto.CompactTextString(m) }
+func (*ReceiveReply) ProtoMessage() {}
+
+func (m *ReceiveReply) GetStreamOffset() int64 {
+ if m != nil && m.StreamOffset != nil {
+ return *m.StreamOffset
+ }
+ return 0
+}
+
+func (m *ReceiveReply) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *ReceiveReply) GetReceivedFrom() *AddressPort {
+ if m != nil {
+ return m.ReceivedFrom
+ }
+ return nil
+}
+
+func (m *ReceiveReply) GetBufferSize() int32 {
+ if m != nil && m.BufferSize != nil {
+ return *m.BufferSize
+ }
+ return 0
+}
+
+type PollEvent struct {
+ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"`
+ RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events" json:"requested_events,omitempty"`
+ ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events" json:"observed_events,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PollEvent) Reset() { *m = PollEvent{} }
+func (m *PollEvent) String() string { return proto.CompactTextString(m) }
+func (*PollEvent) ProtoMessage() {}
+
+func (m *PollEvent) GetSocketDescriptor() string {
+ if m != nil && m.SocketDescriptor != nil {
+ return *m.SocketDescriptor
+ }
+ return ""
+}
+
+func (m *PollEvent) GetRequestedEvents() int32 {
+ if m != nil && m.RequestedEvents != nil {
+ return *m.RequestedEvents
+ }
+ return 0
+}
+
+func (m *PollEvent) GetObservedEvents() int32 {
+ if m != nil && m.ObservedEvents != nil {
+ return *m.ObservedEvents
+ }
+ return 0
+}
+
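+// Illustrative sketch (editorial, not generated code): PollEventFlag values
+// are powers of two, so a registration ORs them into the int32
+// requested_events field; observed_events is required and would normally be
+// filled in by the service, so it is zeroed here. The descriptor is a
+// hypothetical value:
+//
+//	ev := &PollEvent{
+//		SocketDescriptor: proto.String("sd-123"),
+//		RequestedEvents:  proto.Int32(int32(PollEvent_SOCKET_POLLIN | PollEvent_SOCKET_POLLOUT)),
+//		ObservedEvents:   proto.Int32(0),
+//	}
+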
+type PollRequest struct {
+ Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"`
+ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PollRequest) Reset() { *m = PollRequest{} }
+func (m *PollRequest) String() string { return proto.CompactTextString(m) }
+func (*PollRequest) ProtoMessage() {}
+
+const Default_PollRequest_TimeoutSeconds float64 = -1
+
+func (m *PollRequest) GetEvents() []*PollEvent {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
+func (m *PollRequest) GetTimeoutSeconds() float64 {
+ if m != nil && m.TimeoutSeconds != nil {
+ return *m.TimeoutSeconds
+ }
+ return Default_PollRequest_TimeoutSeconds
+}
+
+type PollReply struct {
+ Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PollReply) Reset() { *m = PollReply{} }
+func (m *PollReply) String() string { return proto.CompactTextString(m) }
+func (*PollReply) ProtoMessage() {}
+
+func (m *PollReply) GetEvents() []*PollEvent {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
+type ResolveRequest struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ResolveRequest) Reset() { *m = ResolveRequest{} }
+func (m *ResolveRequest) String() string { return proto.CompactTextString(m) }
+func (*ResolveRequest) ProtoMessage() {}
+
+func (m *ResolveRequest) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily {
+ if m != nil {
+ return m.AddressFamilies
+ }
+ return nil
+}
+
+type ResolveReply struct {
+ PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address" json:"packed_address,omitempty"`
+ CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name" json:"canonical_name,omitempty"`
+ Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ResolveReply) Reset() { *m = ResolveReply{} }
+func (m *ResolveReply) String() string { return proto.CompactTextString(m) }
+func (*ResolveReply) ProtoMessage() {}
+
+func (m *ResolveReply) GetPackedAddress() [][]byte {
+ if m != nil {
+ return m.PackedAddress
+ }
+ return nil
+}
+
+func (m *ResolveReply) GetCanonicalName() string {
+ if m != nil && m.CanonicalName != nil {
+ return *m.CanonicalName
+ }
+ return ""
+}
+
+func (m *ResolveReply) GetAliases() []string {
+ if m != nil {
+ return m.Aliases
+ }
+ return nil
+}
+
+func init() {
+}
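+
+// Note (editorial, not generated): init is empty because nothing is
+// registered with the proto package here; proto.EnumName and
+// proto.UnmarshalJSONEnum work directly off the name/value maps above.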
diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto
new file mode 100644
index 000000000..2fcc7953d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto
@@ -0,0 +1,460 @@
+syntax = "proto2";
+option go_package = "socket";
+
+package appengine;
+
+message RemoteSocketServiceError {
+ enum ErrorCode {
+ SYSTEM_ERROR = 1;
+ GAI_ERROR = 2;
+ FAILURE = 4;
+ PERMISSION_DENIED = 5;
+ INVALID_REQUEST = 6;
+ SOCKET_CLOSED = 7;
+ }
+
+ enum SystemError {
+ option allow_alias = true;
+
+ SYS_SUCCESS = 0;
+ SYS_EPERM = 1;
+ SYS_ENOENT = 2;
+ SYS_ESRCH = 3;
+ SYS_EINTR = 4;
+ SYS_EIO = 5;
+ SYS_ENXIO = 6;
+ SYS_E2BIG = 7;
+ SYS_ENOEXEC = 8;
+ SYS_EBADF = 9;
+ SYS_ECHILD = 10;
+ SYS_EAGAIN = 11;
+ SYS_EWOULDBLOCK = 11;
+ SYS_ENOMEM = 12;
+ SYS_EACCES = 13;
+ SYS_EFAULT = 14;
+ SYS_ENOTBLK = 15;
+ SYS_EBUSY = 16;
+ SYS_EEXIST = 17;
+ SYS_EXDEV = 18;
+ SYS_ENODEV = 19;
+ SYS_ENOTDIR = 20;
+ SYS_EISDIR = 21;
+ SYS_EINVAL = 22;
+ SYS_ENFILE = 23;
+ SYS_EMFILE = 24;
+ SYS_ENOTTY = 25;
+ SYS_ETXTBSY = 26;
+ SYS_EFBIG = 27;
+ SYS_ENOSPC = 28;
+ SYS_ESPIPE = 29;
+ SYS_EROFS = 30;
+ SYS_EMLINK = 31;
+ SYS_EPIPE = 32;
+ SYS_EDOM = 33;
+ SYS_ERANGE = 34;
+ SYS_EDEADLK = 35;
+ SYS_EDEADLOCK = 35;
+ SYS_ENAMETOOLONG = 36;
+ SYS_ENOLCK = 37;
+ SYS_ENOSYS = 38;
+ SYS_ENOTEMPTY = 39;
+ SYS_ELOOP = 40;
+ SYS_ENOMSG = 42;
+ SYS_EIDRM = 43;
+ SYS_ECHRNG = 44;
+ SYS_EL2NSYNC = 45;
+ SYS_EL3HLT = 46;
+ SYS_EL3RST = 47;
+ SYS_ELNRNG = 48;
+ SYS_EUNATCH = 49;
+ SYS_ENOCSI = 50;
+ SYS_EL2HLT = 51;
+ SYS_EBADE = 52;
+ SYS_EBADR = 53;
+ SYS_EXFULL = 54;
+ SYS_ENOANO = 55;
+ SYS_EBADRQC = 56;
+ SYS_EBADSLT = 57;
+ SYS_EBFONT = 59;
+ SYS_ENOSTR = 60;
+ SYS_ENODATA = 61;
+ SYS_ETIME = 62;
+ SYS_ENOSR = 63;
+ SYS_ENONET = 64;
+ SYS_ENOPKG = 65;
+ SYS_EREMOTE = 66;
+ SYS_ENOLINK = 67;
+ SYS_EADV = 68;
+ SYS_ESRMNT = 69;
+ SYS_ECOMM = 70;
+ SYS_EPROTO = 71;
+ SYS_EMULTIHOP = 72;
+ SYS_EDOTDOT = 73;
+ SYS_EBADMSG = 74;
+ SYS_EOVERFLOW = 75;
+ SYS_ENOTUNIQ = 76;
+ SYS_EBADFD = 77;
+ SYS_EREMCHG = 78;
+ SYS_ELIBACC = 79;
+ SYS_ELIBBAD = 80;
+ SYS_ELIBSCN = 81;
+ SYS_ELIBMAX = 82;
+ SYS_ELIBEXEC = 83;
+ SYS_EILSEQ = 84;
+ SYS_ERESTART = 85;
+ SYS_ESTRPIPE = 86;
+ SYS_EUSERS = 87;
+ SYS_ENOTSOCK = 88;
+ SYS_EDESTADDRREQ = 89;
+ SYS_EMSGSIZE = 90;
+ SYS_EPROTOTYPE = 91;
+ SYS_ENOPROTOOPT = 92;
+ SYS_EPROTONOSUPPORT = 93;
+ SYS_ESOCKTNOSUPPORT = 94;
+ SYS_EOPNOTSUPP = 95;
+ SYS_ENOTSUP = 95;
+ SYS_EPFNOSUPPORT = 96;
+ SYS_EAFNOSUPPORT = 97;
+ SYS_EADDRINUSE = 98;
+ SYS_EADDRNOTAVAIL = 99;
+ SYS_ENETDOWN = 100;
+ SYS_ENETUNREACH = 101;
+ SYS_ENETRESET = 102;
+ SYS_ECONNABORTED = 103;
+ SYS_ECONNRESET = 104;
+ SYS_ENOBUFS = 105;
+ SYS_EISCONN = 106;
+ SYS_ENOTCONN = 107;
+ SYS_ESHUTDOWN = 108;
+ SYS_ETOOMANYREFS = 109;
+ SYS_ETIMEDOUT = 110;
+ SYS_ECONNREFUSED = 111;
+ SYS_EHOSTDOWN = 112;
+ SYS_EHOSTUNREACH = 113;
+ SYS_EALREADY = 114;
+ SYS_EINPROGRESS = 115;
+ SYS_ESTALE = 116;
+ SYS_EUCLEAN = 117;
+ SYS_ENOTNAM = 118;
+ SYS_ENAVAIL = 119;
+ SYS_EISNAM = 120;
+ SYS_EREMOTEIO = 121;
+ SYS_EDQUOT = 122;
+ SYS_ENOMEDIUM = 123;
+ SYS_EMEDIUMTYPE = 124;
+ SYS_ECANCELED = 125;
+ SYS_ENOKEY = 126;
+ SYS_EKEYEXPIRED = 127;
+ SYS_EKEYREVOKED = 128;
+ SYS_EKEYREJECTED = 129;
+ SYS_EOWNERDEAD = 130;
+ SYS_ENOTRECOVERABLE = 131;
+ SYS_ERFKILL = 132;
+ }
+
+ optional int32 system_error = 1 [default=0];
+ optional string error_detail = 2;
+}
+
+message AddressPort {
+ required int32 port = 1;
+ optional bytes packed_address = 2;
+
+ optional string hostname_hint = 3;
+}
+
+
+
+message CreateSocketRequest {
+ enum SocketFamily {
+ IPv4 = 1;
+ IPv6 = 2;
+ }
+
+ enum SocketProtocol {
+ TCP = 1;
+ UDP = 2;
+ }
+
+ required SocketFamily family = 1;
+ required SocketProtocol protocol = 2;
+
+ repeated SocketOption socket_options = 3;
+
+ optional AddressPort proxy_external_ip = 4;
+
+ optional int32 listen_backlog = 5 [default=0];
+
+ optional AddressPort remote_ip = 6;
+
+ optional string app_id = 9;
+
+ optional int64 project_id = 10;
+}
+
+message CreateSocketReply {
+ optional string socket_descriptor = 1;
+
+ optional AddressPort server_address = 3;
+
+ optional AddressPort proxy_external_ip = 4;
+
+ extensions 1000 to max;
+}
+
+
+
+message BindRequest {
+ required string socket_descriptor = 1;
+ required AddressPort proxy_external_ip = 2;
+}
+
+message BindReply {
+ optional AddressPort proxy_external_ip = 1;
+}
+
+
+
+message GetSocketNameRequest {
+ required string socket_descriptor = 1;
+}
+
+message GetSocketNameReply {
+ optional AddressPort proxy_external_ip = 2;
+}
+
+
+
+message GetPeerNameRequest {
+ required string socket_descriptor = 1;
+}
+
+message GetPeerNameReply {
+ optional AddressPort peer_ip = 2;
+}
+
+
+message SocketOption {
+
+ enum SocketOptionLevel {
+ SOCKET_SOL_IP = 0;
+ SOCKET_SOL_SOCKET = 1;
+ SOCKET_SOL_TCP = 6;
+ SOCKET_SOL_UDP = 17;
+ }
+
+ enum SocketOptionName {
+ option allow_alias = true;
+
+ SOCKET_SO_DEBUG = 1;
+ SOCKET_SO_REUSEADDR = 2;
+ SOCKET_SO_TYPE = 3;
+ SOCKET_SO_ERROR = 4;
+ SOCKET_SO_DONTROUTE = 5;
+ SOCKET_SO_BROADCAST = 6;
+ SOCKET_SO_SNDBUF = 7;
+ SOCKET_SO_RCVBUF = 8;
+ SOCKET_SO_KEEPALIVE = 9;
+ SOCKET_SO_OOBINLINE = 10;
+ SOCKET_SO_LINGER = 13;
+ SOCKET_SO_RCVTIMEO = 20;
+ SOCKET_SO_SNDTIMEO = 21;
+
+ SOCKET_IP_TOS = 1;
+ SOCKET_IP_TTL = 2;
+ SOCKET_IP_HDRINCL = 3;
+ SOCKET_IP_OPTIONS = 4;
+
+ SOCKET_TCP_NODELAY = 1;
+ SOCKET_TCP_MAXSEG = 2;
+ SOCKET_TCP_CORK = 3;
+ SOCKET_TCP_KEEPIDLE = 4;
+ SOCKET_TCP_KEEPINTVL = 5;
+ SOCKET_TCP_KEEPCNT = 6;
+ SOCKET_TCP_SYNCNT = 7;
+ SOCKET_TCP_LINGER2 = 8;
+ SOCKET_TCP_DEFER_ACCEPT = 9;
+ SOCKET_TCP_WINDOW_CLAMP = 10;
+ SOCKET_TCP_INFO = 11;
+ SOCKET_TCP_QUICKACK = 12;
+ }
+
+ required SocketOptionLevel level = 1;
+ required SocketOptionName option = 2;
+ required bytes value = 3;
+}
+
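+// Note (editorial): allow_alias above lets the SO_*, IP_* and TCP_* names
+// share numeric values; which namespace applies to a given SocketOption is
+// determined by its accompanying SocketOptionLevel.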
+
+message SetSocketOptionsRequest {
+ required string socket_descriptor = 1;
+ repeated SocketOption options = 2;
+}
+
+message SetSocketOptionsReply {
+}
+
+message GetSocketOptionsRequest {
+ required string socket_descriptor = 1;
+ repeated SocketOption options = 2;
+}
+
+message GetSocketOptionsReply {
+ repeated SocketOption options = 2;
+}
+
+
+message ConnectRequest {
+ required string socket_descriptor = 1;
+ required AddressPort remote_ip = 2;
+ optional double timeout_seconds = 3 [default=-1];
+}
+
+message ConnectReply {
+ optional AddressPort proxy_external_ip = 1;
+
+ extensions 1000 to max;
+}
+
+
+message ListenRequest {
+ required string socket_descriptor = 1;
+ required int32 backlog = 2;
+}
+
+message ListenReply {
+}
+
+
+message AcceptRequest {
+ required string socket_descriptor = 1;
+ optional double timeout_seconds = 2 [default=-1];
+}
+
+message AcceptReply {
+ optional bytes new_socket_descriptor = 2;
+ optional AddressPort remote_address = 3;
+}
+
+
+
+message ShutDownRequest {
+ enum How {
+ SOCKET_SHUT_RD = 1;
+ SOCKET_SHUT_WR = 2;
+ SOCKET_SHUT_RDWR = 3;
+ }
+ required string socket_descriptor = 1;
+ required How how = 2;
+ required int64 send_offset = 3;
+}
+
+message ShutDownReply {
+}
+
+
+
+message CloseRequest {
+ required string socket_descriptor = 1;
+ optional int64 send_offset = 2 [default=-1];
+}
+
+message CloseReply {
+}
+
+
+
+message SendRequest {
+ required string socket_descriptor = 1;
+ required bytes data = 2 [ctype=CORD];
+ required int64 stream_offset = 3;
+ optional int32 flags = 4 [default=0];
+ optional AddressPort send_to = 5;
+ optional double timeout_seconds = 6 [default=-1];
+}
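+
+// Note (editorial): [ctype=CORD] is a hint for the C++ code generator only;
+// the Go output above represents these fields as plain []byte.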
+
+message SendReply {
+ optional int32 data_sent = 1;
+}
+
+
+message ReceiveRequest {
+ enum Flags {
+ MSG_OOB = 1;
+ MSG_PEEK = 2;
+ }
+ required string socket_descriptor = 1;
+ required int32 data_size = 2;
+ optional int32 flags = 3 [default=0];
+ optional double timeout_seconds = 5 [default=-1];
+}
+
+message ReceiveReply {
+ optional int64 stream_offset = 2;
+ optional bytes data = 3 [ctype=CORD];
+ optional AddressPort received_from = 4;
+ optional int32 buffer_size = 5;
+}
+
+
+
+message PollEvent {
+
+ enum PollEventFlag {
+ SOCKET_POLLNONE = 0;
+ SOCKET_POLLIN = 1;
+ SOCKET_POLLPRI = 2;
+ SOCKET_POLLOUT = 4;
+ SOCKET_POLLERR = 8;
+ SOCKET_POLLHUP = 16;
+ SOCKET_POLLNVAL = 32;
+ SOCKET_POLLRDNORM = 64;
+ SOCKET_POLLRDBAND = 128;
+ SOCKET_POLLWRNORM = 256;
+ SOCKET_POLLWRBAND = 512;
+ SOCKET_POLLMSG = 1024;
+ SOCKET_POLLREMOVE = 4096;
+ SOCKET_POLLRDHUP = 8192;
+ };
+
+ required string socket_descriptor = 1;
+ required int32 requested_events = 2;
+ required int32 observed_events = 3;
+}
+
+message PollRequest {
+ repeated PollEvent events = 1;
+ optional double timeout_seconds = 2 [default=-1];
+}
+
+message PollReply {
+ repeated PollEvent events = 2;
+}
+
+message ResolveRequest {
+ required string name = 1;
+ repeated CreateSocketRequest.SocketFamily address_families = 2;
+}
+
+message ResolveReply {
+ enum ErrorCode {
+ SOCKET_EAI_ADDRFAMILY = 1;
+ SOCKET_EAI_AGAIN = 2;
+ SOCKET_EAI_BADFLAGS = 3;
+ SOCKET_EAI_FAIL = 4;
+ SOCKET_EAI_FAMILY = 5;
+ SOCKET_EAI_MEMORY = 6;
+ SOCKET_EAI_NODATA = 7;
+ SOCKET_EAI_NONAME = 8;
+ SOCKET_EAI_SERVICE = 9;
+ SOCKET_EAI_SOCKTYPE = 10;
+ SOCKET_EAI_SYSTEM = 11;
+ SOCKET_EAI_BADHINTS = 12;
+ SOCKET_EAI_PROTOCOL = 13;
+ SOCKET_EAI_OVERFLOW = 14;
+ SOCKET_EAI_MAX = 15;
+ };
+
+ repeated bytes packed_address = 2;
+ optional string canonical_name = 3;
+ repeated string aliases = 4;
+}
diff --git a/vendor/google.golang.org/appengine/internal/system/system_service.pb.go b/vendor/google.golang.org/appengine/internal/system/system_service.pb.go
new file mode 100644
index 000000000..56cc3f805
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/system/system_service.pb.go
@@ -0,0 +1,198 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/system/system_service.proto
+// DO NOT EDIT!
+
+/*
+Package system is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/system/system_service.proto
+
+It has these top-level messages:
+ SystemServiceError
+ SystemStat
+ GetSystemStatsRequest
+ GetSystemStatsResponse
+ StartBackgroundRequestRequest
+ StartBackgroundRequestResponse
+*/
+package system
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type SystemServiceError_ErrorCode int32
+
+const (
+ SystemServiceError_OK SystemServiceError_ErrorCode = 0
+ SystemServiceError_INTERNAL_ERROR SystemServiceError_ErrorCode = 1
+ SystemServiceError_BACKEND_REQUIRED SystemServiceError_ErrorCode = 2
+ SystemServiceError_LIMIT_REACHED SystemServiceError_ErrorCode = 3
+)
+
+var SystemServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "BACKEND_REQUIRED",
+ 3: "LIMIT_REACHED",
+}
+var SystemServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "BACKEND_REQUIRED": 2,
+ "LIMIT_REACHED": 3,
+}
+
+func (x SystemServiceError_ErrorCode) Enum() *SystemServiceError_ErrorCode {
+ p := new(SystemServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x SystemServiceError_ErrorCode) String() string {
+ return proto.EnumName(SystemServiceError_ErrorCode_name, int32(x))
+}
+func (x *SystemServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SystemServiceError_ErrorCode_value, data, "SystemServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = SystemServiceError_ErrorCode(value)
+ return nil
+}
+
+type SystemServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SystemServiceError) Reset() { *m = SystemServiceError{} }
+func (m *SystemServiceError) String() string { return proto.CompactTextString(m) }
+func (*SystemServiceError) ProtoMessage() {}
+
+type SystemStat struct {
+ // Instantaneous value of this stat.
+ Current *float64 `protobuf:"fixed64,1,opt,name=current" json:"current,omitempty"`
+ // Average over time, if this stat has an instantaneous value.
+ Average1M *float64 `protobuf:"fixed64,3,opt,name=average1m" json:"average1m,omitempty"`
+ Average10M *float64 `protobuf:"fixed64,4,opt,name=average10m" json:"average10m,omitempty"`
+ // Total value, if the stat accumulates over time.
+ Total *float64 `protobuf:"fixed64,2,opt,name=total" json:"total,omitempty"`
+ // Rate over time, if this stat accumulates.
+ Rate1M *float64 `protobuf:"fixed64,5,opt,name=rate1m" json:"rate1m,omitempty"`
+ Rate10M *float64 `protobuf:"fixed64,6,opt,name=rate10m" json:"rate10m,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SystemStat) Reset() { *m = SystemStat{} }
+func (m *SystemStat) String() string { return proto.CompactTextString(m) }
+func (*SystemStat) ProtoMessage() {}
+
+func (m *SystemStat) GetCurrent() float64 {
+ if m != nil && m.Current != nil {
+ return *m.Current
+ }
+ return 0
+}
+
+func (m *SystemStat) GetAverage1M() float64 {
+ if m != nil && m.Average1M != nil {
+ return *m.Average1M
+ }
+ return 0
+}
+
+func (m *SystemStat) GetAverage10M() float64 {
+ if m != nil && m.Average10M != nil {
+ return *m.Average10M
+ }
+ return 0
+}
+
+func (m *SystemStat) GetTotal() float64 {
+ if m != nil && m.Total != nil {
+ return *m.Total
+ }
+ return 0
+}
+
+func (m *SystemStat) GetRate1M() float64 {
+ if m != nil && m.Rate1M != nil {
+ return *m.Rate1M
+ }
+ return 0
+}
+
+func (m *SystemStat) GetRate10M() float64 {
+ if m != nil && m.Rate10M != nil {
+ return *m.Rate10M
+ }
+ return 0
+}
+
+type GetSystemStatsRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSystemStatsRequest) Reset() { *m = GetSystemStatsRequest{} }
+func (m *GetSystemStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetSystemStatsRequest) ProtoMessage() {}
+
+type GetSystemStatsResponse struct {
+ // CPU used by this instance, in mcycles.
+ Cpu *SystemStat `protobuf:"bytes,1,opt,name=cpu" json:"cpu,omitempty"`
+ // Physical memory (RAM) used by this instance, in megabytes.
+ Memory *SystemStat `protobuf:"bytes,2,opt,name=memory" json:"memory,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetSystemStatsResponse) Reset() { *m = GetSystemStatsResponse{} }
+func (m *GetSystemStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetSystemStatsResponse) ProtoMessage() {}
+
+func (m *GetSystemStatsResponse) GetCpu() *SystemStat {
+ if m != nil {
+ return m.Cpu
+ }
+ return nil
+}
+
+func (m *GetSystemStatsResponse) GetMemory() *SystemStat {
+ if m != nil {
+ return m.Memory
+ }
+ return nil
+}
+
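+// Illustrative sketch (editorial, not generated code): the generated getters
+// are nil-safe at every level, so an unset response reads as zeros rather
+// than panicking:
+//
+//	var resp GetSystemStatsResponse
+//	mcycles := resp.GetCpu().GetCurrent() // 0 when cpu is unset
+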
+type StartBackgroundRequestRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartBackgroundRequestRequest) Reset() { *m = StartBackgroundRequestRequest{} }
+func (m *StartBackgroundRequestRequest) String() string { return proto.CompactTextString(m) }
+func (*StartBackgroundRequestRequest) ProtoMessage() {}
+
+type StartBackgroundRequestResponse struct {
+ // Every /_ah/background request will carry an X-AppEngine-BackgroundRequest
+ // header whose value equals this request_id.
+ RequestId *string `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartBackgroundRequestResponse) Reset() { *m = StartBackgroundRequestResponse{} }
+func (m *StartBackgroundRequestResponse) String() string { return proto.CompactTextString(m) }
+func (*StartBackgroundRequestResponse) ProtoMessage() {}
+
+func (m *StartBackgroundRequestResponse) GetRequestId() string {
+ if m != nil && m.RequestId != nil {
+ return *m.RequestId
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/system/system_service.proto b/vendor/google.golang.org/appengine/internal/system/system_service.proto
new file mode 100644
index 000000000..32c0bf859
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/system/system_service.proto
@@ -0,0 +1,49 @@
+syntax = "proto2";
+option go_package = "system";
+
+package appengine;
+
+message SystemServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ BACKEND_REQUIRED = 2;
+ LIMIT_REACHED = 3;
+ }
+}
+
+message SystemStat {
+ // Instantaneous value of this stat.
+ optional double current = 1;
+
+ // Average over time, if this stat has an instantaneous value.
+ optional double average1m = 3;
+ optional double average10m = 4;
+
+ // Total value, if the stat accumulates over time.
+ optional double total = 2;
+
+ // Rate over time, if this stat accumulates.
+ optional double rate1m = 5;
+ optional double rate10m = 6;
+}
+
+message GetSystemStatsRequest {
+}
+
+message GetSystemStatsResponse {
+ // CPU used by this instance, in mcycles.
+ optional SystemStat cpu = 1;
+
+ // Physical memory (RAM) used by this instance, in megabytes.
+ optional SystemStat memory = 2;
+}
+
+message StartBackgroundRequestRequest {
+}
+
+message StartBackgroundRequestResponse {
+ // Every /_ah/background request will carry an X-AppEngine-BackgroundRequest
+ // header whose value equals this request_id.
+ optional string request_id = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
new file mode 100644
index 000000000..c3d428ec5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
@@ -0,0 +1,1888 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
+// DO NOT EDIT!
+
+/*
+Package taskqueue is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
+
+It has these top-level messages:
+ TaskQueueServiceError
+ TaskPayload
+ TaskQueueRetryParameters
+ TaskQueueAcl
+ TaskQueueHttpHeader
+ TaskQueueMode
+ TaskQueueAddRequest
+ TaskQueueAddResponse
+ TaskQueueBulkAddRequest
+ TaskQueueBulkAddResponse
+ TaskQueueDeleteRequest
+ TaskQueueDeleteResponse
+ TaskQueueForceRunRequest
+ TaskQueueForceRunResponse
+ TaskQueueUpdateQueueRequest
+ TaskQueueUpdateQueueResponse
+ TaskQueueFetchQueuesRequest
+ TaskQueueFetchQueuesResponse
+ TaskQueueFetchQueueStatsRequest
+ TaskQueueScannerQueueInfo
+ TaskQueueFetchQueueStatsResponse
+ TaskQueuePauseQueueRequest
+ TaskQueuePauseQueueResponse
+ TaskQueuePurgeQueueRequest
+ TaskQueuePurgeQueueResponse
+ TaskQueueDeleteQueueRequest
+ TaskQueueDeleteQueueResponse
+ TaskQueueDeleteGroupRequest
+ TaskQueueDeleteGroupResponse
+ TaskQueueQueryTasksRequest
+ TaskQueueQueryTasksResponse
+ TaskQueueFetchTaskRequest
+ TaskQueueFetchTaskResponse
+ TaskQueueUpdateStorageLimitRequest
+ TaskQueueUpdateStorageLimitResponse
+ TaskQueueQueryAndOwnTasksRequest
+ TaskQueueQueryAndOwnTasksResponse
+ TaskQueueModifyTaskLeaseRequest
+ TaskQueueModifyTaskLeaseResponse
+*/
+package taskqueue
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import appengine "google.golang.org/appengine/internal/datastore"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type TaskQueueServiceError_ErrorCode int32
+
+const (
+ TaskQueueServiceError_OK TaskQueueServiceError_ErrorCode = 0
+ TaskQueueServiceError_UNKNOWN_QUEUE TaskQueueServiceError_ErrorCode = 1
+ TaskQueueServiceError_TRANSIENT_ERROR TaskQueueServiceError_ErrorCode = 2
+ TaskQueueServiceError_INTERNAL_ERROR TaskQueueServiceError_ErrorCode = 3
+ TaskQueueServiceError_TASK_TOO_LARGE TaskQueueServiceError_ErrorCode = 4
+ TaskQueueServiceError_INVALID_TASK_NAME TaskQueueServiceError_ErrorCode = 5
+ TaskQueueServiceError_INVALID_QUEUE_NAME TaskQueueServiceError_ErrorCode = 6
+ TaskQueueServiceError_INVALID_URL TaskQueueServiceError_ErrorCode = 7
+ TaskQueueServiceError_INVALID_QUEUE_RATE TaskQueueServiceError_ErrorCode = 8
+ TaskQueueServiceError_PERMISSION_DENIED TaskQueueServiceError_ErrorCode = 9
+ TaskQueueServiceError_TASK_ALREADY_EXISTS TaskQueueServiceError_ErrorCode = 10
+ TaskQueueServiceError_TOMBSTONED_TASK TaskQueueServiceError_ErrorCode = 11
+ TaskQueueServiceError_INVALID_ETA TaskQueueServiceError_ErrorCode = 12
+ TaskQueueServiceError_INVALID_REQUEST TaskQueueServiceError_ErrorCode = 13
+ TaskQueueServiceError_UNKNOWN_TASK TaskQueueServiceError_ErrorCode = 14
+ TaskQueueServiceError_TOMBSTONED_QUEUE TaskQueueServiceError_ErrorCode = 15
+ TaskQueueServiceError_DUPLICATE_TASK_NAME TaskQueueServiceError_ErrorCode = 16
+ TaskQueueServiceError_SKIPPED TaskQueueServiceError_ErrorCode = 17
+ TaskQueueServiceError_TOO_MANY_TASKS TaskQueueServiceError_ErrorCode = 18
+ TaskQueueServiceError_INVALID_PAYLOAD TaskQueueServiceError_ErrorCode = 19
+ TaskQueueServiceError_INVALID_RETRY_PARAMETERS TaskQueueServiceError_ErrorCode = 20
+ TaskQueueServiceError_INVALID_QUEUE_MODE TaskQueueServiceError_ErrorCode = 21
+ TaskQueueServiceError_ACL_LOOKUP_ERROR TaskQueueServiceError_ErrorCode = 22
+ TaskQueueServiceError_TRANSACTIONAL_REQUEST_TOO_LARGE TaskQueueServiceError_ErrorCode = 23
+ TaskQueueServiceError_INCORRECT_CREATOR_NAME TaskQueueServiceError_ErrorCode = 24
+ TaskQueueServiceError_TASK_LEASE_EXPIRED TaskQueueServiceError_ErrorCode = 25
+ TaskQueueServiceError_QUEUE_PAUSED TaskQueueServiceError_ErrorCode = 26
+ TaskQueueServiceError_INVALID_TAG TaskQueueServiceError_ErrorCode = 27
+ // Reserved range for the Datastore error codes.
+ // Original Datastore error code is shifted by DATASTORE_ERROR offset.
+ TaskQueueServiceError_DATASTORE_ERROR TaskQueueServiceError_ErrorCode = 10000
+)
+
+var TaskQueueServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "UNKNOWN_QUEUE",
+ 2: "TRANSIENT_ERROR",
+ 3: "INTERNAL_ERROR",
+ 4: "TASK_TOO_LARGE",
+ 5: "INVALID_TASK_NAME",
+ 6: "INVALID_QUEUE_NAME",
+ 7: "INVALID_URL",
+ 8: "INVALID_QUEUE_RATE",
+ 9: "PERMISSION_DENIED",
+ 10: "TASK_ALREADY_EXISTS",
+ 11: "TOMBSTONED_TASK",
+ 12: "INVALID_ETA",
+ 13: "INVALID_REQUEST",
+ 14: "UNKNOWN_TASK",
+ 15: "TOMBSTONED_QUEUE",
+ 16: "DUPLICATE_TASK_NAME",
+ 17: "SKIPPED",
+ 18: "TOO_MANY_TASKS",
+ 19: "INVALID_PAYLOAD",
+ 20: "INVALID_RETRY_PARAMETERS",
+ 21: "INVALID_QUEUE_MODE",
+ 22: "ACL_LOOKUP_ERROR",
+ 23: "TRANSACTIONAL_REQUEST_TOO_LARGE",
+ 24: "INCORRECT_CREATOR_NAME",
+ 25: "TASK_LEASE_EXPIRED",
+ 26: "QUEUE_PAUSED",
+ 27: "INVALID_TAG",
+ 10000: "DATASTORE_ERROR",
+}
+var TaskQueueServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "UNKNOWN_QUEUE": 1,
+ "TRANSIENT_ERROR": 2,
+ "INTERNAL_ERROR": 3,
+ "TASK_TOO_LARGE": 4,
+ "INVALID_TASK_NAME": 5,
+ "INVALID_QUEUE_NAME": 6,
+ "INVALID_URL": 7,
+ "INVALID_QUEUE_RATE": 8,
+ "PERMISSION_DENIED": 9,
+ "TASK_ALREADY_EXISTS": 10,
+ "TOMBSTONED_TASK": 11,
+ "INVALID_ETA": 12,
+ "INVALID_REQUEST": 13,
+ "UNKNOWN_TASK": 14,
+ "TOMBSTONED_QUEUE": 15,
+ "DUPLICATE_TASK_NAME": 16,
+ "SKIPPED": 17,
+ "TOO_MANY_TASKS": 18,
+ "INVALID_PAYLOAD": 19,
+ "INVALID_RETRY_PARAMETERS": 20,
+ "INVALID_QUEUE_MODE": 21,
+ "ACL_LOOKUP_ERROR": 22,
+ "TRANSACTIONAL_REQUEST_TOO_LARGE": 23,
+ "INCORRECT_CREATOR_NAME": 24,
+ "TASK_LEASE_EXPIRED": 25,
+ "QUEUE_PAUSED": 26,
+ "INVALID_TAG": 27,
+ "DATASTORE_ERROR": 10000,
+}
+
+func (x TaskQueueServiceError_ErrorCode) Enum() *TaskQueueServiceError_ErrorCode {
+ p := new(TaskQueueServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x TaskQueueServiceError_ErrorCode) String() string {
+ return proto.EnumName(TaskQueueServiceError_ErrorCode_name, int32(x))
+}
+func (x *TaskQueueServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueServiceError_ErrorCode_value, data, "TaskQueueServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueServiceError_ErrorCode(value)
+ return nil
+}
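
As a quick illustration of these generated helpers, a minimal same-package sketch (the internal import path means this could only compile inside the appengine module; the function name and chosen error value are made up):

    package taskqueue

    import "fmt"

    func exampleErrorCode() {
        code := TaskQueueServiceError_TRANSIENT_ERROR

        // String() resolves the numeric value through the ErrorCode_name map.
        fmt.Println(code.String()) // "TRANSIENT_ERROR"

        // Enum() returns a pointer to a copy, the shape proto2 optional fields expect.
        p := code.Enum()
        fmt.Println(*p == code) // true
    }
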
+
+type TaskQueueMode_Mode int32
+
+const (
+ TaskQueueMode_PUSH TaskQueueMode_Mode = 0
+ TaskQueueMode_PULL TaskQueueMode_Mode = 1
+)
+
+var TaskQueueMode_Mode_name = map[int32]string{
+ 0: "PUSH",
+ 1: "PULL",
+}
+var TaskQueueMode_Mode_value = map[string]int32{
+ "PUSH": 0,
+ "PULL": 1,
+}
+
+func (x TaskQueueMode_Mode) Enum() *TaskQueueMode_Mode {
+ p := new(TaskQueueMode_Mode)
+ *p = x
+ return p
+}
+func (x TaskQueueMode_Mode) String() string {
+ return proto.EnumName(TaskQueueMode_Mode_name, int32(x))
+}
+func (x *TaskQueueMode_Mode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueMode_Mode_value, data, "TaskQueueMode_Mode")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueMode_Mode(value)
+ return nil
+}
+
+type TaskQueueAddRequest_RequestMethod int32
+
+const (
+ TaskQueueAddRequest_GET TaskQueueAddRequest_RequestMethod = 1
+ TaskQueueAddRequest_POST TaskQueueAddRequest_RequestMethod = 2
+ TaskQueueAddRequest_HEAD TaskQueueAddRequest_RequestMethod = 3
+ TaskQueueAddRequest_PUT TaskQueueAddRequest_RequestMethod = 4
+ TaskQueueAddRequest_DELETE TaskQueueAddRequest_RequestMethod = 5
+)
+
+var TaskQueueAddRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+}
+var TaskQueueAddRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+}
+
+func (x TaskQueueAddRequest_RequestMethod) Enum() *TaskQueueAddRequest_RequestMethod {
+ p := new(TaskQueueAddRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x TaskQueueAddRequest_RequestMethod) String() string {
+ return proto.EnumName(TaskQueueAddRequest_RequestMethod_name, int32(x))
+}
+func (x *TaskQueueAddRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueAddRequest_RequestMethod_value, data, "TaskQueueAddRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueAddRequest_RequestMethod(value)
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_RequestMethod int32
+
+const (
+ TaskQueueQueryTasksResponse_Task_GET TaskQueueQueryTasksResponse_Task_RequestMethod = 1
+ TaskQueueQueryTasksResponse_Task_POST TaskQueueQueryTasksResponse_Task_RequestMethod = 2
+ TaskQueueQueryTasksResponse_Task_HEAD TaskQueueQueryTasksResponse_Task_RequestMethod = 3
+ TaskQueueQueryTasksResponse_Task_PUT TaskQueueQueryTasksResponse_Task_RequestMethod = 4
+ TaskQueueQueryTasksResponse_Task_DELETE TaskQueueQueryTasksResponse_Task_RequestMethod = 5
+)
+
+var TaskQueueQueryTasksResponse_Task_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+}
+var TaskQueueQueryTasksResponse_Task_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+}
+
+func (x TaskQueueQueryTasksResponse_Task_RequestMethod) Enum() *TaskQueueQueryTasksResponse_Task_RequestMethod {
+ p := new(TaskQueueQueryTasksResponse_Task_RequestMethod)
+ *p = x
+ return p
+}
+func (x TaskQueueQueryTasksResponse_Task_RequestMethod) String() string {
+ return proto.EnumName(TaskQueueQueryTasksResponse_Task_RequestMethod_name, int32(x))
+}
+func (x *TaskQueueQueryTasksResponse_Task_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueQueryTasksResponse_Task_RequestMethod_value, data, "TaskQueueQueryTasksResponse_Task_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueQueryTasksResponse_Task_RequestMethod(value)
+ return nil
+}
+
+type TaskQueueServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueServiceError) Reset() { *m = TaskQueueServiceError{} }
+func (m *TaskQueueServiceError) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueServiceError) ProtoMessage() {}
+
+type TaskPayload struct {
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskPayload) Reset() { *m = TaskPayload{} }
+func (m *TaskPayload) String() string { return proto.CompactTextString(m) }
+func (*TaskPayload) ProtoMessage() {}
+
+func (m *TaskPayload) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(m.ExtensionMap())
+}
+func (m *TaskPayload) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
+}
+func (m *TaskPayload) MarshalJSON() ([]byte, error) {
+ return proto.MarshalMessageSetJSON(m.XXX_extensions)
+}
+func (m *TaskPayload) UnmarshalJSON(buf []byte) error {
+ return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)
+}
+
+// ensure TaskPayload satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*TaskPayload)(nil)
+var _ proto.Unmarshaler = (*TaskPayload)(nil)
+
+var extRange_TaskPayload = []proto.ExtensionRange{
+ {10, 2147483646},
+}
+
+func (*TaskPayload) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_TaskPayload
+}
+func (m *TaskPayload) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+type TaskQueueRetryParameters struct {
+ RetryLimit *int32 `protobuf:"varint,1,opt,name=retry_limit" json:"retry_limit,omitempty"`
+ AgeLimitSec *int64 `protobuf:"varint,2,opt,name=age_limit_sec" json:"age_limit_sec,omitempty"`
+ MinBackoffSec *float64 `protobuf:"fixed64,3,opt,name=min_backoff_sec,def=0.1" json:"min_backoff_sec,omitempty"`
+ MaxBackoffSec *float64 `protobuf:"fixed64,4,opt,name=max_backoff_sec,def=3600" json:"max_backoff_sec,omitempty"`
+ MaxDoublings *int32 `protobuf:"varint,5,opt,name=max_doublings,def=16" json:"max_doublings,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueRetryParameters) Reset() { *m = TaskQueueRetryParameters{} }
+func (m *TaskQueueRetryParameters) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueRetryParameters) ProtoMessage() {}
+
+const Default_TaskQueueRetryParameters_MinBackoffSec float64 = 0.1
+const Default_TaskQueueRetryParameters_MaxBackoffSec float64 = 3600
+const Default_TaskQueueRetryParameters_MaxDoublings int32 = 16
+
+func (m *TaskQueueRetryParameters) GetRetryLimit() int32 {
+ if m != nil && m.RetryLimit != nil {
+ return *m.RetryLimit
+ }
+ return 0
+}
+
+func (m *TaskQueueRetryParameters) GetAgeLimitSec() int64 {
+ if m != nil && m.AgeLimitSec != nil {
+ return *m.AgeLimitSec
+ }
+ return 0
+}
+
+func (m *TaskQueueRetryParameters) GetMinBackoffSec() float64 {
+ if m != nil && m.MinBackoffSec != nil {
+ return *m.MinBackoffSec
+ }
+ return Default_TaskQueueRetryParameters_MinBackoffSec
+}
+
+func (m *TaskQueueRetryParameters) GetMaxBackoffSec() float64 {
+ if m != nil && m.MaxBackoffSec != nil {
+ return *m.MaxBackoffSec
+ }
+ return Default_TaskQueueRetryParameters_MaxBackoffSec
+}
+
+func (m *TaskQueueRetryParameters) GetMaxDoublings() int32 {
+ if m != nil && m.MaxDoublings != nil {
+ return *m.MaxDoublings
+ }
+ return Default_TaskQueueRetryParameters_MaxDoublings
+}
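
The def= struct tags above surface as the Default_* constants, and each getter falls back to its default when the field pointer is nil. A hedged same-package sketch (illustrative values only):

    package taskqueue

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func exampleRetryDefaults() {
        // Only retry_limit is set; the backoff fields remain nil.
        p := &TaskQueueRetryParameters{
            RetryLimit: proto.Int32(5),
        }

        fmt.Println(p.GetRetryLimit())    // 5
        fmt.Println(p.GetMinBackoffSec()) // 0.1 (Default_TaskQueueRetryParameters_MinBackoffSec)
        fmt.Println(p.GetMaxBackoffSec()) // 3600 (Default_TaskQueueRetryParameters_MaxBackoffSec)
        fmt.Println(p.GetMaxDoublings())  // 16
    }
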
+
+type TaskQueueAcl struct {
+ UserEmail [][]byte `protobuf:"bytes,1,rep,name=user_email" json:"user_email,omitempty"`
+ WriterEmail [][]byte `protobuf:"bytes,2,rep,name=writer_email" json:"writer_email,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAcl) Reset() { *m = TaskQueueAcl{} }
+func (m *TaskQueueAcl) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAcl) ProtoMessage() {}
+
+func (m *TaskQueueAcl) GetUserEmail() [][]byte {
+ if m != nil {
+ return m.UserEmail
+ }
+ return nil
+}
+
+func (m *TaskQueueAcl) GetWriterEmail() [][]byte {
+ if m != nil {
+ return m.WriterEmail
+ }
+ return nil
+}
+
+type TaskQueueHttpHeader struct {
+ Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueHttpHeader) Reset() { *m = TaskQueueHttpHeader{} }
+func (m *TaskQueueHttpHeader) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueHttpHeader) ProtoMessage() {}
+
+func (m *TaskQueueHttpHeader) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueHttpHeader) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueMode struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueMode) Reset() { *m = TaskQueueMode{} }
+func (m *TaskQueueMode) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueMode) ProtoMessage() {}
+
+type TaskQueueAddRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ Method *TaskQueueAddRequest_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueAddRequest_RequestMethod,def=2" json:"method,omitempty"`
+ Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"`
+ Header []*TaskQueueAddRequest_Header `protobuf:"group,6,rep,name=Header" json:"header,omitempty"`
+ Body []byte `protobuf:"bytes,9,opt,name=body" json:"body,omitempty"`
+ Transaction *appengine.Transaction `protobuf:"bytes,10,opt,name=transaction" json:"transaction,omitempty"`
+ AppId []byte `protobuf:"bytes,11,opt,name=app_id" json:"app_id,omitempty"`
+ Crontimetable *TaskQueueAddRequest_CronTimetable `protobuf:"group,12,opt,name=CronTimetable" json:"crontimetable,omitempty"`
+ Description []byte `protobuf:"bytes,15,opt,name=description" json:"description,omitempty"`
+ Payload *TaskPayload `protobuf:"bytes,16,opt,name=payload" json:"payload,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,17,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,18,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Tag []byte `protobuf:"bytes,19,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest) Reset() { *m = TaskQueueAddRequest{} }
+func (m *TaskQueueAddRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest) ProtoMessage() {}
+
+const Default_TaskQueueAddRequest_Method TaskQueueAddRequest_RequestMethod = TaskQueueAddRequest_POST
+const Default_TaskQueueAddRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+
+func (m *TaskQueueAddRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueAddRequest) GetMethod() TaskQueueAddRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return Default_TaskQueueAddRequest_Method
+}
+
+func (m *TaskQueueAddRequest) GetUrl() []byte {
+ if m != nil {
+ return m.Url
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetHeader() []*TaskQueueAddRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetTransaction() *appengine.Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetCrontimetable() *TaskQueueAddRequest_CronTimetable {
+ if m != nil {
+ return m.Crontimetable
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetDescription() []byte {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetPayload() *TaskPayload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueAddRequest_Mode
+}
+
+func (m *TaskQueueAddRequest) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
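
Putting the request type together: a sketch of populating TaskQueueAddRequest (all field values are invented; same-package assumption as above). Required bytes fields are plain slices, while optional scalar and enum fields take pointers, hence proto.Int64 and the Enum() helpers:

    package taskqueue

    import "github.com/golang/protobuf/proto"

    func exampleAddRequest() *TaskQueueAddRequest {
        return &TaskQueueAddRequest{
            QueueName: []byte("default"),
            TaskName:  []byte("task-0001"),
            EtaUsec:   proto.Int64(0), // dispatch as soon as possible
            Method:    TaskQueueAddRequest_POST.Enum(),
            Url:       []byte("/work"),
            Body:      []byte("payload"),
            Mode:      TaskQueueMode_PUSH.Enum(),
        }
    }
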
+
+type TaskQueueAddRequest_Header struct {
+ Key []byte `protobuf:"bytes,7,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,8,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest_Header) Reset() { *m = TaskQueueAddRequest_Header{} }
+func (m *TaskQueueAddRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest_Header) ProtoMessage() {}
+
+func (m *TaskQueueAddRequest_Header) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest_Header) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueAddRequest_CronTimetable struct {
+ Schedule []byte `protobuf:"bytes,13,req,name=schedule" json:"schedule,omitempty"`
+ Timezone []byte `protobuf:"bytes,14,req,name=timezone" json:"timezone,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest_CronTimetable) Reset() { *m = TaskQueueAddRequest_CronTimetable{} }
+func (m *TaskQueueAddRequest_CronTimetable) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest_CronTimetable) ProtoMessage() {}
+
+func (m *TaskQueueAddRequest_CronTimetable) GetSchedule() []byte {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest_CronTimetable) GetTimezone() []byte {
+ if m != nil {
+ return m.Timezone
+ }
+ return nil
+}
+
+type TaskQueueAddResponse struct {
+ ChosenTaskName []byte `protobuf:"bytes,1,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddResponse) Reset() { *m = TaskQueueAddResponse{} }
+func (m *TaskQueueAddResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddResponse) ProtoMessage() {}
+
+func (m *TaskQueueAddResponse) GetChosenTaskName() []byte {
+ if m != nil {
+ return m.ChosenTaskName
+ }
+ return nil
+}
+
+type TaskQueueBulkAddRequest struct {
+ AddRequest []*TaskQueueAddRequest `protobuf:"bytes,1,rep,name=add_request" json:"add_request,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddRequest) Reset() { *m = TaskQueueBulkAddRequest{} }
+func (m *TaskQueueBulkAddRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddRequest) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddRequest) GetAddRequest() []*TaskQueueAddRequest {
+ if m != nil {
+ return m.AddRequest
+ }
+ return nil
+}
+
+type TaskQueueBulkAddResponse struct {
+ Taskresult []*TaskQueueBulkAddResponse_TaskResult `protobuf:"group,1,rep,name=TaskResult" json:"taskresult,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddResponse) Reset() { *m = TaskQueueBulkAddResponse{} }
+func (m *TaskQueueBulkAddResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddResponse) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddResponse) GetTaskresult() []*TaskQueueBulkAddResponse_TaskResult {
+ if m != nil {
+ return m.Taskresult
+ }
+ return nil
+}
+
+type TaskQueueBulkAddResponse_TaskResult struct {
+ Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,2,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ ChosenTaskName []byte `protobuf:"bytes,3,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) Reset() { *m = TaskQueueBulkAddResponse_TaskResult{} }
+func (m *TaskQueueBulkAddResponse_TaskResult) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddResponse_TaskResult) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) GetResult() TaskQueueServiceError_ErrorCode {
+ if m != nil && m.Result != nil {
+ return *m.Result
+ }
+ return TaskQueueServiceError_OK
+}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) GetChosenTaskName() []byte {
+ if m != nil {
+ return m.ChosenTaskName
+ }
+ return nil
+}
+
+type TaskQueueDeleteRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName [][]byte `protobuf:"bytes,2,rep,name=task_name" json:"task_name,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteRequest) Reset() { *m = TaskQueueDeleteRequest{} }
+func (m *TaskQueueDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteRequest) GetTaskName() [][]byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type TaskQueueDeleteResponse struct {
+ Result []TaskQueueServiceError_ErrorCode `protobuf:"varint,3,rep,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteResponse) Reset() { *m = TaskQueueDeleteResponse{} }
+func (m *TaskQueueDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteResponse) ProtoMessage() {}
+
+func (m *TaskQueueDeleteResponse) GetResult() []TaskQueueServiceError_ErrorCode {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+type TaskQueueForceRunRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueForceRunRequest) Reset() { *m = TaskQueueForceRunRequest{} }
+func (m *TaskQueueForceRunRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueForceRunRequest) ProtoMessage() {}
+
+func (m *TaskQueueForceRunRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueForceRunRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueForceRunRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+type TaskQueueForceRunResponse struct {
+ Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,3,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueForceRunResponse) Reset() { *m = TaskQueueForceRunResponse{} }
+func (m *TaskQueueForceRunResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueForceRunResponse) ProtoMessage() {}
+
+func (m *TaskQueueForceRunResponse) GetResult() TaskQueueServiceError_ErrorCode {
+ if m != nil && m.Result != nil {
+ return *m.Result
+ }
+ return TaskQueueServiceError_OK
+}
+
+type TaskQueueUpdateQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
+ BucketCapacity *int32 `protobuf:"varint,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
+ UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,6,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ MaxConcurrentRequests *int32 `protobuf:"varint,7,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,8,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Acl *TaskQueueAcl `protobuf:"bytes,9,opt,name=acl" json:"acl,omitempty"`
+ HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,10,rep,name=header_override" json:"header_override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateQueueRequest) Reset() { *m = TaskQueueUpdateQueueRequest{} }
+func (m *TaskQueueUpdateQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateQueueRequest) ProtoMessage() {}
+
+const Default_TaskQueueUpdateQueueRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+
+func (m *TaskQueueUpdateQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetBucketRefillPerSecond() float64 {
+ if m != nil && m.BucketRefillPerSecond != nil {
+ return *m.BucketRefillPerSecond
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetBucketCapacity() int32 {
+ if m != nil && m.BucketCapacity != nil {
+ return *m.BucketCapacity
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetUserSpecifiedRate() string {
+ if m != nil && m.UserSpecifiedRate != nil {
+ return *m.UserSpecifiedRate
+ }
+ return ""
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetMaxConcurrentRequests() int32 {
+ if m != nil && m.MaxConcurrentRequests != nil {
+ return *m.MaxConcurrentRequests
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueUpdateQueueRequest_Mode
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetAcl() *TaskQueueAcl {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetHeaderOverride() []*TaskQueueHttpHeader {
+ if m != nil {
+ return m.HeaderOverride
+ }
+ return nil
+}
+
+type TaskQueueUpdateQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateQueueResponse) Reset() { *m = TaskQueueUpdateQueueResponse{} }
+func (m *TaskQueueUpdateQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateQueueResponse) ProtoMessage() {}
+
+type TaskQueueFetchQueuesRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ MaxRows *int32 `protobuf:"varint,2,req,name=max_rows" json:"max_rows,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesRequest) Reset() { *m = TaskQueueFetchQueuesRequest{} }
+func (m *TaskQueueFetchQueuesRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesRequest) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueuesRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesRequest) GetMaxRows() int32 {
+ if m != nil && m.MaxRows != nil {
+ return *m.MaxRows
+ }
+ return 0
+}
+
+type TaskQueueFetchQueuesResponse struct {
+ Queue []*TaskQueueFetchQueuesResponse_Queue `protobuf:"group,1,rep,name=Queue" json:"queue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesResponse) Reset() { *m = TaskQueueFetchQueuesResponse{} }
+func (m *TaskQueueFetchQueuesResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueuesResponse) GetQueue() []*TaskQueueFetchQueuesResponse_Queue {
+ if m != nil {
+ return m.Queue
+ }
+ return nil
+}
+
+type TaskQueueFetchQueuesResponse_Queue struct {
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
+ BucketCapacity *float64 `protobuf:"fixed64,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
+ UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
+ Paused *bool `protobuf:"varint,6,req,name=paused,def=0" json:"paused,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,7,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ MaxConcurrentRequests *int32 `protobuf:"varint,8,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,9,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Acl *TaskQueueAcl `protobuf:"bytes,10,opt,name=acl" json:"acl,omitempty"`
+ HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,11,rep,name=header_override" json:"header_override,omitempty"`
+ CreatorName *string `protobuf:"bytes,12,opt,name=creator_name,def=apphosting" json:"creator_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) Reset() { *m = TaskQueueFetchQueuesResponse_Queue{} }
+func (m *TaskQueueFetchQueuesResponse_Queue) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesResponse_Queue) ProtoMessage() {}
+
+const Default_TaskQueueFetchQueuesResponse_Queue_Paused bool = false
+const Default_TaskQueueFetchQueuesResponse_Queue_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+const Default_TaskQueueFetchQueuesResponse_Queue_CreatorName string = "apphosting"
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketRefillPerSecond() float64 {
+ if m != nil && m.BucketRefillPerSecond != nil {
+ return *m.BucketRefillPerSecond
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketCapacity() float64 {
+ if m != nil && m.BucketCapacity != nil {
+ return *m.BucketCapacity
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetUserSpecifiedRate() string {
+ if m != nil && m.UserSpecifiedRate != nil {
+ return *m.UserSpecifiedRate
+ }
+ return ""
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetPaused() bool {
+ if m != nil && m.Paused != nil {
+ return *m.Paused
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_Paused
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetMaxConcurrentRequests() int32 {
+ if m != nil && m.MaxConcurrentRequests != nil {
+ return *m.MaxConcurrentRequests
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_Mode
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetAcl() *TaskQueueAcl {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetHeaderOverride() []*TaskQueueHttpHeader {
+ if m != nil {
+ return m.HeaderOverride
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetCreatorName() string {
+ if m != nil && m.CreatorName != nil {
+ return *m.CreatorName
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_CreatorName
+}
+
+type TaskQueueFetchQueueStatsRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName [][]byte `protobuf:"bytes,2,rep,name=queue_name" json:"queue_name,omitempty"`
+ MaxNumTasks *int32 `protobuf:"varint,3,opt,name=max_num_tasks,def=0" json:"max_num_tasks,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) Reset() { *m = TaskQueueFetchQueueStatsRequest{} }
+func (m *TaskQueueFetchQueueStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueueStatsRequest) ProtoMessage() {}
+
+const Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks int32 = 0
+
+func (m *TaskQueueFetchQueueStatsRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) GetQueueName() [][]byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) GetMaxNumTasks() int32 {
+ if m != nil && m.MaxNumTasks != nil {
+ return *m.MaxNumTasks
+ }
+ return Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks
+}
+
+type TaskQueueScannerQueueInfo struct {
+ ExecutedLastMinute *int64 `protobuf:"varint,1,req,name=executed_last_minute" json:"executed_last_minute,omitempty"`
+ ExecutedLastHour *int64 `protobuf:"varint,2,req,name=executed_last_hour" json:"executed_last_hour,omitempty"`
+ SamplingDurationSeconds *float64 `protobuf:"fixed64,3,req,name=sampling_duration_seconds" json:"sampling_duration_seconds,omitempty"`
+ RequestsInFlight *int32 `protobuf:"varint,4,opt,name=requests_in_flight" json:"requests_in_flight,omitempty"`
+ EnforcedRate *float64 `protobuf:"fixed64,5,opt,name=enforced_rate" json:"enforced_rate,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueScannerQueueInfo) Reset() { *m = TaskQueueScannerQueueInfo{} }
+func (m *TaskQueueScannerQueueInfo) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueScannerQueueInfo) ProtoMessage() {}
+
+func (m *TaskQueueScannerQueueInfo) GetExecutedLastMinute() int64 {
+ if m != nil && m.ExecutedLastMinute != nil {
+ return *m.ExecutedLastMinute
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetExecutedLastHour() int64 {
+ if m != nil && m.ExecutedLastHour != nil {
+ return *m.ExecutedLastHour
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetSamplingDurationSeconds() float64 {
+ if m != nil && m.SamplingDurationSeconds != nil {
+ return *m.SamplingDurationSeconds
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetRequestsInFlight() int32 {
+ if m != nil && m.RequestsInFlight != nil {
+ return *m.RequestsInFlight
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetEnforcedRate() float64 {
+ if m != nil && m.EnforcedRate != nil {
+ return *m.EnforcedRate
+ }
+ return 0
+}
+
+type TaskQueueFetchQueueStatsResponse struct {
+ Queuestats []*TaskQueueFetchQueueStatsResponse_QueueStats `protobuf:"group,1,rep,name=QueueStats" json:"queuestats,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsResponse) Reset() { *m = TaskQueueFetchQueueStatsResponse{} }
+func (m *TaskQueueFetchQueueStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueueStatsResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueueStatsResponse) GetQueuestats() []*TaskQueueFetchQueueStatsResponse_QueueStats {
+ if m != nil {
+ return m.Queuestats
+ }
+ return nil
+}
+
+type TaskQueueFetchQueueStatsResponse_QueueStats struct {
+ NumTasks *int32 `protobuf:"varint,2,req,name=num_tasks" json:"num_tasks,omitempty"`
+ OldestEtaUsec *int64 `protobuf:"varint,3,req,name=oldest_eta_usec" json:"oldest_eta_usec,omitempty"`
+ ScannerInfo *TaskQueueScannerQueueInfo `protobuf:"bytes,4,opt,name=scanner_info" json:"scanner_info,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) Reset() {
+ *m = TaskQueueFetchQueueStatsResponse_QueueStats{}
+}
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) String() string {
+ return proto.CompactTextString(m)
+}
+func (*TaskQueueFetchQueueStatsResponse_QueueStats) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetNumTasks() int32 {
+ if m != nil && m.NumTasks != nil {
+ return *m.NumTasks
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetOldestEtaUsec() int64 {
+ if m != nil && m.OldestEtaUsec != nil {
+ return *m.OldestEtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetScannerInfo() *TaskQueueScannerQueueInfo {
+ if m != nil {
+ return m.ScannerInfo
+ }
+ return nil
+}
+
+type TaskQueuePauseQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ Pause *bool `protobuf:"varint,3,req,name=pause" json:"pause,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePauseQueueRequest) Reset() { *m = TaskQueuePauseQueueRequest{} }
+func (m *TaskQueuePauseQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePauseQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueuePauseQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueuePauseQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueuePauseQueueRequest) GetPause() bool {
+ if m != nil && m.Pause != nil {
+ return *m.Pause
+ }
+ return false
+}
+
+type TaskQueuePauseQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePauseQueueResponse) Reset() { *m = TaskQueuePauseQueueResponse{} }
+func (m *TaskQueuePauseQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePauseQueueResponse) ProtoMessage() {}
+
+type TaskQueuePurgeQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePurgeQueueRequest) Reset() { *m = TaskQueuePurgeQueueRequest{} }
+func (m *TaskQueuePurgeQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePurgeQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueuePurgeQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueuePurgeQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+type TaskQueuePurgeQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePurgeQueueResponse) Reset() { *m = TaskQueuePurgeQueueResponse{} }
+func (m *TaskQueuePurgeQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePurgeQueueResponse) ProtoMessage() {}
+
+type TaskQueueDeleteQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteQueueRequest) Reset() { *m = TaskQueueDeleteQueueRequest{} }
+func (m *TaskQueueDeleteQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+type TaskQueueDeleteQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteQueueResponse) Reset() { *m = TaskQueueDeleteQueueResponse{} }
+func (m *TaskQueueDeleteQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteQueueResponse) ProtoMessage() {}
+
+type TaskQueueDeleteGroupRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteGroupRequest) Reset() { *m = TaskQueueDeleteGroupRequest{} }
+func (m *TaskQueueDeleteGroupRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteGroupRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteGroupRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type TaskQueueDeleteGroupResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteGroupResponse) Reset() { *m = TaskQueueDeleteGroupResponse{} }
+func (m *TaskQueueDeleteGroupResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteGroupResponse) ProtoMessage() {}
+
+type TaskQueueQueryTasksRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ StartTaskName []byte `protobuf:"bytes,3,opt,name=start_task_name" json:"start_task_name,omitempty"`
+ StartEtaUsec *int64 `protobuf:"varint,4,opt,name=start_eta_usec" json:"start_eta_usec,omitempty"`
+ StartTag []byte `protobuf:"bytes,6,opt,name=start_tag" json:"start_tag,omitempty"`
+ MaxRows *int32 `protobuf:"varint,5,opt,name=max_rows,def=1" json:"max_rows,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksRequest) Reset() { *m = TaskQueueQueryTasksRequest{} }
+func (m *TaskQueueQueryTasksRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksRequest) ProtoMessage() {}
+
+const Default_TaskQueueQueryTasksRequest_MaxRows int32 = 1
+
+func (m *TaskQueueQueryTasksRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartTaskName() []byte {
+ if m != nil {
+ return m.StartTaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartEtaUsec() int64 {
+ if m != nil && m.StartEtaUsec != nil {
+ return *m.StartEtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartTag() []byte {
+ if m != nil {
+ return m.StartTag
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetMaxRows() int32 {
+ if m != nil && m.MaxRows != nil {
+ return *m.MaxRows
+ }
+ return Default_TaskQueueQueryTasksRequest_MaxRows
+}
+
+type TaskQueueQueryTasksResponse struct {
+ Task []*TaskQueueQueryTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse) Reset() { *m = TaskQueueQueryTasksResponse{} }
+func (m *TaskQueueQueryTasksResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse) GetTask() []*TaskQueueQueryTasksResponse_Task {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task struct {
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"`
+ Method *TaskQueueQueryTasksResponse_Task_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueQueryTasksResponse_Task_RequestMethod" json:"method,omitempty"`
+ RetryCount *int32 `protobuf:"varint,6,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
+ Header []*TaskQueueQueryTasksResponse_Task_Header `protobuf:"group,7,rep,name=Header" json:"header,omitempty"`
+ BodySize *int32 `protobuf:"varint,10,opt,name=body_size" json:"body_size,omitempty"`
+ Body []byte `protobuf:"bytes,11,opt,name=body" json:"body,omitempty"`
+ CreationTimeUsec *int64 `protobuf:"varint,12,req,name=creation_time_usec" json:"creation_time_usec,omitempty"`
+ Crontimetable *TaskQueueQueryTasksResponse_Task_CronTimetable `protobuf:"group,13,opt,name=CronTimetable" json:"crontimetable,omitempty"`
+ Runlog *TaskQueueQueryTasksResponse_Task_RunLog `protobuf:"group,16,opt,name=RunLog" json:"runlog,omitempty"`
+ Description []byte `protobuf:"bytes,21,opt,name=description" json:"description,omitempty"`
+ Payload *TaskPayload `protobuf:"bytes,22,opt,name=payload" json:"payload,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,23,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ FirstTryUsec *int64 `protobuf:"varint,24,opt,name=first_try_usec" json:"first_try_usec,omitempty"`
+ Tag []byte `protobuf:"bytes,25,opt,name=tag" json:"tag,omitempty"`
+ ExecutionCount *int32 `protobuf:"varint,26,opt,name=execution_count,def=0" json:"execution_count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) Reset() { *m = TaskQueueQueryTasksResponse_Task{} }
+func (m *TaskQueueQueryTasksResponse_Task) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task) ProtoMessage() {}
+
+const Default_TaskQueueQueryTasksResponse_Task_RetryCount int32 = 0
+const Default_TaskQueueQueryTasksResponse_Task_ExecutionCount int32 = 0
+
+func (m *TaskQueueQueryTasksResponse_Task) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetUrl() []byte {
+ if m != nil {
+ return m.Url
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetMethod() TaskQueueQueryTasksResponse_Task_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return TaskQueueQueryTasksResponse_Task_GET
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRetryCount() int32 {
+ if m != nil && m.RetryCount != nil {
+ return *m.RetryCount
+ }
+ return Default_TaskQueueQueryTasksResponse_Task_RetryCount
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetHeader() []*TaskQueueQueryTasksResponse_Task_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetBodySize() int32 {
+ if m != nil && m.BodySize != nil {
+ return *m.BodySize
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetCreationTimeUsec() int64 {
+ if m != nil && m.CreationTimeUsec != nil {
+ return *m.CreationTimeUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetCrontimetable() *TaskQueueQueryTasksResponse_Task_CronTimetable {
+ if m != nil {
+ return m.Crontimetable
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRunlog() *TaskQueueQueryTasksResponse_Task_RunLog {
+ if m != nil {
+ return m.Runlog
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetDescription() []byte {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetPayload() *TaskPayload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetFirstTryUsec() int64 {
+ if m != nil && m.FirstTryUsec != nil {
+ return *m.FirstTryUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetExecutionCount() int32 {
+ if m != nil && m.ExecutionCount != nil {
+ return *m.ExecutionCount
+ }
+ return Default_TaskQueueQueryTasksResponse_Task_ExecutionCount
+}
+
+type TaskQueueQueryTasksResponse_Task_Header struct {
+ Key []byte `protobuf:"bytes,8,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,9,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_Header{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_Header) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task_Header) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_CronTimetable struct {
+ Schedule []byte `protobuf:"bytes,14,req,name=schedule" json:"schedule,omitempty"`
+ Timezone []byte `protobuf:"bytes,15,req,name=timezone" json:"timezone,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_CronTimetable{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) String() string {
+ return proto.CompactTextString(m)
+}
+func (*TaskQueueQueryTasksResponse_Task_CronTimetable) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetSchedule() []byte {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetTimezone() []byte {
+ if m != nil {
+ return m.Timezone
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_RunLog struct {
+ DispatchedUsec *int64 `protobuf:"varint,17,req,name=dispatched_usec" json:"dispatched_usec,omitempty"`
+ LagUsec *int64 `protobuf:"varint,18,req,name=lag_usec" json:"lag_usec,omitempty"`
+ ElapsedUsec *int64 `protobuf:"varint,19,req,name=elapsed_usec" json:"elapsed_usec,omitempty"`
+ ResponseCode *int64 `protobuf:"varint,20,opt,name=response_code" json:"response_code,omitempty"`
+ RetryReason *string `protobuf:"bytes,27,opt,name=retry_reason" json:"retry_reason,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_RunLog{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task_RunLog) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetDispatchedUsec() int64 {
+ if m != nil && m.DispatchedUsec != nil {
+ return *m.DispatchedUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetLagUsec() int64 {
+ if m != nil && m.LagUsec != nil {
+ return *m.LagUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetElapsedUsec() int64 {
+ if m != nil && m.ElapsedUsec != nil {
+ return *m.ElapsedUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetResponseCode() int64 {
+ if m != nil && m.ResponseCode != nil {
+ return *m.ResponseCode
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetRetryReason() string {
+ if m != nil && m.RetryReason != nil {
+ return *m.RetryReason
+ }
+ return ""
+}
+
+type TaskQueueFetchTaskRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchTaskRequest) Reset() { *m = TaskQueueFetchTaskRequest{} }
+func (m *TaskQueueFetchTaskRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchTaskRequest) ProtoMessage() {}
+
+func (m *TaskQueueFetchTaskRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchTaskRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchTaskRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+type TaskQueueFetchTaskResponse struct {
+ Task *TaskQueueQueryTasksResponse `protobuf:"bytes,1,req,name=task" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchTaskResponse) Reset() { *m = TaskQueueFetchTaskResponse{} }
+func (m *TaskQueueFetchTaskResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchTaskResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchTaskResponse) GetTask() *TaskQueueQueryTasksResponse {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueUpdateStorageLimitRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ Limit *int64 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateStorageLimitRequest) Reset() { *m = TaskQueueUpdateStorageLimitRequest{} }
+func (m *TaskQueueUpdateStorageLimitRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateStorageLimitRequest) ProtoMessage() {}
+
+func (m *TaskQueueUpdateStorageLimitRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateStorageLimitRequest) GetLimit() int64 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+type TaskQueueUpdateStorageLimitResponse struct {
+ NewLimit *int64 `protobuf:"varint,1,req,name=new_limit" json:"new_limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateStorageLimitResponse) Reset() { *m = TaskQueueUpdateStorageLimitResponse{} }
+func (m *TaskQueueUpdateStorageLimitResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateStorageLimitResponse) ProtoMessage() {}
+
+func (m *TaskQueueUpdateStorageLimitResponse) GetNewLimit() int64 {
+ if m != nil && m.NewLimit != nil {
+ return *m.NewLimit
+ }
+ return 0
+}
+
+type TaskQueueQueryAndOwnTasksRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ LeaseSeconds *float64 `protobuf:"fixed64,2,req,name=lease_seconds" json:"lease_seconds,omitempty"`
+ MaxTasks *int64 `protobuf:"varint,3,req,name=max_tasks" json:"max_tasks,omitempty"`
+ GroupByTag *bool `protobuf:"varint,4,opt,name=group_by_tag,def=0" json:"group_by_tag,omitempty"`
+ Tag []byte `protobuf:"bytes,5,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) Reset() { *m = TaskQueueQueryAndOwnTasksRequest{} }
+func (m *TaskQueueQueryAndOwnTasksRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksRequest) ProtoMessage() {}
+
+const Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag bool = false
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetLeaseSeconds() float64 {
+ if m != nil && m.LeaseSeconds != nil {
+ return *m.LeaseSeconds
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetMaxTasks() int64 {
+ if m != nil && m.MaxTasks != nil {
+ return *m.MaxTasks
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetGroupByTag() bool {
+ if m != nil && m.GroupByTag != nil {
+ return *m.GroupByTag
+ }
+ return Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueQueryAndOwnTasksResponse struct {
+ Task []*TaskQueueQueryAndOwnTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse) Reset() { *m = TaskQueueQueryAndOwnTasksResponse{} }
+func (m *TaskQueueQueryAndOwnTasksResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksResponse) ProtoMessage() {}
+
+func (m *TaskQueueQueryAndOwnTasksResponse) GetTask() []*TaskQueueQueryAndOwnTasksResponse_Task {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueQueryAndOwnTasksResponse_Task struct {
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ RetryCount *int32 `protobuf:"varint,4,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
+ Body []byte `protobuf:"bytes,5,opt,name=body" json:"body,omitempty"`
+ Tag []byte `protobuf:"bytes,6,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) Reset() {
+ *m = TaskQueueQueryAndOwnTasksResponse_Task{}
+}
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksResponse_Task) ProtoMessage() {}
+
+const Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount int32 = 0
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetRetryCount() int32 {
+ if m != nil && m.RetryCount != nil {
+ return *m.RetryCount
+ }
+ return Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueModifyTaskLeaseRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ LeaseSeconds *float64 `protobuf:"fixed64,4,req,name=lease_seconds" json:"lease_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) Reset() { *m = TaskQueueModifyTaskLeaseRequest{} }
+func (m *TaskQueueModifyTaskLeaseRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueModifyTaskLeaseRequest) ProtoMessage() {}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetLeaseSeconds() float64 {
+ if m != nil && m.LeaseSeconds != nil {
+ return *m.LeaseSeconds
+ }
+ return 0
+}
+
+type TaskQueueModifyTaskLeaseResponse struct {
+ UpdatedEtaUsec *int64 `protobuf:"varint,1,req,name=updated_eta_usec" json:"updated_eta_usec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueModifyTaskLeaseResponse) Reset() { *m = TaskQueueModifyTaskLeaseResponse{} }
+func (m *TaskQueueModifyTaskLeaseResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueModifyTaskLeaseResponse) ProtoMessage() {}
+
+func (m *TaskQueueModifyTaskLeaseResponse) GetUpdatedEtaUsec() int64 {
+ if m != nil && m.UpdatedEtaUsec != nil {
+ return *m.UpdatedEtaUsec
+ }
+ return 0
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
new file mode 100644
index 000000000..419aaf570
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
@@ -0,0 +1,342 @@
+syntax = "proto2";
+option go_package = "taskqueue";
+
+import "google.golang.org/appengine/internal/datastore/datastore_v3.proto";
+
+package appengine;
+
+message TaskQueueServiceError {
+ enum ErrorCode {
+ OK = 0;
+ UNKNOWN_QUEUE = 1;
+ TRANSIENT_ERROR = 2;
+ INTERNAL_ERROR = 3;
+ TASK_TOO_LARGE = 4;
+ INVALID_TASK_NAME = 5;
+ INVALID_QUEUE_NAME = 6;
+ INVALID_URL = 7;
+ INVALID_QUEUE_RATE = 8;
+ PERMISSION_DENIED = 9;
+ TASK_ALREADY_EXISTS = 10;
+ TOMBSTONED_TASK = 11;
+ INVALID_ETA = 12;
+ INVALID_REQUEST = 13;
+ UNKNOWN_TASK = 14;
+ TOMBSTONED_QUEUE = 15;
+ DUPLICATE_TASK_NAME = 16;
+ SKIPPED = 17;
+ TOO_MANY_TASKS = 18;
+ INVALID_PAYLOAD = 19;
+ INVALID_RETRY_PARAMETERS = 20;
+ INVALID_QUEUE_MODE = 21;
+ ACL_LOOKUP_ERROR = 22;
+ TRANSACTIONAL_REQUEST_TOO_LARGE = 23;
+ INCORRECT_CREATOR_NAME = 24;
+ TASK_LEASE_EXPIRED = 25;
+ QUEUE_PAUSED = 26;
+ INVALID_TAG = 27;
+
+    // Reserved range for Datastore error codes.
+    // The original Datastore error code is shifted by the DATASTORE_ERROR offset.
+ DATASTORE_ERROR = 10000;
+ }
+}
+
+message TaskPayload {
+ extensions 10 to max;
+ option message_set_wire_format = true;
+}
+
+message TaskQueueRetryParameters {
+ optional int32 retry_limit = 1;
+ optional int64 age_limit_sec = 2;
+
+ optional double min_backoff_sec = 3 [default = 0.1];
+ optional double max_backoff_sec = 4 [default = 3600];
+ optional int32 max_doublings = 5 [default = 16];
+}
+
+message TaskQueueAcl {
+ repeated bytes user_email = 1;
+ repeated bytes writer_email = 2;
+}
+
+message TaskQueueHttpHeader {
+ required bytes key = 1;
+ required bytes value = 2;
+}
+
+message TaskQueueMode {
+ enum Mode {
+ PUSH = 0;
+ PULL = 1;
+ }
+}
+
+message TaskQueueAddRequest {
+ required bytes queue_name = 1;
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ }
+ optional RequestMethod method = 5 [default=POST];
+
+ optional bytes url = 4;
+
+ repeated group Header = 6 {
+ required bytes key = 7;
+ required bytes value = 8;
+ }
+
+ optional bytes body = 9 [ctype=CORD];
+ optional Transaction transaction = 10;
+ optional bytes app_id = 11;
+
+ optional group CronTimetable = 12 {
+ required bytes schedule = 13;
+ required bytes timezone = 14;
+ }
+
+ optional bytes description = 15;
+ optional TaskPayload payload = 16;
+ optional TaskQueueRetryParameters retry_parameters = 17;
+ optional TaskQueueMode.Mode mode = 18 [default=PUSH];
+ optional bytes tag = 19;
+}
+
+message TaskQueueAddResponse {
+ optional bytes chosen_task_name = 1;
+}
+
+message TaskQueueBulkAddRequest {
+ repeated TaskQueueAddRequest add_request = 1;
+}
+
+message TaskQueueBulkAddResponse {
+ repeated group TaskResult = 1 {
+ required TaskQueueServiceError.ErrorCode result = 2;
+ optional bytes chosen_task_name = 3;
+ }
+}
+
+message TaskQueueDeleteRequest {
+ required bytes queue_name = 1;
+ repeated bytes task_name = 2;
+ optional bytes app_id = 3;
+}
+
+message TaskQueueDeleteResponse {
+ repeated TaskQueueServiceError.ErrorCode result = 3;
+}
+
+message TaskQueueForceRunRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bytes task_name = 3;
+}
+
+message TaskQueueForceRunResponse {
+ required TaskQueueServiceError.ErrorCode result = 3;
+}
+
+message TaskQueueUpdateQueueRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required double bucket_refill_per_second = 3;
+ required int32 bucket_capacity = 4;
+ optional string user_specified_rate = 5;
+ optional TaskQueueRetryParameters retry_parameters = 6;
+ optional int32 max_concurrent_requests = 7;
+ optional TaskQueueMode.Mode mode = 8 [default = PUSH];
+ optional TaskQueueAcl acl = 9;
+ repeated TaskQueueHttpHeader header_override = 10;
+}
+
+message TaskQueueUpdateQueueResponse {
+}
+
+message TaskQueueFetchQueuesRequest {
+ optional bytes app_id = 1;
+ required int32 max_rows = 2;
+}
+
+message TaskQueueFetchQueuesResponse {
+ repeated group Queue = 1 {
+ required bytes queue_name = 2;
+ required double bucket_refill_per_second = 3;
+ required double bucket_capacity = 4;
+ optional string user_specified_rate = 5;
+ required bool paused = 6 [default=false];
+ optional TaskQueueRetryParameters retry_parameters = 7;
+ optional int32 max_concurrent_requests = 8;
+ optional TaskQueueMode.Mode mode = 9 [default = PUSH];
+ optional TaskQueueAcl acl = 10;
+ repeated TaskQueueHttpHeader header_override = 11;
+ optional string creator_name = 12 [ctype=CORD, default="apphosting"];
+ }
+}
+
+message TaskQueueFetchQueueStatsRequest {
+ optional bytes app_id = 1;
+ repeated bytes queue_name = 2;
+ optional int32 max_num_tasks = 3 [default = 0];
+}
+
+message TaskQueueScannerQueueInfo {
+ required int64 executed_last_minute = 1;
+ required int64 executed_last_hour = 2;
+ required double sampling_duration_seconds = 3;
+ optional int32 requests_in_flight = 4;
+ optional double enforced_rate = 5;
+}
+
+message TaskQueueFetchQueueStatsResponse {
+ repeated group QueueStats = 1 {
+ required int32 num_tasks = 2;
+ required int64 oldest_eta_usec = 3;
+ optional TaskQueueScannerQueueInfo scanner_info = 4;
+ }
+}
+
+message TaskQueuePauseQueueRequest {
+ required bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bool pause = 3;
+}
+
+message TaskQueuePauseQueueResponse {
+}
+
+message TaskQueuePurgeQueueRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+}
+
+message TaskQueuePurgeQueueResponse {
+}
+
+message TaskQueueDeleteQueueRequest {
+ required bytes app_id = 1;
+ required bytes queue_name = 2;
+}
+
+message TaskQueueDeleteQueueResponse {
+}
+
+message TaskQueueDeleteGroupRequest {
+ required bytes app_id = 1;
+}
+
+message TaskQueueDeleteGroupResponse {
+}
+
+message TaskQueueQueryTasksRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+
+ optional bytes start_task_name = 3;
+ optional int64 start_eta_usec = 4;
+ optional bytes start_tag = 6;
+ optional int32 max_rows = 5 [default = 1];
+}
+
+message TaskQueueQueryTasksResponse {
+ repeated group Task = 1 {
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ optional bytes url = 4;
+
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ }
+ optional RequestMethod method = 5;
+
+ optional int32 retry_count = 6 [default=0];
+
+ repeated group Header = 7 {
+ required bytes key = 8;
+ required bytes value = 9;
+ }
+
+ optional int32 body_size = 10;
+ optional bytes body = 11 [ctype=CORD];
+ required int64 creation_time_usec = 12;
+
+ optional group CronTimetable = 13 {
+ required bytes schedule = 14;
+ required bytes timezone = 15;
+ }
+
+ optional group RunLog = 16 {
+ required int64 dispatched_usec = 17;
+ required int64 lag_usec = 18;
+ required int64 elapsed_usec = 19;
+ optional int64 response_code = 20;
+ optional string retry_reason = 27;
+ }
+
+ optional bytes description = 21;
+ optional TaskPayload payload = 22;
+ optional TaskQueueRetryParameters retry_parameters = 23;
+ optional int64 first_try_usec = 24;
+ optional bytes tag = 25;
+ optional int32 execution_count = 26 [default=0];
+ }
+}
+
+message TaskQueueFetchTaskRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bytes task_name = 3;
+}
+
+message TaskQueueFetchTaskResponse {
+ required TaskQueueQueryTasksResponse task = 1;
+}
+
+message TaskQueueUpdateStorageLimitRequest {
+ required bytes app_id = 1;
+ required int64 limit = 2;
+}
+
+message TaskQueueUpdateStorageLimitResponse {
+ required int64 new_limit = 1;
+}
+
+message TaskQueueQueryAndOwnTasksRequest {
+ required bytes queue_name = 1;
+ required double lease_seconds = 2;
+ required int64 max_tasks = 3;
+ optional bool group_by_tag = 4 [default=false];
+ optional bytes tag = 5;
+}
+
+message TaskQueueQueryAndOwnTasksResponse {
+ repeated group Task = 1 {
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ optional int32 retry_count = 4 [default=0];
+ optional bytes body = 5 [ctype=CORD];
+ optional bytes tag = 6;
+ }
+}
+
+message TaskQueueModifyTaskLeaseRequest {
+ required bytes queue_name = 1;
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ required double lease_seconds = 4;
+}
+
+message TaskQueueModifyTaskLeaseResponse {
+ required int64 updated_eta_usec = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 000000000..28a6d1812
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,107 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements hooks for applying datastore transactions.
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var transactionSetters = make(map[reflect.Type]reflect.Value)
+
+// RegisterTransactionSetter registers a function that sets transaction information
+// in a protocol buffer message. f should be a function with two arguments,
+// the first being a protocol buffer type, and the second being *datastore.Transaction.
+func RegisterTransactionSetter(f interface{}) {
+ v := reflect.ValueOf(f)
+ transactionSetters[v.Type().In(0)] = v
+}
+
+// applyTransaction applies the transaction t to message pb
+// by using the relevant setter passed to RegisterTransactionSetter.
+func applyTransaction(pb proto.Message, t *pb.Transaction) {
+ v := reflect.ValueOf(pb)
+ if f, ok := transactionSetters[v.Type()]; ok {
+ f.Call([]reflect.Value{v, reflect.ValueOf(t)})
+ }
+}
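+
+// Illustrative only, not part of this file: a package that issues datastore
+// RPCs can register a setter so applyTransaction can stamp its request type.
+// Assuming pb.PutRequest exposes a Transaction field, a sketch would be:
+//
+//	func init() {
+//		RegisterTransactionSetter(func(req *pb.PutRequest, t *pb.Transaction) {
+//			req.Transaction = t
+//		})
+//	}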
+
+var transactionKey = "used for *Transaction"
+
+func transactionFromContext(ctx netcontext.Context) *transaction {
+ t, _ := ctx.Value(&transactionKey).(*transaction)
+ return t
+}
+
+func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
+ return netcontext.WithValue(ctx, &transactionKey, t)
+}
+
+type transaction struct {
+ transaction pb.Transaction
+ finished bool
+}
+
+var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
+
+func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error {
+ if transactionFromContext(c) != nil {
+ return errors.New("nested transactions are not supported")
+ }
+
+ // Begin the transaction.
+ t := &transaction{}
+ req := &pb.BeginTransactionRequest{
+ App: proto.String(FullyQualifiedAppID(c)),
+ }
+ if xg {
+ req.AllowMultipleEg = proto.Bool(true)
+ }
+ if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
+ return err
+ }
+
+	// Call f, rolling back the transaction if f returns a non-nil error or panics.
+	// The panic is not recovered.
+ defer func() {
+ if t.finished {
+ return
+ }
+ t.finished = true
+ // Ignore the error return value, since we are already returning a non-nil
+ // error (or we're panicking).
+ Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
+ }()
+ if err := f(withTransaction(c, t)); err != nil {
+ return err
+ }
+ t.finished = true
+
+ // Commit the transaction.
+ res := &pb.CommitResponse{}
+ err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
+ if ae, ok := err.(*APIError); ok {
+ /* TODO: restore this conditional
+ if appengine.IsDevAppServer() {
+ */
+ // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
+ // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
+ if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
+ return ErrConcurrentTransaction
+ }
+ if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
+ return ErrConcurrentTransaction
+ }
+ }
+ return err
+}
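+
+// Illustrative only: RunTransactionOnce runs f a single time, so callers
+// typically retry on ErrConcurrentTransaction. A sketch of such a wrapper
+// (the three-attempt limit is an assumption, not part of this API):
+//
+//	func runTransaction(c netcontext.Context, f func(netcontext.Context) error) error {
+//		err := RunTransactionOnce(c, f, false)
+//		for i := 0; i < 2 && err == ErrConcurrentTransaction; i++ {
+//			err = RunTransactionOnce(c, f, false)
+//		}
+//		return err
+//	}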
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
new file mode 100644
index 000000000..af463fbb2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -0,0 +1,355 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+// DO NOT EDIT!
+
+/*
+Package urlfetch is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+
+It has these top-level messages:
+ URLFetchServiceError
+ URLFetchRequest
+ URLFetchResponse
+*/
+package urlfetch
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type URLFetchServiceError_ErrorCode int32
+
+const (
+ URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
+ URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
+ URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
+ URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
+ URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
+ URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
+ URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
+ URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
+ URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
+ URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
+ URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
+ URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
+ URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
+)
+
+var URLFetchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_URL",
+ 2: "FETCH_ERROR",
+ 3: "UNSPECIFIED_ERROR",
+ 4: "RESPONSE_TOO_LARGE",
+ 5: "DEADLINE_EXCEEDED",
+ 6: "SSL_CERTIFICATE_ERROR",
+ 7: "DNS_ERROR",
+ 8: "CLOSED",
+ 9: "INTERNAL_TRANSIENT_ERROR",
+ 10: "TOO_MANY_REDIRECTS",
+ 11: "MALFORMED_REPLY",
+ 12: "CONNECTION_ERROR",
+}
+var URLFetchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_URL": 1,
+ "FETCH_ERROR": 2,
+ "UNSPECIFIED_ERROR": 3,
+ "RESPONSE_TOO_LARGE": 4,
+ "DEADLINE_EXCEEDED": 5,
+ "SSL_CERTIFICATE_ERROR": 6,
+ "DNS_ERROR": 7,
+ "CLOSED": 8,
+ "INTERNAL_TRANSIENT_ERROR": 9,
+ "TOO_MANY_REDIRECTS": 10,
+ "MALFORMED_REPLY": 11,
+ "CONNECTION_ERROR": 12,
+}
+
+func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
+ p := new(URLFetchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x URLFetchServiceError_ErrorCode) String() string {
+ return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
+}
+func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchServiceError_ErrorCode(value)
+ return nil
+}
+
+type URLFetchRequest_RequestMethod int32
+
+const (
+ URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
+ URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
+ URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
+ URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
+ URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
+ URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
+)
+
+var URLFetchRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+ 6: "PATCH",
+}
+var URLFetchRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+ "PATCH": 6,
+}
+
+func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
+ p := new(URLFetchRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x URLFetchRequest_RequestMethod) String() string {
+ return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
+}
+func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchRequest_RequestMethod(value)
+ return nil
+}
+
+type URLFetchServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
+func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
+func (*URLFetchServiceError) ProtoMessage() {}
+
+type URLFetchRequest struct {
+ Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
+ Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
+ Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+ Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
+ FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
+ Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
+ MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
+func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest) ProtoMessage() {}
+
+const Default_URLFetchRequest_FollowRedirects bool = true
+const Default_URLFetchRequest_MustValidateServerCertificate bool = true
+
+func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return URLFetchRequest_GET
+}
+
+func (m *URLFetchRequest) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetPayload() []byte {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetFollowRedirects() bool {
+ if m != nil && m.FollowRedirects != nil {
+ return *m.FollowRedirects
+ }
+ return Default_URLFetchRequest_FollowRedirects
+}
+
+func (m *URLFetchRequest) GetDeadline() float64 {
+ if m != nil && m.Deadline != nil {
+ return *m.Deadline
+ }
+ return 0
+}
+
+func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
+ if m != nil && m.MustValidateServerCertificate != nil {
+ return *m.MustValidateServerCertificate
+ }
+ return Default_URLFetchRequest_MustValidateServerCertificate
+}
+
+type URLFetchRequest_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
+func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest_Header) ProtoMessage() {}
+
+func (m *URLFetchRequest_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchRequest_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type URLFetchResponse struct {
+ Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
+ StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
+ Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+ ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
+ ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
+ ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
+ FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
+ ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
+ ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
+ ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
+func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse) ProtoMessage() {}
+
+const Default_URLFetchResponse_ContentWasTruncated bool = false
+const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
+const Default_URLFetchResponse_ApiBytesSent int64 = 0
+const Default_URLFetchResponse_ApiBytesReceived int64 = 0
+
+func (m *URLFetchResponse) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetStatusCode() int32 {
+ if m != nil && m.StatusCode != nil {
+ return *m.StatusCode
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetContentWasTruncated() bool {
+ if m != nil && m.ContentWasTruncated != nil {
+ return *m.ContentWasTruncated
+ }
+ return Default_URLFetchResponse_ContentWasTruncated
+}
+
+func (m *URLFetchResponse) GetExternalBytesSent() int64 {
+ if m != nil && m.ExternalBytesSent != nil {
+ return *m.ExternalBytesSent
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
+ if m != nil && m.ExternalBytesReceived != nil {
+ return *m.ExternalBytesReceived
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetFinalUrl() string {
+ if m != nil && m.FinalUrl != nil {
+ return *m.FinalUrl
+ }
+ return ""
+}
+
+func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
+ if m != nil && m.ApiCpuMilliseconds != nil {
+ return *m.ApiCpuMilliseconds
+ }
+ return Default_URLFetchResponse_ApiCpuMilliseconds
+}
+
+func (m *URLFetchResponse) GetApiBytesSent() int64 {
+ if m != nil && m.ApiBytesSent != nil {
+ return *m.ApiBytesSent
+ }
+ return Default_URLFetchResponse_ApiBytesSent
+}
+
+func (m *URLFetchResponse) GetApiBytesReceived() int64 {
+ if m != nil && m.ApiBytesReceived != nil {
+ return *m.ApiBytesReceived
+ }
+ return Default_URLFetchResponse_ApiBytesReceived
+}
+
+type URLFetchResponse_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
+func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse_Header) ProtoMessage() {}
+
+func (m *URLFetchResponse_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchResponse_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 000000000..f695edf6a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "urlfetch";
+
+package appengine;
+
+message URLFetchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_URL = 1;
+ FETCH_ERROR = 2;
+ UNSPECIFIED_ERROR = 3;
+ RESPONSE_TOO_LARGE = 4;
+ DEADLINE_EXCEEDED = 5;
+ SSL_CERTIFICATE_ERROR = 6;
+ DNS_ERROR = 7;
+ CLOSED = 8;
+ INTERNAL_TRANSIENT_ERROR = 9;
+ TOO_MANY_REDIRECTS = 10;
+ MALFORMED_REPLY = 11;
+ CONNECTION_ERROR = 12;
+ }
+}
+
+message URLFetchRequest {
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ PATCH = 6;
+ }
+ required RequestMethod Method = 1;
+ required string Url = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bytes Payload = 6 [ctype=CORD];
+
+ optional bool FollowRedirects = 7 [default=true];
+
+ optional double Deadline = 8;
+
+ optional bool MustValidateServerCertificate = 9 [default=true];
+}
+
+message URLFetchResponse {
+ optional bytes Content = 1;
+ required int32 StatusCode = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bool ContentWasTruncated = 6 [default=false];
+ optional int64 ExternalBytesSent = 7;
+ optional int64 ExternalBytesReceived = 8;
+
+ optional string FinalUrl = 9;
+
+ optional int64 ApiCpuMilliseconds = 10 [default=0];
+ optional int64 ApiBytesSent = 11 [default=0];
+ optional int64 ApiBytesReceived = 12 [default=0];
+}
diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.pb.go b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go
new file mode 100644
index 000000000..6b52ffcce
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go
@@ -0,0 +1,289 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/user/user_service.proto
+// DO NOT EDIT!
+
+/*
+Package user is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/user/user_service.proto
+
+It has these top-level messages:
+ UserServiceError
+ CreateLoginURLRequest
+ CreateLoginURLResponse
+ CreateLogoutURLRequest
+ CreateLogoutURLResponse
+ GetOAuthUserRequest
+ GetOAuthUserResponse
+ CheckOAuthSignatureRequest
+ CheckOAuthSignatureResponse
+*/
+package user
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type UserServiceError_ErrorCode int32
+
+const (
+ UserServiceError_OK UserServiceError_ErrorCode = 0
+ UserServiceError_REDIRECT_URL_TOO_LONG UserServiceError_ErrorCode = 1
+ UserServiceError_NOT_ALLOWED UserServiceError_ErrorCode = 2
+ UserServiceError_OAUTH_INVALID_TOKEN UserServiceError_ErrorCode = 3
+ UserServiceError_OAUTH_INVALID_REQUEST UserServiceError_ErrorCode = 4
+ UserServiceError_OAUTH_ERROR UserServiceError_ErrorCode = 5
+)
+
+var UserServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "REDIRECT_URL_TOO_LONG",
+ 2: "NOT_ALLOWED",
+ 3: "OAUTH_INVALID_TOKEN",
+ 4: "OAUTH_INVALID_REQUEST",
+ 5: "OAUTH_ERROR",
+}
+var UserServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "REDIRECT_URL_TOO_LONG": 1,
+ "NOT_ALLOWED": 2,
+ "OAUTH_INVALID_TOKEN": 3,
+ "OAUTH_INVALID_REQUEST": 4,
+ "OAUTH_ERROR": 5,
+}
+
+func (x UserServiceError_ErrorCode) Enum() *UserServiceError_ErrorCode {
+ p := new(UserServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x UserServiceError_ErrorCode) String() string {
+ return proto.EnumName(UserServiceError_ErrorCode_name, int32(x))
+}
+func (x *UserServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(UserServiceError_ErrorCode_value, data, "UserServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = UserServiceError_ErrorCode(value)
+ return nil
+}
+
+type UserServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserServiceError) Reset() { *m = UserServiceError{} }
+func (m *UserServiceError) String() string { return proto.CompactTextString(m) }
+func (*UserServiceError) ProtoMessage() {}
+
+type CreateLoginURLRequest struct {
+ DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,3,opt,name=federated_identity,def=" json:"federated_identity,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLoginURLRequest) Reset() { *m = CreateLoginURLRequest{} }
+func (m *CreateLoginURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLRequest) ProtoMessage() {}
+
+func (m *CreateLoginURLRequest) GetDestinationUrl() string {
+ if m != nil && m.DestinationUrl != nil {
+ return *m.DestinationUrl
+ }
+ return ""
+}
+
+func (m *CreateLoginURLRequest) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *CreateLoginURLRequest) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+type CreateLoginURLResponse struct {
+ LoginUrl *string `protobuf:"bytes,1,req,name=login_url" json:"login_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLoginURLResponse) Reset() { *m = CreateLoginURLResponse{} }
+func (m *CreateLoginURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLResponse) ProtoMessage() {}
+
+func (m *CreateLoginURLResponse) GetLoginUrl() string {
+ if m != nil && m.LoginUrl != nil {
+ return *m.LoginUrl
+ }
+ return ""
+}
+
+type CreateLogoutURLRequest struct {
+ DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLogoutURLRequest) Reset() { *m = CreateLogoutURLRequest{} }
+func (m *CreateLogoutURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLRequest) ProtoMessage() {}
+
+func (m *CreateLogoutURLRequest) GetDestinationUrl() string {
+ if m != nil && m.DestinationUrl != nil {
+ return *m.DestinationUrl
+ }
+ return ""
+}
+
+func (m *CreateLogoutURLRequest) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+type CreateLogoutURLResponse struct {
+ LogoutUrl *string `protobuf:"bytes,1,req,name=logout_url" json:"logout_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLogoutURLResponse) Reset() { *m = CreateLogoutURLResponse{} }
+func (m *CreateLogoutURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLResponse) ProtoMessage() {}
+
+func (m *CreateLogoutURLResponse) GetLogoutUrl() string {
+ if m != nil && m.LogoutUrl != nil {
+ return *m.LogoutUrl
+ }
+ return ""
+}
+
+type GetOAuthUserRequest struct {
+ Scope *string `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+ Scopes []string `protobuf:"bytes,2,rep,name=scopes" json:"scopes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetOAuthUserRequest) Reset() { *m = GetOAuthUserRequest{} }
+func (m *GetOAuthUserRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserRequest) ProtoMessage() {}
+
+func (m *GetOAuthUserRequest) GetScope() string {
+ if m != nil && m.Scope != nil {
+ return *m.Scope
+ }
+ return ""
+}
+
+func (m *GetOAuthUserRequest) GetScopes() []string {
+ if m != nil {
+ return m.Scopes
+ }
+ return nil
+}
+
+type GetOAuthUserResponse struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ UserId *string `protobuf:"bytes,2,req,name=user_id" json:"user_id,omitempty"`
+ AuthDomain *string `protobuf:"bytes,3,req,name=auth_domain" json:"auth_domain,omitempty"`
+ UserOrganization *string `protobuf:"bytes,4,opt,name=user_organization,def=" json:"user_organization,omitempty"`
+ IsAdmin *bool `protobuf:"varint,5,opt,name=is_admin,def=0" json:"is_admin,omitempty"`
+ ClientId *string `protobuf:"bytes,6,opt,name=client_id,def=" json:"client_id,omitempty"`
+ Scopes []string `protobuf:"bytes,7,rep,name=scopes" json:"scopes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetOAuthUserResponse) Reset() { *m = GetOAuthUserResponse{} }
+func (m *GetOAuthUserResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserResponse) ProtoMessage() {}
+
+const Default_GetOAuthUserResponse_IsAdmin bool = false
+
+func (m *GetOAuthUserResponse) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserId() string {
+ if m != nil && m.UserId != nil {
+ return *m.UserId
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserOrganization() string {
+ if m != nil && m.UserOrganization != nil {
+ return *m.UserOrganization
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetIsAdmin() bool {
+ if m != nil && m.IsAdmin != nil {
+ return *m.IsAdmin
+ }
+ return Default_GetOAuthUserResponse_IsAdmin
+}
+
+func (m *GetOAuthUserResponse) GetClientId() string {
+ if m != nil && m.ClientId != nil {
+ return *m.ClientId
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetScopes() []string {
+ if m != nil {
+ return m.Scopes
+ }
+ return nil
+}
+
+type CheckOAuthSignatureRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CheckOAuthSignatureRequest) Reset() { *m = CheckOAuthSignatureRequest{} }
+func (m *CheckOAuthSignatureRequest) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureRequest) ProtoMessage() {}
+
+type CheckOAuthSignatureResponse struct {
+ OauthConsumerKey *string `protobuf:"bytes,1,req,name=oauth_consumer_key" json:"oauth_consumer_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CheckOAuthSignatureResponse) Reset() { *m = CheckOAuthSignatureResponse{} }
+func (m *CheckOAuthSignatureResponse) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureResponse) ProtoMessage() {}
+
+func (m *CheckOAuthSignatureResponse) GetOauthConsumerKey() string {
+ if m != nil && m.OauthConsumerKey != nil {
+ return *m.OauthConsumerKey
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.proto b/vendor/google.golang.org/appengine/internal/user/user_service.proto
new file mode 100644
index 000000000..f3e969346
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/user/user_service.proto
@@ -0,0 +1,58 @@
+syntax = "proto2";
+option go_package = "user";
+
+package appengine;
+
+message UserServiceError {
+ enum ErrorCode {
+ OK = 0;
+ REDIRECT_URL_TOO_LONG = 1;
+ NOT_ALLOWED = 2;
+ OAUTH_INVALID_TOKEN = 3;
+ OAUTH_INVALID_REQUEST = 4;
+ OAUTH_ERROR = 5;
+ }
+}
+
+message CreateLoginURLRequest {
+ required string destination_url = 1;
+ optional string auth_domain = 2;
+ optional string federated_identity = 3 [default = ""];
+}
+
+message CreateLoginURLResponse {
+ required string login_url = 1;
+}
+
+message CreateLogoutURLRequest {
+ required string destination_url = 1;
+ optional string auth_domain = 2;
+}
+
+message CreateLogoutURLResponse {
+ required string logout_url = 1;
+}
+
+message GetOAuthUserRequest {
+ optional string scope = 1;
+
+ repeated string scopes = 2;
+}
+
+message GetOAuthUserResponse {
+ required string email = 1;
+ required string user_id = 2;
+ required string auth_domain = 3;
+ optional string user_organization = 4 [default = ""];
+ optional bool is_admin = 5 [default = false];
+ optional string client_id = 6 [default = ""];
+
+ repeated string scopes = 7;
+}
+
+message CheckOAuthSignatureRequest {
+}
+
+message CheckOAuthSignatureResponse {
+ required string oauth_consumer_key = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go
new file mode 100644
index 000000000..6d5b0ae65
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go
@@ -0,0 +1,427 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/xmpp/xmpp_service.proto
+// DO NOT EDIT!
+
+/*
+Package xmpp is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/xmpp/xmpp_service.proto
+
+It has these top-level messages:
+ XmppServiceError
+ PresenceRequest
+ PresenceResponse
+ BulkPresenceRequest
+ BulkPresenceResponse
+ XmppMessageRequest
+ XmppMessageResponse
+ XmppSendPresenceRequest
+ XmppSendPresenceResponse
+ XmppInviteRequest
+ XmppInviteResponse
+*/
+package xmpp
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type XmppServiceError_ErrorCode int32
+
+const (
+ XmppServiceError_UNSPECIFIED_ERROR XmppServiceError_ErrorCode = 1
+ XmppServiceError_INVALID_JID XmppServiceError_ErrorCode = 2
+ XmppServiceError_NO_BODY XmppServiceError_ErrorCode = 3
+ XmppServiceError_INVALID_XML XmppServiceError_ErrorCode = 4
+ XmppServiceError_INVALID_TYPE XmppServiceError_ErrorCode = 5
+ XmppServiceError_INVALID_SHOW XmppServiceError_ErrorCode = 6
+ XmppServiceError_EXCEEDED_MAX_SIZE XmppServiceError_ErrorCode = 7
+ XmppServiceError_APPID_ALIAS_REQUIRED XmppServiceError_ErrorCode = 8
+ XmppServiceError_NONDEFAULT_MODULE XmppServiceError_ErrorCode = 9
+)
+
+var XmppServiceError_ErrorCode_name = map[int32]string{
+ 1: "UNSPECIFIED_ERROR",
+ 2: "INVALID_JID",
+ 3: "NO_BODY",
+ 4: "INVALID_XML",
+ 5: "INVALID_TYPE",
+ 6: "INVALID_SHOW",
+ 7: "EXCEEDED_MAX_SIZE",
+ 8: "APPID_ALIAS_REQUIRED",
+ 9: "NONDEFAULT_MODULE",
+}
+var XmppServiceError_ErrorCode_value = map[string]int32{
+ "UNSPECIFIED_ERROR": 1,
+ "INVALID_JID": 2,
+ "NO_BODY": 3,
+ "INVALID_XML": 4,
+ "INVALID_TYPE": 5,
+ "INVALID_SHOW": 6,
+ "EXCEEDED_MAX_SIZE": 7,
+ "APPID_ALIAS_REQUIRED": 8,
+ "NONDEFAULT_MODULE": 9,
+}
+
+func (x XmppServiceError_ErrorCode) Enum() *XmppServiceError_ErrorCode {
+ p := new(XmppServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x XmppServiceError_ErrorCode) String() string {
+ return proto.EnumName(XmppServiceError_ErrorCode_name, int32(x))
+}
+func (x *XmppServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(XmppServiceError_ErrorCode_value, data, "XmppServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = XmppServiceError_ErrorCode(value)
+ return nil
+}
+
+type PresenceResponse_SHOW int32
+
+const (
+ PresenceResponse_NORMAL PresenceResponse_SHOW = 0
+ PresenceResponse_AWAY PresenceResponse_SHOW = 1
+ PresenceResponse_DO_NOT_DISTURB PresenceResponse_SHOW = 2
+ PresenceResponse_CHAT PresenceResponse_SHOW = 3
+ PresenceResponse_EXTENDED_AWAY PresenceResponse_SHOW = 4
+)
+
+var PresenceResponse_SHOW_name = map[int32]string{
+ 0: "NORMAL",
+ 1: "AWAY",
+ 2: "DO_NOT_DISTURB",
+ 3: "CHAT",
+ 4: "EXTENDED_AWAY",
+}
+var PresenceResponse_SHOW_value = map[string]int32{
+ "NORMAL": 0,
+ "AWAY": 1,
+ "DO_NOT_DISTURB": 2,
+ "CHAT": 3,
+ "EXTENDED_AWAY": 4,
+}
+
+func (x PresenceResponse_SHOW) Enum() *PresenceResponse_SHOW {
+ p := new(PresenceResponse_SHOW)
+ *p = x
+ return p
+}
+func (x PresenceResponse_SHOW) String() string {
+ return proto.EnumName(PresenceResponse_SHOW_name, int32(x))
+}
+func (x *PresenceResponse_SHOW) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PresenceResponse_SHOW_value, data, "PresenceResponse_SHOW")
+ if err != nil {
+ return err
+ }
+ *x = PresenceResponse_SHOW(value)
+ return nil
+}
+
+type XmppMessageResponse_XmppMessageStatus int32
+
+const (
+ XmppMessageResponse_NO_ERROR XmppMessageResponse_XmppMessageStatus = 0
+ XmppMessageResponse_INVALID_JID XmppMessageResponse_XmppMessageStatus = 1
+ XmppMessageResponse_OTHER_ERROR XmppMessageResponse_XmppMessageStatus = 2
+)
+
+var XmppMessageResponse_XmppMessageStatus_name = map[int32]string{
+ 0: "NO_ERROR",
+ 1: "INVALID_JID",
+ 2: "OTHER_ERROR",
+}
+var XmppMessageResponse_XmppMessageStatus_value = map[string]int32{
+ "NO_ERROR": 0,
+ "INVALID_JID": 1,
+ "OTHER_ERROR": 2,
+}
+
+func (x XmppMessageResponse_XmppMessageStatus) Enum() *XmppMessageResponse_XmppMessageStatus {
+ p := new(XmppMessageResponse_XmppMessageStatus)
+ *p = x
+ return p
+}
+func (x XmppMessageResponse_XmppMessageStatus) String() string {
+ return proto.EnumName(XmppMessageResponse_XmppMessageStatus_name, int32(x))
+}
+func (x *XmppMessageResponse_XmppMessageStatus) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(XmppMessageResponse_XmppMessageStatus_value, data, "XmppMessageResponse_XmppMessageStatus")
+ if err != nil {
+ return err
+ }
+ *x = XmppMessageResponse_XmppMessageStatus(value)
+ return nil
+}
+
+type XmppServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppServiceError) Reset() { *m = XmppServiceError{} }
+func (m *XmppServiceError) String() string { return proto.CompactTextString(m) }
+func (*XmppServiceError) ProtoMessage() {}
+
+type PresenceRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PresenceRequest) Reset() { *m = PresenceRequest{} }
+func (m *PresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*PresenceRequest) ProtoMessage() {}
+
+func (m *PresenceRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *PresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type PresenceResponse struct {
+ IsAvailable *bool `protobuf:"varint,1,req,name=is_available" json:"is_available,omitempty"`
+ Presence *PresenceResponse_SHOW `protobuf:"varint,2,opt,name=presence,enum=appengine.PresenceResponse_SHOW" json:"presence,omitempty"`
+ Valid *bool `protobuf:"varint,3,opt,name=valid" json:"valid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PresenceResponse) Reset() { *m = PresenceResponse{} }
+func (m *PresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*PresenceResponse) ProtoMessage() {}
+
+func (m *PresenceResponse) GetIsAvailable() bool {
+ if m != nil && m.IsAvailable != nil {
+ return *m.IsAvailable
+ }
+ return false
+}
+
+func (m *PresenceResponse) GetPresence() PresenceResponse_SHOW {
+ if m != nil && m.Presence != nil {
+ return *m.Presence
+ }
+ return PresenceResponse_NORMAL
+}
+
+func (m *PresenceResponse) GetValid() bool {
+ if m != nil && m.Valid != nil {
+ return *m.Valid
+ }
+ return false
+}
+
+type BulkPresenceRequest struct {
+ Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BulkPresenceRequest) Reset() { *m = BulkPresenceRequest{} }
+func (m *BulkPresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*BulkPresenceRequest) ProtoMessage() {}
+
+func (m *BulkPresenceRequest) GetJid() []string {
+ if m != nil {
+ return m.Jid
+ }
+ return nil
+}
+
+func (m *BulkPresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type BulkPresenceResponse struct {
+ PresenceResponse []*PresenceResponse `protobuf:"bytes,1,rep,name=presence_response" json:"presence_response,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BulkPresenceResponse) Reset() { *m = BulkPresenceResponse{} }
+func (m *BulkPresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*BulkPresenceResponse) ProtoMessage() {}
+
+func (m *BulkPresenceResponse) GetPresenceResponse() []*PresenceResponse {
+ if m != nil {
+ return m.PresenceResponse
+ }
+ return nil
+}
+
+type XmppMessageRequest struct {
+ Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"`
+ Body *string `protobuf:"bytes,2,req,name=body" json:"body,omitempty"`
+ RawXml *bool `protobuf:"varint,3,opt,name=raw_xml,def=0" json:"raw_xml,omitempty"`
+ Type *string `protobuf:"bytes,4,opt,name=type,def=chat" json:"type,omitempty"`
+ FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppMessageRequest) Reset() { *m = XmppMessageRequest{} }
+func (m *XmppMessageRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppMessageRequest) ProtoMessage() {}
+
+const Default_XmppMessageRequest_RawXml bool = false
+const Default_XmppMessageRequest_Type string = "chat"
+
+func (m *XmppMessageRequest) GetJid() []string {
+ if m != nil {
+ return m.Jid
+ }
+ return nil
+}
+
+func (m *XmppMessageRequest) GetBody() string {
+ if m != nil && m.Body != nil {
+ return *m.Body
+ }
+ return ""
+}
+
+func (m *XmppMessageRequest) GetRawXml() bool {
+ if m != nil && m.RawXml != nil {
+ return *m.RawXml
+ }
+ return Default_XmppMessageRequest_RawXml
+}
+
+func (m *XmppMessageRequest) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_XmppMessageRequest_Type
+}
+
+func (m *XmppMessageRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppMessageResponse struct {
+ Status []XmppMessageResponse_XmppMessageStatus `protobuf:"varint,1,rep,name=status,enum=appengine.XmppMessageResponse_XmppMessageStatus" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppMessageResponse) Reset() { *m = XmppMessageResponse{} }
+func (m *XmppMessageResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppMessageResponse) ProtoMessage() {}
+
+func (m *XmppMessageResponse) GetStatus() []XmppMessageResponse_XmppMessageStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type XmppSendPresenceRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ Type *string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"`
+ Show *string `protobuf:"bytes,3,opt,name=show" json:"show,omitempty"`
+ Status *string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
+ FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppSendPresenceRequest) Reset() { *m = XmppSendPresenceRequest{} }
+func (m *XmppSendPresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppSendPresenceRequest) ProtoMessage() {}
+
+func (m *XmppSendPresenceRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetShow() string {
+ if m != nil && m.Show != nil {
+ return *m.Show
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppSendPresenceResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppSendPresenceResponse) Reset() { *m = XmppSendPresenceResponse{} }
+func (m *XmppSendPresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppSendPresenceResponse) ProtoMessage() {}
+
+type XmppInviteRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppInviteRequest) Reset() { *m = XmppInviteRequest{} }
+func (m *XmppInviteRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppInviteRequest) ProtoMessage() {}
+
+func (m *XmppInviteRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *XmppInviteRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppInviteResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppInviteResponse) Reset() { *m = XmppInviteResponse{} }
+func (m *XmppInviteResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppInviteResponse) ProtoMessage() {}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto
new file mode 100644
index 000000000..472d52ebf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto
@@ -0,0 +1,83 @@
+syntax = "proto2";
+option go_package = "xmpp";
+
+package appengine;
+
+message XmppServiceError {
+ enum ErrorCode {
+ UNSPECIFIED_ERROR = 1;
+ INVALID_JID = 2;
+ NO_BODY = 3;
+ INVALID_XML = 4;
+ INVALID_TYPE = 5;
+ INVALID_SHOW = 6;
+ EXCEEDED_MAX_SIZE = 7;
+ APPID_ALIAS_REQUIRED = 8;
+ NONDEFAULT_MODULE = 9;
+ }
+}
+
+message PresenceRequest {
+ required string jid = 1;
+ optional string from_jid = 2;
+}
+
+message PresenceResponse {
+ enum SHOW {
+ NORMAL = 0;
+ AWAY = 1;
+ DO_NOT_DISTURB = 2;
+ CHAT = 3;
+ EXTENDED_AWAY = 4;
+ }
+
+ required bool is_available = 1;
+ optional SHOW presence = 2;
+ optional bool valid = 3;
+}
+
+message BulkPresenceRequest {
+ repeated string jid = 1;
+ optional string from_jid = 2;
+}
+
+message BulkPresenceResponse {
+ repeated PresenceResponse presence_response = 1;
+}
+
+message XmppMessageRequest {
+ repeated string jid = 1;
+ required string body = 2;
+ optional bool raw_xml = 3 [ default = false ];
+ optional string type = 4 [ default = "chat" ];
+ optional string from_jid = 5;
+}
+
+message XmppMessageResponse {
+ enum XmppMessageStatus {
+ NO_ERROR = 0;
+ INVALID_JID = 1;
+ OTHER_ERROR = 2;
+ }
+
+ repeated XmppMessageStatus status = 1;
+}
+
+message XmppSendPresenceRequest {
+ required string jid = 1;
+ optional string type = 2;
+ optional string show = 3;
+ optional string status = 4;
+ optional string from_jid = 5;
+}
+
+message XmppSendPresenceResponse {
+}
+
+message XmppInviteRequest {
+ required string jid = 1;
+ optional string from_jid = 2;
+}
+
+message XmppInviteResponse {
+}
diff --git a/vendor/google.golang.org/appengine/log/api.go b/vendor/google.golang.org/appengine/log/api.go
new file mode 100644
index 000000000..24d58601b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/api.go
@@ -0,0 +1,40 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package log
+
+// This file implements the logging API.
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Debugf formats its arguments according to the format, analogous to fmt.Printf,
+// and records the text as a log message at Debug level. The message will be associated
+// with the request linked with the provided context.
+func Debugf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 0, format, args...)
+}
+
+// Infof is like Debugf, but at Info level.
+func Infof(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 1, format, args...)
+}
+
+// Warningf is like Debugf, but at Warning level.
+func Warningf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 2, format, args...)
+}
+
+// Errorf is like Debugf, but at Error level.
+func Errorf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 3, format, args...)
+}
+
+// Criticalf is like Debugf, but at Critical level.
+func Criticalf(ctx context.Context, format string, args ...interface{}) {
+ internal.Logf(ctx, 4, format, args...)
+}
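+
+// Illustrative usage from an HTTP handler; the handler variables are
+// assumptions, not part of this file:
+//
+//	c := appengine.NewContext(r)
+//	log.Infof(c, "serving %s", r.URL.Path)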
diff --git a/vendor/google.golang.org/appengine/log/log.go b/vendor/google.golang.org/appengine/log/log.go
new file mode 100644
index 000000000..731ad8c36
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/log.go
@@ -0,0 +1,323 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package log provides the means of writing and querying an application's logs
+from within an App Engine application.
+
+Example:
+ c := appengine.NewContext(r)
+ query := &log.Query{
+ AppLogs: true,
+ Versions: []string{"1"},
+ }
+
+ for results := query.Run(c); ; {
+ record, err := results.Next()
+ if err == log.Done {
+ log.Infof(c, "Done processing results")
+ break
+ }
+ if err != nil {
+ log.Errorf(c, "Failed to retrieve next log: %v", err)
+ break
+ }
+ log.Infof(c, "Saw record %v", record)
+ }
+*/
+package log // import "google.golang.org/appengine/log"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/log"
+)
+
+// Query defines a logs query.
+type Query struct {
+	// StartTime specifies the earliest log to return (inclusive).
+ StartTime time.Time
+
+	// EndTime specifies the latest log to return (exclusive).
+ EndTime time.Time
+
+ // Offset specifies a position within the log stream to resume reading from,
+ // and should come from a previously returned Record's field of the same name.
+ Offset []byte
+
+ // Incomplete controls whether active (incomplete) requests should be included.
+ Incomplete bool
+
+ // AppLogs indicates if application-level logs should be included.
+ AppLogs bool
+
+ // ApplyMinLevel indicates if MinLevel should be used to filter results.
+ ApplyMinLevel bool
+
+ // If ApplyMinLevel is true, only logs for requests with at least one
+ // application log of MinLevel or higher will be returned.
+ MinLevel int
+
+	// Versions is the list of major version IDs whose logs should be retrieved.
+	// Logs for specific modules can be retrieved by specifying versions
+	// in the form "module:version"; the default module is used if no module
+ // is specified.
+ Versions []string
+
+ // A list of requests to search for instead of a time-based scan. Cannot be
+ // combined with filtering options such as StartTime, EndTime, Offset,
+ // Incomplete, ApplyMinLevel, or Versions.
+ RequestIDs []string
+}
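+
+// Illustrative only: the "module:version" form described on Versions above,
+// with placeholder module and version names:
+//
+//	q := &log.Query{
+//		AppLogs:  true,
+//		Versions: []string{"mymodule:v2"},
+//	}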
+
+// AppLog represents a single application-level log.
+type AppLog struct {
+ Time time.Time
+ Level int
+ Message string
+}
+
+// Record contains all the information for a single web request.
+type Record struct {
+ AppID string
+ ModuleID string
+ VersionID string
+ RequestID []byte
+ IP string
+ Nickname string
+ AppEngineRelease string
+
+ // The time when this request started.
+ StartTime time.Time
+
+ // The time when this request finished.
+ EndTime time.Time
+
+ // Opaque cursor into the result stream.
+ Offset []byte
+
+ // The time required to process the request.
+ Latency time.Duration
+ MCycles int64
+ Method string
+ Resource string
+ HTTPVersion string
+ Status int32
+
+ // The size of the response sent back to the client, in bytes.
+ ResponseSize int64
+ Referrer string
+ UserAgent string
+ URLMapEntry string
+ Combined string
+ Host string
+
+ // The estimated cost of this request, in dollars.
+ Cost float64
+ TaskQueueName string
+ TaskName string
+ WasLoadingRequest bool
+ PendingTime time.Duration
+ Finished bool
+ AppLogs []AppLog
+
+ // Mostly-unique identifier for the instance that handled the request, if available.
+ InstanceID string
+}
+
+// Result represents the result of a query.
+type Result struct {
+ logs []*Record
+ context context.Context
+ request *pb.LogReadRequest
+ resultsSeen bool
+ err error
+}
+
+// Next returns the next log record, or Done if there are no more records.
+func (qr *Result) Next() (*Record, error) {
+ if qr.err != nil {
+ return nil, qr.err
+ }
+ if len(qr.logs) > 0 {
+ lr := qr.logs[0]
+ qr.logs = qr.logs[1:]
+ return lr, nil
+ }
+
+ if qr.request.Offset == nil && qr.resultsSeen {
+ return nil, Done
+ }
+
+ if err := qr.run(); err != nil {
+ // Errors here may be retried, so don't store the error.
+ return nil, err
+ }
+
+ return qr.Next()
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("log: query has no more results")
+
+// protoToAppLogs takes as input an array of pointers to LogLines, the internal
+// Protocol Buffer representation of a single application-level log,
+// and converts it to an array of AppLogs, the external representation
+// of an application-level log.
+func protoToAppLogs(logLines []*pb.LogLine) []AppLog {
+ appLogs := make([]AppLog, len(logLines))
+
+ for i, line := range logLines {
+ appLogs[i] = AppLog{
+ Time: time.Unix(0, *line.Time*1e3),
+ Level: int(*line.Level),
+ Message: *line.LogMessage,
+ }
+ }
+
+ return appLogs
+}
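A detail worth pausing on: the proto carries timestamps as microseconds since the Unix epoch, while time.Unix takes seconds and nanoseconds, hence the *1e3. A minimal sketch of the same conversion (the helper name is ours), checked against the fixture in TestProtoToRecord further down:

    package sample

    import "time"

    // protoMicrosToTime mirrors the conversion in protoToAppLogs and protoToRecord.
    func protoMicrosToTime(us int64) time.Time {
        return time.Unix(0, us*1e3) // microseconds -> nanoseconds
    }

    // protoMicrosToTime(431044244000000).UTC() is 1983-08-29 22:30:44 +0000 UTC.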
+
+// protoToRecord converts a RequestLog, the internal Protocol Buffer
+// representation of a single request-level log, to a Record, its
+// corresponding external representation.
+func protoToRecord(rl *pb.RequestLog) *Record {
+ offset, err := proto.Marshal(rl.Offset)
+ if err != nil {
+ offset = nil
+ }
+ return &Record{
+ AppID: *rl.AppId,
+ ModuleID: rl.GetModuleId(),
+ VersionID: *rl.VersionId,
+ RequestID: rl.RequestId,
+ Offset: offset,
+ IP: *rl.Ip,
+ Nickname: rl.GetNickname(),
+ AppEngineRelease: string(rl.GetAppEngineRelease()),
+ StartTime: time.Unix(0, *rl.StartTime*1e3),
+ EndTime: time.Unix(0, *rl.EndTime*1e3),
+ Latency: time.Duration(*rl.Latency) * time.Microsecond,
+ MCycles: *rl.Mcycles,
+ Method: *rl.Method,
+ Resource: *rl.Resource,
+ HTTPVersion: *rl.HttpVersion,
+ Status: *rl.Status,
+ ResponseSize: *rl.ResponseSize,
+ Referrer: rl.GetReferrer(),
+ UserAgent: rl.GetUserAgent(),
+ URLMapEntry: *rl.UrlMapEntry,
+ Combined: *rl.Combined,
+ Host: rl.GetHost(),
+ Cost: rl.GetCost(),
+ TaskQueueName: rl.GetTaskQueueName(),
+ TaskName: rl.GetTaskName(),
+ WasLoadingRequest: rl.GetWasLoadingRequest(),
+ PendingTime: time.Duration(rl.GetPendingTime()) * time.Microsecond,
+ Finished: rl.GetFinished(),
+ AppLogs: protoToAppLogs(rl.Line),
+ InstanceID: string(rl.GetCloneKey()),
+ }
+}
+
+// Run starts a query for log records, which contain request and application
+// level log information.
+func (params *Query) Run(c context.Context) *Result {
+ req, err := makeRequest(params, internal.FullyQualifiedAppID(c), appengine.VersionID(c))
+ return &Result{
+ context: c,
+ request: req,
+ err: err,
+ }
+}
+
+func makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) {
+ req := &pb.LogReadRequest{}
+ req.AppId = &appID
+ if !params.StartTime.IsZero() {
+ req.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3)
+ }
+ if !params.EndTime.IsZero() {
+ req.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3)
+ }
+ if len(params.Offset) > 0 {
+ var offset pb.LogOffset
+ if err := proto.Unmarshal(params.Offset, &offset); err != nil {
+ return nil, fmt.Errorf("bad Offset: %v", err)
+ }
+ req.Offset = &offset
+ }
+ if params.Incomplete {
+ req.IncludeIncomplete = &params.Incomplete
+ }
+ if params.AppLogs {
+ req.IncludeAppLogs = &params.AppLogs
+ }
+ if params.ApplyMinLevel {
+ req.MinimumLogLevel = proto.Int32(int32(params.MinLevel))
+ }
+ if params.Versions == nil {
+ // If no versions were specified, default to the default module at
+ // the major version being used by this module.
+ if i := strings.Index(versionID, "."); i >= 0 {
+ versionID = versionID[:i]
+ }
+ req.VersionId = []string{versionID}
+ } else {
+ req.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions))
+ for _, v := range params.Versions {
+ var m *string
+ if i := strings.Index(v, ":"); i >= 0 {
+ m, v = proto.String(v[:i]), v[i+1:]
+ }
+ req.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{
+ ModuleId: m,
+ VersionId: proto.String(v),
+ })
+ }
+ }
+ if params.RequestIDs != nil {
+ ids := make([][]byte, len(params.RequestIDs))
+ for i, v := range params.RequestIDs {
+ ids[i] = []byte(v)
+ }
+ req.RequestId = ids
+ }
+
+ return req, nil
+}
+
+// run takes the query Result produced by a call to Run and updates it with
+// more Records. The updated Result contains a new set of logs as well as an
+// offset to where more logs can be found. We also convert the items in the
+// response from their internal representations to external versions of the
+// same structs.
+func (r *Result) run() error {
+ res := &pb.LogReadResponse{}
+ if err := internal.Call(r.context, "logservice", "Read", r.request, res); err != nil {
+ return err
+ }
+
+ r.logs = make([]*Record, len(res.Log))
+ r.request.Offset = res.Offset
+ r.resultsSeen = true
+
+ for i, log := range res.Log {
+ r.logs[i] = protoToRecord(log)
+ }
+
+ return nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("logservice", pb.LogServiceError_ErrorCode_name)
+}
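Because Record.Offset and Query.Offset share a format, iteration is resumable across processes. A hedged sketch of that checkpointing pattern (the function and its handling of savedOffset are assumptions, not part of the vendored code):

    package sample

    import (
        "golang.org/x/net/context"

        "google.golang.org/appengine/log"
    )

    // resumeLogs continues a scan from a cursor saved by an earlier run and
    // returns the newest cursor seen.
    func resumeLogs(c context.Context, savedOffset []byte) ([]byte, error) {
        q := &log.Query{AppLogs: true, Offset: savedOffset}
        res := q.Run(c)
        for {
            rec, err := res.Next()
            if err == log.Done {
                return savedOffset, nil
            }
            if err != nil {
                return savedOffset, err
            }
            savedOffset = rec.Offset // checkpoint after each record
        }
    }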
diff --git a/vendor/google.golang.org/appengine/log/log_test.go b/vendor/google.golang.org/appengine/log/log_test.go
new file mode 100644
index 000000000..726468e23
--- /dev/null
+++ b/vendor/google.golang.org/appengine/log/log_test.go
@@ -0,0 +1,112 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ pb "google.golang.org/appengine/internal/log"
+)
+
+func TestQueryToRequest(t *testing.T) {
+ testCases := []struct {
+ desc string
+ query *Query
+ want *pb.LogReadRequest
+ }{
+ {
+ desc: "Empty",
+ query: &Query{},
+ want: &pb.LogReadRequest{
+ AppId: proto.String("s~fake"),
+ VersionId: []string{"v12"},
+ },
+ },
+ {
+ desc: "Versions",
+ query: &Query{
+ Versions: []string{"alpha", "backend:beta"},
+ },
+ want: &pb.LogReadRequest{
+ AppId: proto.String("s~fake"),
+ ModuleVersion: []*pb.LogModuleVersion{
+ {
+ VersionId: proto.String("alpha"),
+ }, {
+ ModuleId: proto.String("backend"),
+ VersionId: proto.String("beta"),
+ },
+ },
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ req, err := makeRequest(tt.query, "s~fake", "v12")
+
+ if err != nil {
+ t.Errorf("%s: got err %v, want nil", tt.desc, err)
+ continue
+ }
+ if !proto.Equal(req, tt.want) {
+ t.Errorf("%s request:\ngot %v\nwant %v", tt.desc, req, tt.want)
+ }
+ }
+}
+
+func TestProtoToRecord(t *testing.T) {
+ // We deliberately leave ModuleId and other optional fields unset.
+ p := &pb.RequestLog{
+ AppId: proto.String("s~fake"),
+ VersionId: proto.String("1"),
+ RequestId: []byte("deadbeef"),
+ Ip: proto.String("127.0.0.1"),
+ StartTime: proto.Int64(431044244000000),
+ EndTime: proto.Int64(431044724000000),
+ Latency: proto.Int64(480000000),
+ Mcycles: proto.Int64(7),
+ Method: proto.String("GET"),
+ Resource: proto.String("/app"),
+ HttpVersion: proto.String("1.1"),
+ Status: proto.Int32(418),
+ ResponseSize: proto.Int64(1337),
+ UrlMapEntry: proto.String("_go_app"),
+ Combined: proto.String("apache log"),
+ }
+ // Sanity check that all required fields are set.
+ if _, err := proto.Marshal(p); err != nil {
+ t.Fatalf("proto.Marshal: %v", err)
+ }
+ want := &Record{
+ AppID: "s~fake",
+ ModuleID: "default",
+ VersionID: "1",
+ RequestID: []byte("deadbeef"),
+ IP: "127.0.0.1",
+ StartTime: time.Date(1983, 8, 29, 22, 30, 44, 0, time.UTC),
+ EndTime: time.Date(1983, 8, 29, 22, 38, 44, 0, time.UTC),
+ Latency: 8 * time.Minute,
+ MCycles: 7,
+ Method: "GET",
+ Resource: "/app",
+ HTTPVersion: "1.1",
+ Status: 418,
+ ResponseSize: 1337,
+ URLMapEntry: "_go_app",
+ Combined: "apache log",
+ Finished: true,
+ AppLogs: []AppLog{},
+ }
+ got := protoToRecord(p)
+ // Coerce locations to UTC since otherwise they will be in local time.
+ got.StartTime, got.EndTime = got.StartTime.UTC(), got.EndTime.UTC()
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("protoToRecord:\ngot: %v\nwant: %v", got, want)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/mail/mail.go b/vendor/google.golang.org/appengine/mail/mail.go
new file mode 100644
index 000000000..1ce1e8706
--- /dev/null
+++ b/vendor/google.golang.org/appengine/mail/mail.go
@@ -0,0 +1,123 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package mail provides the means of sending email from an
+App Engine application.
+
+Example:
+ msg := &mail.Message{
+ Sender: "romeo@montague.com",
+ To: []string{"Juliet <juliet@capulet.org>"},
+ Subject: "See you tonight",
+ Body: "Don't forget our plans. Hark, 'til later.",
+ }
+ if err := mail.Send(c, msg); err != nil {
+ log.Errorf(c, "Alas, my user, the email failed to sendeth: %v", err)
+ }
+*/
+package mail // import "google.golang.org/appengine/mail"
+
+import (
+ "net/mail"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ bpb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/mail"
+)
+
+// A Message represents an email message.
+// Addresses may be of any form permitted by RFC 822.
+type Message struct {
+ // Sender must be set, and must be either an application admin
+ // or the currently signed-in user.
+ Sender string
+ ReplyTo string // may be empty
+
+ // At least one of these slices must have a non-zero length,
+ // except when calling SendToAdmins.
+ To, Cc, Bcc []string
+
+ Subject string
+
+ // At least one of Body or HTMLBody must be non-empty.
+ Body string
+ HTMLBody string
+
+ Attachments []Attachment
+
+ // Extra mail headers.
+ // See https://cloud.google.com/appengine/docs/standard/go/mail/
+ // for permissible headers.
+ Headers mail.Header
+}
+
+// An Attachment represents an email attachment.
+type Attachment struct {
+ // Name must be set to a valid file name.
+ Name string
+ Data []byte
+ ContentID string
+}
+
+// Send sends an email message.
+func Send(c context.Context, msg *Message) error {
+ return send(c, "Send", msg)
+}
+
+// SendToAdmins sends an email message to the application's administrators.
+func SendToAdmins(c context.Context, msg *Message) error {
+ return send(c, "SendToAdmins", msg)
+}
+
+func send(c context.Context, method string, msg *Message) error {
+ req := &pb.MailMessage{
+ Sender: &msg.Sender,
+ To: msg.To,
+ Cc: msg.Cc,
+ Bcc: msg.Bcc,
+ Subject: &msg.Subject,
+ }
+ if msg.ReplyTo != "" {
+ req.ReplyTo = &msg.ReplyTo
+ }
+ if msg.Body != "" {
+ req.TextBody = &msg.Body
+ }
+ if msg.HTMLBody != "" {
+ req.HtmlBody = &msg.HTMLBody
+ }
+ if len(msg.Attachments) > 0 {
+ req.Attachment = make([]*pb.MailAttachment, len(msg.Attachments))
+ for i, att := range msg.Attachments {
+ req.Attachment[i] = &pb.MailAttachment{
+ FileName: proto.String(att.Name),
+ Data: att.Data,
+ }
+ if att.ContentID != "" {
+ req.Attachment[i].ContentID = proto.String(att.ContentID)
+ }
+ }
+ }
+ for key, vs := range msg.Headers {
+ for _, v := range vs {
+ req.Header = append(req.Header, &pb.MailHeader{
+ Name: proto.String(key),
+ Value: proto.String(v),
+ })
+ }
+ }
+ res := &bpb.VoidProto{}
+ if err := internal.Call(c, "mail", method, req, res); err != nil {
+ return err
+ }
+ return nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("mail", pb.MailServiceError_ErrorCode_name)
+}
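The package doc shows a plain-text send; attachments follow the same path through send(). A sketch with one attachment (addresses and contents are placeholders):

    package sample

    import (
        "golang.org/x/net/context"

        "google.golang.org/appengine/mail"
    )

    func sendReport(c context.Context, csv []byte) error {
        msg := &mail.Message{
            Sender:  "reports@example.com", // must be an app admin or the signed-in user
            To:      []string{"ops@example.com"},
            Subject: "Daily report",
            Body:    "Report attached.",
            Attachments: []mail.Attachment{
                {Name: "report.csv", Data: csv},
            },
        }
        return mail.Send(c, msg)
    }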
diff --git a/vendor/google.golang.org/appengine/mail/mail_test.go b/vendor/google.golang.org/appengine/mail/mail_test.go
new file mode 100644
index 000000000..7502c5973
--- /dev/null
+++ b/vendor/google.golang.org/appengine/mail/mail_test.go
@@ -0,0 +1,65 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package mail
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal/aetesting"
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/mail"
+)
+
+func TestMessageConstruction(t *testing.T) {
+ var got *pb.MailMessage
+ c := aetesting.FakeSingleContext(t, "mail", "Send", func(in *pb.MailMessage, out *basepb.VoidProto) error {
+ got = in
+ return nil
+ })
+
+ msg := &Message{
+ Sender: "dsymonds@example.com",
+ To: []string{"nigeltao@example.com"},
+ Body: "Hey, lunch time?",
+ Attachments: []Attachment{
+ // Regression test for a prod bug. The address of a range variable was used when
+ // constructing the outgoing proto, so multiple attachments used the same name.
+ {
+ Name: "att1.txt",
+ Data: []byte("data1"),
+ ContentID: "<att1>",
+ },
+ {
+ Name: "att2.txt",
+ Data: []byte("data2"),
+ },
+ },
+ }
+ if err := Send(c, msg); err != nil {
+ t.Fatalf("Send: %v", err)
+ }
+ want := &pb.MailMessage{
+ Sender: proto.String("dsymonds@example.com"),
+ To: []string{"nigeltao@example.com"},
+ Subject: proto.String(""),
+ TextBody: proto.String("Hey, lunch time?"),
+ Attachment: []*pb.MailAttachment{
+ {
+ FileName: proto.String("att1.txt"),
+ Data: []byte("data1"),
+ ContentID: proto.String("<att1>"),
+ },
+ {
+ FileName: proto.String("att2.txt"),
+ Data: []byte("data2"),
+ },
+ },
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("Bad proto for %+v\n got %v\nwant %v", msg, got, want)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/memcache/memcache.go b/vendor/google.golang.org/appengine/memcache/memcache.go
new file mode 100644
index 000000000..d8eed4be7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/memcache/memcache.go
@@ -0,0 +1,526 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package memcache provides a client for App Engine's distributed in-memory
+// key-value store for small chunks of arbitrary data.
+//
+// The fundamental operations get and set items, keyed by a string.
+//
+// item0, err := memcache.Get(c, "key")
+// if err != nil && err != memcache.ErrCacheMiss {
+// return err
+// }
+// if err == nil {
+// fmt.Fprintf(w, "memcache hit: Key=%q Val=[% x]\n", item0.Key, item0.Value)
+// } else {
+// fmt.Fprintf(w, "memcache miss\n")
+// }
+//
+// and
+//
+// item1 := &memcache.Item{
+// Key: "foo",
+// Value: []byte("bar"),
+// }
+// if err := memcache.Set(c, item1); err != nil {
+// return err
+// }
+package memcache // import "google.golang.org/appengine/memcache"
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "errors"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/memcache"
+)
+
+var (
+ // ErrCacheMiss means that an operation failed
+ // because the item wasn't present.
+ ErrCacheMiss = errors.New("memcache: cache miss")
+ // ErrCASConflict means that a CompareAndSwap call failed due to the
+ // cached value being modified between the Get and the CompareAndSwap.
+ // If the cached value was simply evicted rather than replaced,
+ // ErrNotStored will be returned instead.
+ ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
+ // ErrNoStats means that no statistics were available.
+ ErrNoStats = errors.New("memcache: no statistics available")
+ // ErrNotStored means that a conditional write operation (i.e. Add or
+ // CompareAndSwap) failed because the condition was not satisfied.
+ ErrNotStored = errors.New("memcache: item not stored")
+ // ErrServerError means that a server error occurred.
+ ErrServerError = errors.New("memcache: server error")
+)
+
+// Item is the unit of memcache gets and sets.
+type Item struct {
+ // Key is the Item's key (250 bytes maximum).
+ Key string
+ // Value is the Item's value.
+ Value []byte
+ // Object is the Item's value for use with a Codec.
+ Object interface{}
+ // Flags are server-opaque flags whose semantics are entirely up to the
+ // App Engine app.
+ Flags uint32
+ // Expiration is the maximum duration that the item will stay
+ // in the cache.
+ // The zero value means the Item has no expiration time.
+ // Subsecond precision is ignored.
+ // This is not set when getting items.
+ Expiration time.Duration
+ // casID is a client-opaque value used for compare-and-swap operations.
+ // Zero means that compare-and-swap is not used.
+ casID uint64
+}
+
+const (
+ secondsIn30Years = 60 * 60 * 24 * 365 * 30 // from memcache server code
+ thirtyYears = time.Duration(secondsIn30Years) * time.Second
+)
+
+// protoToItem converts a protocol buffer item to a Go struct.
+func protoToItem(p *pb.MemcacheGetResponse_Item) *Item {
+ return &Item{
+ Key: string(p.Key),
+ Value: p.Value,
+ Flags: p.GetFlags(),
+ casID: p.GetCasId(),
+ }
+}
+
+// If err is an appengine.MultiError, return its first element. Otherwise, return err.
+func singleError(err error) error {
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a memcache
+// cache miss. The key must be at most 250 bytes in length.
+func Get(c context.Context, key string) (*Item, error) {
+ m, err := GetMulti(c, []string{key})
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := m[key]; !ok {
+ return nil, ErrCacheMiss
+ }
+ return m[key], nil
+}
+
+// GetMulti is a batch version of Get. The returned map from keys to items may
+// have fewer elements than the input slice, due to memcache cache misses.
+// Each key must be at most 250 bytes in length.
+func GetMulti(c context.Context, key []string) (map[string]*Item, error) {
+ if len(key) == 0 {
+ return nil, nil
+ }
+ keyAsBytes := make([][]byte, len(key))
+ for i, k := range key {
+ keyAsBytes[i] = []byte(k)
+ }
+ req := &pb.MemcacheGetRequest{
+ Key: keyAsBytes,
+ ForCas: proto.Bool(true),
+ }
+ res := &pb.MemcacheGetResponse{}
+ if err := internal.Call(c, "memcache", "Get", req, res); err != nil {
+ return nil, err
+ }
+ m := make(map[string]*Item, len(res.Item))
+ for _, p := range res.Item {
+ t := protoToItem(p)
+ m[t.Key] = t
+ }
+ return m, nil
+}
+
+// Delete deletes the item for the given key.
+// ErrCacheMiss is returned if the specified item cannot be found.
+// The key must be at most 250 bytes in length.
+func Delete(c context.Context, key string) error {
+ return singleError(DeleteMulti(c, []string{key}))
+}
+
+// DeleteMulti is a batch version of Delete.
+// If any keys cannot be found, an appengine.MultiError is returned.
+// Each key must be at most 250 bytes in length.
+func DeleteMulti(c context.Context, key []string) error {
+ if len(key) == 0 {
+ return nil
+ }
+ req := &pb.MemcacheDeleteRequest{
+ Item: make([]*pb.MemcacheDeleteRequest_Item, len(key)),
+ }
+ for i, k := range key {
+ req.Item[i] = &pb.MemcacheDeleteRequest_Item{Key: []byte(k)}
+ }
+ res := &pb.MemcacheDeleteResponse{}
+ if err := internal.Call(c, "memcache", "Delete", req, res); err != nil {
+ return err
+ }
+ if len(res.DeleteStatus) != len(key) {
+ return ErrServerError
+ }
+ me, any := make(appengine.MultiError, len(key)), false
+ for i, s := range res.DeleteStatus {
+ switch s {
+ case pb.MemcacheDeleteResponse_DELETED:
+ // OK
+ case pb.MemcacheDeleteResponse_NOT_FOUND:
+ me[i] = ErrCacheMiss
+ any = true
+ default:
+ me[i] = ErrServerError
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
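Since DeleteMulti reports per-key outcomes through appengine.MultiError, callers that tolerate misses have to unpack it. A sketch that treats ErrCacheMiss entries as harmless (the helper is an assumption):

    package sample

    import (
        "golang.org/x/net/context"

        "google.golang.org/appengine"
        "google.golang.org/appengine/memcache"
    )

    func deleteBestEffort(c context.Context, keys []string) error {
        err := memcache.DeleteMulti(c, keys)
        me, ok := err.(appengine.MultiError)
        if !ok {
            return err // nil, or a failure that isn't per-key
        }
        for _, e := range me {
            if e != nil && e != memcache.ErrCacheMiss {
                return err // a real per-key failure
            }
        }
        return nil // only misses, which we ignore
    }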
+
+// Increment atomically increments the decimal value in the given key
+// by delta and returns the new value. The value must fit in a uint64.
+// Overflow wraps around, and underflow is capped to zero. The
+// provided delta may be negative. If the key doesn't exist in
+// memcache, the provided initial value is used to atomically
+// populate it before the delta is applied.
+// The key must be at most 250 bytes in length.
+func Increment(c context.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) {
+ return incr(c, key, delta, &initialValue)
+}
+
+// IncrementExisting works like Increment but assumes that the key
+// already exists in memcache and doesn't take an initial value.
+// IncrementExisting can save work if calculating the initial value is
+// expensive.
+// An error is returned if the specified item cannot be found.
+func IncrementExisting(c context.Context, key string, delta int64) (newValue uint64, err error) {
+ return incr(c, key, delta, nil)
+}
+
+func incr(c context.Context, key string, delta int64, initialValue *uint64) (newValue uint64, err error) {
+ req := &pb.MemcacheIncrementRequest{
+ Key: []byte(key),
+ InitialValue: initialValue,
+ }
+ if delta >= 0 {
+ req.Delta = proto.Uint64(uint64(delta))
+ } else {
+ req.Delta = proto.Uint64(uint64(-delta))
+ req.Direction = pb.MemcacheIncrementRequest_DECREMENT.Enum()
+ }
+ res := &pb.MemcacheIncrementResponse{}
+ err = internal.Call(c, "memcache", "Increment", req, res)
+ if err != nil {
+ return
+ }
+ if res.NewValue == nil {
+ return 0, ErrCacheMiss
+ }
+ return *res.NewValue, nil
+}
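The split between the two entry points in code form; the key name is a placeholder. Increment seeds an absent key with the initial value, while IncrementExisting fails on a miss:

    package sample

    import (
        "golang.org/x/net/context"

        "google.golang.org/appengine/memcache"
    )

    func countHit(c context.Context) (uint64, error) {
        // Creates "hits" as 0 if absent, then adds 1 atomically.
        return memcache.Increment(c, "hits", 1, 0)
    }

    func undoHit(c context.Context) (uint64, error) {
        // A negative delta decrements; errors if "hits" is missing or evicted.
        return memcache.IncrementExisting(c, "hits", -1)
    }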
+
+// set sets the given items using the given conflict resolution policy.
+// appengine.MultiError may be returned.
+func set(c context.Context, item []*Item, value [][]byte, policy pb.MemcacheSetRequest_SetPolicy) error {
+ if len(item) == 0 {
+ return nil
+ }
+ req := &pb.MemcacheSetRequest{
+ Item: make([]*pb.MemcacheSetRequest_Item, len(item)),
+ }
+ for i, t := range item {
+ p := &pb.MemcacheSetRequest_Item{
+ Key: []byte(t.Key),
+ }
+ if value == nil {
+ p.Value = t.Value
+ } else {
+ p.Value = value[i]
+ }
+ if t.Flags != 0 {
+ p.Flags = proto.Uint32(t.Flags)
+ }
+ if t.Expiration != 0 {
+ // In the .proto file, MemcacheSetRequest_Item uses a fixed32 (i.e. unsigned)
+ // for expiration time, while MemcacheGetRequest_Item uses int32 (i.e. signed).
+ // Throughout this .go file, we use int64.
+ // Also, in the proto, the expiration value is either a duration (in seconds)
+ // or an absolute Unix timestamp (in seconds), depending on whether the
+ // value is less than or greater than or equal to 30 years, respectively.
+ if t.Expiration < time.Second {
+ // Because an Expiration of 0 means no expiration, we take
+ // care here to translate an item with an expiration
+ // Duration between 0-1 seconds as immediately expiring
+ // (saying it expired a few seconds ago), rather than
+ // rounding it down to 0 and making it live forever.
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) - 5)
+ } else if t.Expiration >= thirtyYears {
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) + uint32(t.Expiration/time.Second))
+ } else {
+ p.ExpirationTime = proto.Uint32(uint32(t.Expiration / time.Second))
+ }
+ }
+ if t.casID != 0 {
+ p.CasId = proto.Uint64(t.casID)
+ p.ForCas = proto.Bool(true)
+ }
+ p.SetPolicy = policy.Enum()
+ req.Item[i] = p
+ }
+ res := &pb.MemcacheSetResponse{}
+ if err := internal.Call(c, "memcache", "Set", req, res); err != nil {
+ return err
+ }
+ if len(res.SetStatus) != len(item) {
+ return ErrServerError
+ }
+ me, any := make(appengine.MultiError, len(item)), false
+ for i, st := range res.SetStatus {
+ var err error
+ switch st {
+ case pb.MemcacheSetResponse_STORED:
+ // OK
+ case pb.MemcacheSetResponse_NOT_STORED:
+ err = ErrNotStored
+ case pb.MemcacheSetResponse_EXISTS:
+ err = ErrCASConflict
+ default:
+ err = ErrServerError
+ }
+ if err != nil {
+ me[i] = err
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
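The expiration rules above, seen from the caller's side: durations under 30 years travel as relative seconds, longer ones as an absolute Unix timestamp, and sub-second values are sent already expired. A sketch (key and value are placeholders):

    package sample

    import (
        "time"

        "golang.org/x/net/context"

        "google.golang.org/appengine/memcache"
    )

    func cacheSession(c context.Context, token []byte) error {
        item := &memcache.Item{
            Key:        "session",
            Value:      token,
            Expiration: 10 * time.Minute, // well under 30 years: sent as 600 relative seconds
        }
        return memcache.Set(c, item)
    }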
+
+// Set writes the given item, unconditionally.
+func Set(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func SetMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func Add(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func AddMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified nor evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func CompareAndSwap(c context.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func CompareAndSwapMulti(c context.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_CAS)
+}
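These pieces compose into the usual optimistic-concurrency loop: Get, mutate, CompareAndSwap, and retry on ErrCASConflict (ErrNotStored would mean the item was evicted in between). A sketch with a bounded retry count:

    package sample

    import (
        "golang.org/x/net/context"

        "google.golang.org/appengine/memcache"
    )

    func appendSuffix(c context.Context, key string, suffix []byte) error {
        for attempt := 0; attempt < 3; attempt++ {
            item, err := memcache.Get(c, key)
            if err != nil {
                return err
            }
            item.Value = append(item.Value, suffix...)
            err = memcache.CompareAndSwap(c, item)
            if err != memcache.ErrCASConflict {
                return err // nil on success, or a non-CAS failure
            }
            // Lost the race; reload and retry.
        }
        return memcache.ErrCASConflict
    }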
+
+// Codec represents a symmetric pair of functions that implement a codec.
+// Items stored into or retrieved from memcache using a Codec have their
+// values marshaled or unmarshaled.
+//
+// All the methods provided for Codec behave analogously to the package-level
+// functions with the same names.
+type Codec struct {
+ Marshal func(interface{}) ([]byte, error)
+ Unmarshal func([]byte, interface{}) error
+}
+
+// Get gets the item for the given key and decodes the obtained value into v.
+// ErrCacheMiss is returned for a memcache cache miss.
+// The key must be at most 250 bytes in length.
+func (cd Codec) Get(c context.Context, key string, v interface{}) (*Item, error) {
+ i, err := Get(c, key)
+ if err != nil {
+ return nil, err
+ }
+ if err := cd.Unmarshal(i.Value, v); err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+func (cd Codec) set(c context.Context, items []*Item, policy pb.MemcacheSetRequest_SetPolicy) error {
+ var vs [][]byte
+ var me appengine.MultiError
+ for i, item := range items {
+ v, err := cd.Marshal(item.Object)
+ if err != nil {
+ if me == nil {
+ me = make(appengine.MultiError, len(items))
+ }
+ me[i] = err
+ continue
+ }
+ if me == nil {
+ vs = append(vs, v)
+ }
+ }
+ if me != nil {
+ return me
+ }
+
+ return set(c, items, vs, policy)
+}
+
+// Set writes the given item, unconditionally.
+func (cd Codec) Set(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func (cd Codec) SetMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func (cd Codec) Add(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func (cd Codec) AddMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified nor evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func (cd Codec) CompareAndSwap(c context.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func (cd Codec) CompareAndSwapMulti(c context.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_CAS)
+}
+
+var (
+ // Gob is a Codec that uses the gob package.
+ Gob = Codec{gobMarshal, gobUnmarshal}
+ // JSON is a Codec that uses the json package.
+ JSON = Codec{json.Marshal, json.Unmarshal}
+)
+
+func gobMarshal(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(v); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func gobUnmarshal(data []byte, v interface{}) error {
+ return gob.NewDecoder(bytes.NewBuffer(data)).Decode(v)
+}
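A sketch of round-tripping a struct through the JSON codec (Gob works identically); the type and key are placeholders:

    package sample

    import (
        "golang.org/x/net/context"

        "google.golang.org/appengine/memcache"
    )

    type profile struct {
        Name  string
        Score int
    }

    func roundTrip(c context.Context) (*profile, error) {
        in := &memcache.Item{Key: "p", Object: &profile{Name: "gopher", Score: 42}}
        if err := memcache.JSON.Set(c, in); err != nil {
            return nil, err
        }
        out := new(profile)
        if _, err := memcache.JSON.Get(c, "p", out); err != nil {
            return nil, err
        }
        return out, nil
    }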
+
+// Statistics represents a set of statistics about the memcache cache.
+// This may include items that have expired but have not yet been removed from the cache.
+type Statistics struct {
+ Hits uint64 // Counter of cache hits
+ Misses uint64 // Counter of cache misses
+ ByteHits uint64 // Counter of bytes transferred for gets
+
+ Items uint64 // Items currently in the cache
+ Bytes uint64 // Size of all items currently in the cache
+
+ Oldest int64 // Age of access of the oldest item, in seconds
+}
+
+// Stats retrieves the current memcache statistics.
+func Stats(c context.Context) (*Statistics, error) {
+ req := &pb.MemcacheStatsRequest{}
+ res := &pb.MemcacheStatsResponse{}
+ if err := internal.Call(c, "memcache", "Stats", req, res); err != nil {
+ return nil, err
+ }
+ if res.Stats == nil {
+ return nil, ErrNoStats
+ }
+ return &Statistics{
+ Hits: *res.Stats.Hits,
+ Misses: *res.Stats.Misses,
+ ByteHits: *res.Stats.ByteHits,
+ Items: *res.Stats.Items,
+ Bytes: *res.Stats.Bytes,
+ Oldest: int64(*res.Stats.OldestItemAge),
+ }, nil
+}
+
+// Flush flushes all items from memcache.
+func Flush(c context.Context) error {
+ req := &pb.MemcacheFlushRequest{}
+ res := &pb.MemcacheFlushResponse{}
+ return internal.Call(c, "memcache", "FlushAll", req, res)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ switch m := m.(type) {
+ case *pb.MemcacheDeleteRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheGetRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheIncrementRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheSetRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ // MemcacheFlushRequest, MemcacheStatsRequest do not apply namespace.
+ }
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("memcache", pb.MemcacheServiceError_ErrorCode_name)
+ internal.NamespaceMods["memcache"] = namespaceMod
+}
diff --git a/vendor/google.golang.org/appengine/memcache/memcache_test.go b/vendor/google.golang.org/appengine/memcache/memcache_test.go
new file mode 100644
index 000000000..1dc7da471
--- /dev/null
+++ b/vendor/google.golang.org/appengine/memcache/memcache_test.go
@@ -0,0 +1,263 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package memcache
+
+import (
+ "fmt"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/memcache"
+)
+
+var errRPC = fmt.Errorf("RPC error")
+
+func TestGetRequest(t *testing.T) {
+ serviceCalled := false
+ apiKey := "lyric"
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error {
+ // Test request.
+ if n := len(req.Key); n != 1 {
+ t.Errorf("got %d want 1", n)
+ return nil
+ }
+ if k := string(req.Key[0]); k != apiKey {
+ t.Errorf("got %q want %q", k, apiKey)
+ }
+
+ serviceCalled = true
+ return nil
+ })
+
+ // Test the "forward" path from the API call parameters to the
+ // protobuf request object. (The "backward" path from the
+ // protobuf response object to the API call response,
+ // including the error response, is handled in the next few
+ // tests.)
+ Get(c, apiKey)
+ if !serviceCalled {
+ t.Error("Service was not called as expected")
+ }
+}
+
+func TestGetResponseHit(t *testing.T) {
+ key := "lyric"
+ value := "Where the buffalo roam"
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ res.Item = []*pb.MemcacheGetResponse_Item{
+ {Key: []byte(key), Value: []byte(value)},
+ }
+ return nil
+ })
+ apiItem, err := Get(c, key)
+ if apiItem == nil || apiItem.Key != key || string(apiItem.Value) != value {
+ t.Errorf("got %q, %q want {%q,%q}, nil", apiItem, err, key, value)
+ }
+}
+
+func TestGetResponseMiss(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ // don't fill in any of the response
+ return nil
+ })
+ _, err := Get(c, "something")
+ if err != ErrCacheMiss {
+ t.Errorf("got %v want ErrCacheMiss", err)
+ }
+}
+
+func TestGetResponseRPCError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ return errRPC
+ })
+
+ if _, err := Get(c, "something"); err != errRPC {
+ t.Errorf("got %v want errRPC", err)
+ }
+}
+
+func TestAddRequest(t *testing.T) {
+ var apiItem = &Item{
+ Key: "lyric",
+ Value: []byte("Oh, give me a home"),
+ }
+
+ serviceCalled := false
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error {
+ // Test request.
+ pbItem := req.Item[0]
+ if k := string(pbItem.Key); k != apiItem.Key {
+ t.Errorf("got %q want %q", k, apiItem.Key)
+ }
+ if v := string(apiItem.Value); v != string(pbItem.Value) {
+ t.Errorf("got %q want %q", v, string(pbItem.Value))
+ }
+ if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_ADD {
+ t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_ADD)
+ }
+
+ serviceCalled = true
+ return nil
+ })
+
+ Add(c, apiItem)
+ if !serviceCalled {
+ t.Error("Service was not called as expected")
+ }
+}
+
+func TestAddResponseStored(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED}
+ return nil
+ })
+
+ if err := Add(c, &Item{}); err != nil {
+ t.Errorf("got %v want nil", err)
+ }
+}
+
+func TestAddResponseNotStored(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_NOT_STORED}
+ return nil
+ })
+
+ if err := Add(c, &Item{}); err != ErrNotStored {
+ t.Errorf("got %v want ErrNotStored", err)
+ }
+}
+
+func TestAddResponseError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR}
+ return nil
+ })
+
+ if err := Add(c, &Item{}); err != ErrServerError {
+ t.Errorf("got %v want ErrServerError", err)
+ }
+}
+
+func TestAddResponseRPCError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ return errRPC
+ })
+
+ if err := Add(c, &Item{}); err != errRPC {
+ t.Errorf("got %v want errRPC", err)
+ }
+}
+
+func TestSetRequest(t *testing.T) {
+ var apiItem = &Item{
+ Key: "lyric",
+ Value: []byte("Where the buffalo roam"),
+ }
+
+ serviceCalled := false
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error {
+ // Test request.
+ if n := len(req.Item); n != 1 {
+ t.Errorf("got %d want 1", n)
+ return nil
+ }
+ pbItem := req.Item[0]
+ if k := string(pbItem.Key); k != apiItem.Key {
+ t.Errorf("got %q want %q", k, apiItem.Key)
+ }
+ if v := string(pbItem.Value); v != string(apiItem.Value) {
+ t.Errorf("got %q want %q", v, string(apiItem.Value))
+ }
+ if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_SET {
+ t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_SET)
+ }
+
+ serviceCalled = true
+ return nil
+ })
+
+ Set(c, apiItem)
+ if !serviceCalled {
+ t.Error("Service was not called as expected")
+ }
+}
+
+func TestSetResponse(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED}
+ return nil
+ })
+
+ if err := Set(c, &Item{}); err != nil {
+ t.Errorf("got %v want nil", err)
+ }
+}
+
+func TestSetResponseError(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
+ res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR}
+ return nil
+ })
+
+ if err := Set(c, &Item{}); err != ErrServerError {
+ t.Errorf("got %v want ErrServerError", err)
+ }
+}
+
+func TestNamespaceResetting(t *testing.T) {
+ namec := make(chan *string, 1)
+ c0 := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
+ namec <- req.NameSpace
+ return errRPC
+ })
+
+ // Check that wrapping c0 in a namespace twice works correctly.
+ c1, err := appengine.Namespace(c0, "A")
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+ c2, err := appengine.Namespace(c1, "") // should act as the original context
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+
+ Get(c0, "key")
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Get with c0: ns = %q, want nil`, *ns)
+ }
+
+ Get(c1, "key")
+ if ns := <-namec; ns == nil {
+ t.Error(`Get with c1: ns = nil, want "A"`)
+ } else if *ns != "A" {
+ t.Errorf(`Get with c1: ns = %q, want "A"`, *ns)
+ }
+
+ Get(c2, "key")
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Get with c2: ns = %q, want nil`, *ns)
+ }
+}
+
+func TestGetMultiEmpty(t *testing.T) {
+ serviceCalled := false
+ c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error {
+ serviceCalled = true
+ return nil
+ })
+
+ // Test that the Memcache service is not called when
+ // GetMulti is passed an empty slice of keys.
+ GetMulti(c, []string{})
+ if serviceCalled {
+ t.Error("Service was called but should not have been")
+ }
+}
diff --git a/vendor/google.golang.org/appengine/module/module.go b/vendor/google.golang.org/appengine/module/module.go
new file mode 100644
index 000000000..88e6629ac
--- /dev/null
+++ b/vendor/google.golang.org/appengine/module/module.go
@@ -0,0 +1,113 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package module provides functions for interacting with modules.
+
+The appengine package contains functions that report the identity of the app,
+including the module name.
+*/
+package module // import "google.golang.org/appengine/module"
+
+import (
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/modules"
+)
+
+// List returns the names of modules belonging to this application.
+func List(c context.Context) ([]string, error) {
+ req := &pb.GetModulesRequest{}
+ res := &pb.GetModulesResponse{}
+ err := internal.Call(c, "modules", "GetModules", req, res)
+ return res.Module, err
+}
+
+// NumInstances returns the number of instances of the given module/version.
+// If either argument is the empty string, it means the default.
+func NumInstances(c context.Context, module, version string) (int, error) {
+ req := &pb.GetNumInstancesRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.GetNumInstancesResponse{}
+
+ if err := internal.Call(c, "modules", "GetNumInstances", req, res); err != nil {
+ return 0, err
+ }
+ return int(*res.Instances), nil
+}
+
+// SetNumInstances sets the number of instances of the given module/version to the
+// specified value. If either module or version is the empty string, it means the
+// default.
+func SetNumInstances(c context.Context, module, version string, instances int) error {
+ req := &pb.SetNumInstancesRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ req.Instances = proto.Int64(int64(instances))
+ res := &pb.SetNumInstancesResponse{}
+ return internal.Call(c, "modules", "SetNumInstances", req, res)
+}
+
+// Versions returns the names of the versions that belong to the specified module.
+// If module is the empty string, it means the default module.
+func Versions(c context.Context, module string) ([]string, error) {
+ req := &pb.GetVersionsRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ res := &pb.GetVersionsResponse{}
+ err := internal.Call(c, "modules", "GetVersions", req, res)
+ return res.GetVersion(), err
+}
+
+// DefaultVersion returns the default version of the specified module.
+// If module is the empty string, it means the default module.
+func DefaultVersion(c context.Context, module string) (string, error) {
+ req := &pb.GetDefaultVersionRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ res := &pb.GetDefaultVersionResponse{}
+ err := internal.Call(c, "modules", "GetDefaultVersion", req, res)
+ return res.GetVersion(), err
+}
+
+// Start starts the specified version of the specified module.
+// If either module or version is the empty string, it means the default.
+func Start(c context.Context, module, version string) error {
+ req := &pb.StartModuleRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.StartModuleResponse{}
+ return internal.Call(c, "modules", "StartModule", req, res)
+}
+
+// Stop stops the specified version of the specified module.
+// If either module or version is the empty string, it means the default.
+func Stop(c context.Context, module, version string) error {
+ req := &pb.StopModuleRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.StopModuleResponse{}
+ return internal.Call(c, "modules", "StopModule", req, res)
+}
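A sketch tying a few of these calls together (module names and the instance count are placeholders):

    package sample

    import (
        "golang.org/x/net/context"

        "google.golang.org/appengine/module"
    )

    func scaleWorkers(c context.Context) error {
        mods, err := module.List(c)
        if err != nil {
            return err
        }
        _ = mods // e.g. ["default", "worker"]
        // An empty version means the default version of the "worker" module.
        return module.SetNumInstances(c, "worker", "", 5)
    }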
diff --git a/vendor/google.golang.org/appengine/module/module_test.go b/vendor/google.golang.org/appengine/module/module_test.go
new file mode 100644
index 000000000..73e8971dc
--- /dev/null
+++ b/vendor/google.golang.org/appengine/module/module_test.go
@@ -0,0 +1,124 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package module
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/modules"
+)
+
+const version = "test-version"
+const module = "test-module"
+const instances = 3
+
+func TestList(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "GetModules", func(req *pb.GetModulesRequest, res *pb.GetModulesResponse) error {
+ res.Module = []string{"default", "mod1"}
+ return nil
+ })
+ got, err := List(c)
+ if err != nil {
+ t.Fatalf("List: %v", err)
+ }
+ want := []string{"default", "mod1"}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("List = %v, want %v", got, want)
+ }
+}
+
+func TestSetNumInstances(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "SetNumInstances", func(req *pb.SetNumInstancesRequest, res *pb.SetNumInstancesResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ if *req.Version != version {
+ t.Errorf("Version = %v, want %v", req.Version, version)
+ }
+ if *req.Instances != instances {
+ t.Errorf("Instances = %v, want %d", req.Instances, instances)
+ }
+ return nil
+ })
+ err := SetNumInstances(c, module, version, instances)
+ if err != nil {
+ t.Fatalf("SetNumInstances: %v", err)
+ }
+}
+
+func TestVersions(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "GetVersions", func(req *pb.GetVersionsRequest, res *pb.GetVersionsResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ res.Version = []string{"v1", "v2", "v3"}
+ return nil
+ })
+ got, err := Versions(c, module)
+ if err != nil {
+ t.Fatalf("Versions: %v", err)
+ }
+ want := []string{"v1", "v2", "v3"}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("Versions = %v, want %v", got, want)
+ }
+}
+
+func TestDefaultVersion(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "GetDefaultVersion", func(req *pb.GetDefaultVersionRequest, res *pb.GetDefaultVersionResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ res.Version = proto.String(version)
+ return nil
+ })
+ got, err := DefaultVersion(c, module)
+ if err != nil {
+ t.Fatalf("DefaultVersion: %v", err)
+ }
+ if got != version {
+ t.Errorf("Version = %v, want %v", got, version)
+ }
+}
+
+func TestStart(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "StartModule", func(req *pb.StartModuleRequest, res *pb.StartModuleResponse) error {
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ if *req.Version != version {
+ t.Errorf("Version = %v, want %v", req.Version, version)
+ }
+ return nil
+ })
+
+ err := Start(c, module, version)
+ if err != nil {
+ t.Fatalf("Start: %v", err)
+ }
+}
+
+func TestStop(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "StopModule", func(req *pb.StopModuleRequest, res *pb.StopModuleResponse) error {
+ version := "test-version"
+ module := "test-module"
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ if *req.Version != version {
+ t.Errorf("Version = %v, want %v", req.Version, version)
+ }
+ return nil
+ })
+
+ err := Stop(c, module, version)
+ if err != nil {
+ t.Fatalf("Stop: %v", err)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
new file mode 100644
index 000000000..21860ca08
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace.go
@@ -0,0 +1,25 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "fmt"
+ "regexp"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Namespace returns a replacement context that operates within the given namespace.
+func Namespace(c context.Context, namespace string) (context.Context, error) {
+ if !validNamespace.MatchString(namespace) {
+ return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
+ }
+ return internal.NamespacedContext(c, namespace), nil
+}
+
+// validNamespace matches valid namespace names.
+var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
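A sketch of scoping memcache traffic per tenant with Namespace, the same wrapping exercised by TestNamespaceResetting in the memcache tests above (the helper is an assumption):

    package sample

    import (
        "golang.org/x/net/context"

        "google.golang.org/appengine"
        "google.golang.org/appengine/memcache"
    )

    func tenantGet(c context.Context, tenant, key string) (*memcache.Item, error) {
        nc, err := appengine.Namespace(c, tenant) // tenant must match validNamespace
        if err != nil {
            return nil, err
        }
        return memcache.Get(nc, key)
    }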
diff --git a/vendor/google.golang.org/appengine/namespace_test.go b/vendor/google.golang.org/appengine/namespace_test.go
new file mode 100644
index 000000000..847f640bd
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace_test.go
@@ -0,0 +1,39 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "testing"
+
+ "golang.org/x/net/context"
+)
+
+func TestNamespaceValidity(t *testing.T) {
+ testCases := []struct {
+ namespace string
+ ok bool
+ }{
+ // data from Python's namespace_manager_test.py
+ {"", true},
+ {"__a.namespace.123__", true},
+ {"-_A....NAMESPACE-_", true},
+ {"-", true},
+ {".", true},
+ {".-", true},
+
+ {"?", false},
+ {"+", false},
+ {"!", false},
+ {" ", false},
+ }
+ for _, tc := range testCases {
+ _, err := Namespace(context.Background(), tc.namespace)
+ if err == nil && !tc.ok {
+ t.Errorf("Namespace %q should be rejected, but wasn't", tc.namespace)
+ } else if err != nil && tc.ok {
+ t.Errorf("Namespace %q should be accepted, but wasn't", tc.namespace)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/client.go b/vendor/google.golang.org/appengine/remote_api/client.go
new file mode 100644
index 000000000..ce8aab562
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/client.go
@@ -0,0 +1,194 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package remote_api
+
+// This file provides the client for connecting remotely to a user's production
+// application.
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/remote_api"
+)
+
+// Client is a connection to the production APIs for an application.
+type Client struct {
+ hc *http.Client
+ url string
+ appID string
+}
+
+// NewClient returns a client for the given host. All communication will
+// be performed over SSL unless the host is localhost.
+func NewClient(host string, client *http.Client) (*Client, error) {
+ // Add an appcfg header to outgoing requests.
+ wrapClient := new(http.Client)
+ *wrapClient = *client
+ t := client.Transport
+ if t == nil {
+ t = http.DefaultTransport
+ }
+ wrapClient.Transport = &headerAddingRoundTripper{t}
+
+ url := url.URL{
+ Scheme: "https",
+ Host: host,
+ Path: "/_ah/remote_api",
+ }
+ if host == "localhost" || strings.HasPrefix(host, "localhost:") {
+ url.Scheme = "http"
+ }
+ u := url.String()
+ appID, err := getAppID(wrapClient, u)
+ if err != nil {
+ return nil, fmt.Errorf("unable to contact server: %v", err)
+ }
+ return &Client{
+ hc: wrapClient,
+ url: u,
+ appID: appID,
+ }, nil
+}
+
+// NewContext returns a copy of parent that will cause App Engine API
+// calls to be sent to the client's remote host.
+func (c *Client) NewContext(parent context.Context) context.Context {
+ ctx := internal.WithCallOverride(parent, c.call)
+ ctx = internal.WithLogOverride(ctx, c.logf)
+ ctx = internal.WithAppIDOverride(ctx, c.appID)
+ return ctx
+}
+
+// NewRemoteContext returns a context that gives access to the production
+// APIs for the application at the given host. All communication will be
+// performed over SSL unless the host is localhost.
+func NewRemoteContext(host string, client *http.Client) (context.Context, error) {
+ c, err := NewClient(host, client)
+ if err != nil {
+ return nil, err
+ }
+ return c.NewContext(context.Background()), nil
+}
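A sketch of driving a production app's services from an offline tool; the host and the pre-authenticated *http.Client are assumptions (ExampleClient in the tests below shows the same flow against datastore):

    package sample

    import (
        "net/http"

        "google.golang.org/appengine/memcache"
        "google.golang.org/appengine/remote_api"
    )

    func flushProd(hc *http.Client) error {
        ctx, err := remote_api.NewRemoteContext("example.appspot.com", hc)
        if err != nil {
            return err
        }
        return memcache.Flush(ctx)
    }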
+
+var logLevels = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func (c *Client) logf(level int64, format string, args ...interface{}) {
+ log.Printf(logLevels[level]+": "+format, args...)
+}
+
+func (c *Client) call(ctx context.Context, service, method string, in, out proto.Message) error {
+ req, err := proto.Marshal(in)
+ if err != nil {
+ return fmt.Errorf("error marshalling request: %v", err)
+ }
+
+ remReq := &pb.Request{
+ ServiceName: proto.String(service),
+ Method: proto.String(method),
+ Request: req,
+ // NOTE(djd): RequestId is unused in the server.
+ }
+
+ req, err = proto.Marshal(remReq)
+ if err != nil {
+ return fmt.Errorf("proto.Marshal: %v", err)
+ }
+
+ // TODO(djd): Respect ctx.Deadline()?
+ resp, err := c.hc.Post(c.url, "application/octet-stream", bytes.NewReader(req))
+ if err != nil {
+ return fmt.Errorf("error sending request: %v", err)
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body)
+ }
+ if err != nil {
+ return fmt.Errorf("failed reading response: %v", err)
+ }
+ remResp := &pb.Response{}
+ if err := proto.Unmarshal(body, remResp); err != nil {
+ return fmt.Errorf("error unmarshalling response: %v", err)
+ }
+
+ if ae := remResp.GetApplicationError(); ae != nil {
+ return &internal.APIError{
+ Code: ae.GetCode(),
+ Detail: ae.GetDetail(),
+ Service: service,
+ }
+ }
+
+ if remResp.Response == nil {
+ return fmt.Errorf("unexpected response: %s", proto.MarshalTextString(remResp))
+ }
+
+ return proto.Unmarshal(remResp.Response, out)
+}
+
+// This is a forgiving regexp designed to parse the app ID from YAML.
+var appIDRE = regexp.MustCompile(`app_id["']?\s*:\s*['"]?([-a-z0-9.:~]+)`)
+
+func getAppID(client *http.Client, url string) (string, error) {
+ // Generate a pseudo-random token for handshaking.
+ token := strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Int())
+
+ resp, err := client.Get(fmt.Sprintf("%s?rtok=%s", url, token))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body)
+ }
+ if err != nil {
+ return "", fmt.Errorf("failed reading response: %v", err)
+ }
+
+ // Check the token is present in response.
+ if !bytes.Contains(body, []byte(token)) {
+ return "", fmt.Errorf("token not found: want %q; body %q", token, body)
+ }
+
+ match := appIDRE.FindSubmatch(body)
+ if match == nil {
+ return "", fmt.Errorf("app ID not found: body %q", body)
+ }
+
+ return string(match[1]), nil
+}
+
+type headerAddingRoundTripper struct {
+ Wrapped http.RoundTripper
+}
+
+func (t *headerAddingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
+ r.Header.Set("X-Appcfg-Api-Version", "1")
+ return t.Wrapped.RoundTrip(r)
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/client_test.go b/vendor/google.golang.org/appengine/remote_api/client_test.go
new file mode 100644
index 000000000..7f4bdcf3c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/client_test.go
@@ -0,0 +1,43 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package remote_api
+
+import (
+ "log"
+ "net/http"
+ "testing"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/datastore"
+)
+
+func TestAppIDRE(t *testing.T) {
+ appID := "s~my-appid-539"
+ tests := []string{
+ "{rtok: 8306111115908860449, app_id: s~my-appid-539}\n",
+ "{rtok: 8306111115908860449, app_id: 's~my-appid-539'}\n",
+ `{rtok: 8306111115908860449, app_id: "s~my-appid-539"}`,
+ `{rtok: 8306111115908860449, "app_id":"s~my-appid-539"}`,
+ }
+ for _, v := range tests {
+ if g := appIDRE.FindStringSubmatch(v); g == nil || g[1] != appID {
+ t.Errorf("appIDRE.FindStringSubmatch(%s) got %q, want %q", v, g, appID)
+ }
+ }
+}
+
+func ExampleClient() {
+ c, err := NewClient("example.appspot.com", http.DefaultClient)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ ctx := context.Background() // or from a request
+ ctx = c.NewContext(ctx)
+ _, err = datastore.Put(ctx, datastore.NewIncompleteKey(ctx, "Foo", nil), struct{ Bar int }{42})
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/google.golang.org/appengine/remote_api/remote_api.go b/vendor/google.golang.org/appengine/remote_api/remote_api.go
new file mode 100644
index 000000000..3d2880d64
--- /dev/null
+++ b/vendor/google.golang.org/appengine/remote_api/remote_api.go
@@ -0,0 +1,152 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package remote_api implements the /_ah/remote_api endpoint.
+This endpoint is used by offline tools such as the bulk loader.
+*/
+package remote_api // import "google.golang.org/appengine/remote_api"
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/remote_api"
+ "google.golang.org/appengine/log"
+ "google.golang.org/appengine/user"
+)
+
+func init() {
+ http.HandleFunc("/_ah/remote_api", handle)
+}
+
+func handle(w http.ResponseWriter, req *http.Request) {
+ c := appengine.NewContext(req)
+
+ u := user.Current(c)
+ if u == nil {
+ u, _ = user.CurrentOAuth(c,
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/appengine.apis",
+ )
+ }
+
+ if !appengine.IsDevAppServer() && (u == nil || !u.Admin) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusUnauthorized)
+ io.WriteString(w, "You must be logged in as an administrator to access this.\n")
+ return
+ }
+ if req.Header.Get("X-Appcfg-Api-Version") == "" {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusForbidden)
+ io.WriteString(w, "This request did not contain a necessary header.\n")
+ return
+ }
+
+ if req.Method != "POST" {
+ // Response must be YAML.
+ rtok := req.FormValue("rtok")
+ if rtok == "" {
+ rtok = "0"
+ }
+ w.Header().Set("Content-Type", "text/yaml; charset=utf-8")
+ fmt.Fprintf(w, `{app_id: %q, rtok: %q}`, internal.FullyQualifiedAppID(c), rtok)
+ return
+ }
+
+ defer req.Body.Close()
+ body, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Failed reading body: %v", err)
+ return
+ }
+ remReq := &pb.Request{}
+ if err := proto.Unmarshal(body, remReq); err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Bad body: %v", err)
+ return
+ }
+
+ service, method := *remReq.ServiceName, *remReq.Method
+ if !requestSupported(service, method) {
+ w.WriteHeader(http.StatusBadRequest)
+ log.Errorf(c, "Unsupported RPC /%s.%s", service, method)
+ return
+ }
+
+ rawReq := &rawMessage{remReq.Request}
+ rawRes := &rawMessage{}
+ err = internal.Call(c, service, method, rawReq, rawRes)
+
+ remRes := &pb.Response{}
+ if err == nil {
+ remRes.Response = rawRes.buf
+ } else if ae, ok := err.(*internal.APIError); ok {
+ remRes.ApplicationError = &pb.ApplicationError{
+ Code: &ae.Code,
+ Detail: &ae.Detail,
+ }
+ } else {
+ // This shouldn't normally happen.
+ log.Errorf(c, "appengine/remote_api: Unexpected error of type %T: %v", err, err)
+ remRes.ApplicationError = &pb.ApplicationError{
+ Code: proto.Int32(0),
+ Detail: proto.String(err.Error()),
+ }
+ }
+ out, err := proto.Marshal(remRes)
+ if err != nil {
+ // This should not be possible.
+ w.WriteHeader(500)
+ log.Errorf(c, "proto.Marshal: %v", err)
+ return
+ }
+
+ log.Infof(c, "Spooling %d bytes of response to /%s.%s", len(out), service, method)
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Length", strconv.Itoa(len(out)))
+ w.Write(out)
+}
+
+// rawMessage is a protocol buffer type that is already serialised.
+// This allows the remote_api code here to handle messages
+// without having to know the real type.
+type rawMessage struct {
+ buf []byte
+}
+
+func (rm *rawMessage) Marshal() ([]byte, error) {
+ return rm.buf, nil
+}
+
+func (rm *rawMessage) Unmarshal(buf []byte) error {
+ rm.buf = make([]byte, len(buf))
+ copy(rm.buf, buf)
+ return nil
+}
+
+func requestSupported(service, method string) bool {
+ // This list of supported services is taken from SERVICE_PB_MAP in remote_api_services.py
+ switch service {
+ case "app_identity_service", "blobstore", "capability_service", "channel", "datastore_v3",
+ "datastore_v4", "file", "images", "logservice", "mail", "matcher", "memcache", "remote_datastore",
+ "remote_socket", "search", "modules", "system", "taskqueue", "urlfetch", "user", "xmpp":
+ return true
+ }
+ return false
+}
+
+// Methods to satisfy proto.Message.
+func (rm *rawMessage) Reset() { rm.buf = nil }
+func (rm *rawMessage) String() string { return strconv.Quote(string(rm.buf)) }
+func (*rawMessage) ProtoMessage() {}
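
For reference, a minimal sketch of the discovery handshake this handler implements for non-POST requests; the host name is an illustrative assumption. The client-side getAppID (client.go above) sends a random rtok, verifies that the token is echoed back, and extracts app_id with appIDRE:

    // GET /_ah/remote_api?rtok=12345
    // -> {app_id: "s~my-app", rtok: "12345"}
    appID, err := getAppID(client, "https://example.appspot.com/_ah/remote_api")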
diff --git a/vendor/google.golang.org/appengine/runtime/runtime.go b/vendor/google.golang.org/appengine/runtime/runtime.go
new file mode 100644
index 000000000..fa6c12b79
--- /dev/null
+++ b/vendor/google.golang.org/appengine/runtime/runtime.go
@@ -0,0 +1,148 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package runtime exposes information about the resource usage of the application.
+It also provides a way to run code in a new background context of a module.
+
+This package does not work on App Engine "flexible environment".
+*/
+package runtime // import "google.golang.org/appengine/runtime"
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/system"
+)
+
+// Statistics represents the system's statistics.
+type Statistics struct {
+ // CPU records the CPU consumed by this instance, in megacycles.
+ CPU struct {
+ Total float64
+ Rate1M float64 // consumption rate over one minute
+ Rate10M float64 // consumption rate over ten minutes
+ }
+ // RAM records the memory used by the instance, in megabytes.
+ RAM struct {
+ Current float64
+ Average1M float64 // average usage over one minute
+ Average10M float64 // average usage over ten minutes
+ }
+}
+
+func Stats(c context.Context) (*Statistics, error) {
+ req := &pb.GetSystemStatsRequest{}
+ res := &pb.GetSystemStatsResponse{}
+ if err := internal.Call(c, "system", "GetSystemStats", req, res); err != nil {
+ return nil, err
+ }
+ s := &Statistics{}
+ if res.Cpu != nil {
+ s.CPU.Total = res.Cpu.GetTotal()
+ s.CPU.Rate1M = res.Cpu.GetRate1M()
+ s.CPU.Rate10M = res.Cpu.GetRate10M()
+ }
+ if res.Memory != nil {
+ s.RAM.Current = res.Memory.GetCurrent()
+ s.RAM.Average1M = res.Memory.GetAverage1M()
+ s.RAM.Average10M = res.Memory.GetAverage10M()
+ }
+ return s, nil
+}
+
+/*
+RunInBackground makes an API call that triggers an /_ah/background request.
+
+There are two independent code paths that need to make contact:
+the RunInBackground code, and the /_ah/background handler. The matchmaker
+loop arranges for the two paths to meet. The RunInBackground code passes
+a send to the matchmaker, the /_ah/background handler passes a recv to the matchmaker,
+and the matchmaker hooks them up.
+*/
+
+func init() {
+ http.HandleFunc("/_ah/background", handleBackground)
+
+ sc := make(chan send)
+ rc := make(chan recv)
+ sendc, recvc = sc, rc
+ go matchmaker(sc, rc)
+}
+
+var (
+ sendc chan<- send // RunInBackground sends to this
+ recvc chan<- recv // handleBackground sends to this
+)
+
+type send struct {
+ id string
+ f func(context.Context)
+}
+
+type recv struct {
+ id string
+ ch chan<- func(context.Context)
+}
+
+func matchmaker(sendc <-chan send, recvc <-chan recv) {
+ // When one side of the match arrives before the other
+ // it is inserted in the corresponding map.
+ waitSend := make(map[string]send)
+ waitRecv := make(map[string]recv)
+
+ for {
+ select {
+ case s := <-sendc:
+ if r, ok := waitRecv[s.id]; ok {
+ // meet!
+ delete(waitRecv, s.id)
+ r.ch <- s.f
+ } else {
+ // waiting for r
+ waitSend[s.id] = s
+ }
+ case r := <-recvc:
+ if s, ok := waitSend[r.id]; ok {
+ // meet!
+ delete(waitSend, r.id)
+ r.ch <- s.f
+ } else {
+ // waiting for s
+ waitRecv[r.id] = r
+ }
+ }
+ }
+}
+
+var newContext = appengine.NewContext // for testing
+
+func handleBackground(w http.ResponseWriter, req *http.Request) {
+ id := req.Header.Get("X-AppEngine-BackgroundRequest")
+
+ ch := make(chan func(context.Context))
+ recvc <- recv{id, ch}
+ (<-ch)(newContext(req))
+}
+
+// RunInBackground runs f in a background goroutine in this process.
+// f is provided a context that may outlast the context provided to RunInBackground.
+// This is only valid to invoke from a service set to basic or manual scaling.
+func RunInBackground(c context.Context, f func(c context.Context)) error {
+ req := &pb.StartBackgroundRequestRequest{}
+ res := &pb.StartBackgroundRequestResponse{}
+ if err := internal.Call(c, "system", "StartBackgroundRequest", req, res); err != nil {
+ return err
+ }
+ sendc <- send{res.GetRequestId(), f}
+ return nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("system", pb.SystemServiceError_ErrorCode_name)
+}
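
To make the rendezvous concrete, a minimal sketch of one pairing; the id value, work function, and ctx are illustrative assumptions. Whichever side arrives first parks in the corresponding wait map inside matchmaker until the other side shows up:

    sendc <- send{id: "req-1", f: work} // from RunInBackground
    ch := make(chan func(context.Context))
    recvc <- recv{id: "req-1", ch: ch}  // from handleBackground
    (<-ch)(ctx)                         // runs work(ctx) exactly once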
diff --git a/vendor/google.golang.org/appengine/runtime/runtime_test.go b/vendor/google.golang.org/appengine/runtime/runtime_test.go
new file mode 100644
index 000000000..8f3a124d2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/runtime/runtime_test.go
@@ -0,0 +1,101 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/system"
+)
+
+func TestRunInBackgroundSendFirst(t *testing.T) { testRunInBackground(t, true) }
+func TestRunInBackgroundRecvFirst(t *testing.T) { testRunInBackground(t, false) }
+
+func testRunInBackground(t *testing.T, sendFirst bool) {
+ srv := httptest.NewServer(nil)
+ defer srv.Close()
+
+ const id = "f00bar"
+ sendWait, recvWait := make(chan bool), make(chan bool)
+ sbr := make(chan bool) // strobed when system.StartBackgroundRequest has started
+
+ calls := 0
+ c := aetesting.FakeSingleContext(t, "system", "StartBackgroundRequest", func(req *pb.StartBackgroundRequestRequest, res *pb.StartBackgroundRequestResponse) error {
+ calls++
+ if calls > 1 {
+ t.Errorf("Too many calls to system.StartBackgroundRequest")
+ }
+ sbr <- true
+ res.RequestId = proto.String(id)
+ <-sendWait
+ return nil
+ })
+
+ var c2 context.Context // a fake
+ newContext = func(*http.Request) context.Context {
+ return c2
+ }
+
+ var fRun int
+ f := func(c3 context.Context) {
+ fRun++
+ if c3 != c2 {
+ t.Errorf("f got a different context than expected")
+ }
+ }
+
+ ribErrc := make(chan error)
+ go func() {
+ ribErrc <- RunInBackground(c, f)
+ }()
+
+ brErrc := make(chan error)
+ go func() {
+ <-sbr
+ req, err := http.NewRequest("GET", srv.URL+"/_ah/background", nil)
+ if err != nil {
+ brErrc <- fmt.Errorf("http.NewRequest: %v", err)
+ return
+ }
+ req.Header.Set("X-AppEngine-BackgroundRequest", id)
+ client := &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ },
+ }
+
+ <-recvWait
+ _, err = client.Do(req)
+ brErrc <- err
+ }()
+
+ // Send and receive are both waiting at this point.
+ waits := [2]chan bool{sendWait, recvWait}
+ if !sendFirst {
+ waits[0], waits[1] = waits[1], waits[0]
+ }
+ waits[0] <- true
+ time.Sleep(100 * time.Millisecond)
+ waits[1] <- true
+
+ if err := <-ribErrc; err != nil {
+ t.Fatalf("RunInBackground: %v", err)
+ }
+ if err := <-brErrc; err != nil {
+ t.Fatalf("background request: %v", err)
+ }
+
+ if fRun != 1 {
+ t.Errorf("Got %d runs of f, want 1", fRun)
+ }
+}
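
The test above exercises the plumbing end to end; typical application usage is just the one call, sketched here under the assumption of a basic- or manual-scaling module with a request context c:

    err := runtime.RunInBackground(c, func(c context.Context) {
        // Long-running work; this context may outlive the request
        // that called RunInBackground.
    })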
diff --git a/vendor/google.golang.org/appengine/search/doc.go b/vendor/google.golang.org/appengine/search/doc.go
new file mode 100644
index 000000000..5208f18f6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/doc.go
@@ -0,0 +1,209 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package search provides a client for App Engine's search service.
+
+
+Basic Operations
+
+Indexes contain documents. Each index is identified by its name: a
+human-readable ASCII string.
+
+Within an index, documents are associated with an ID, which is also
+a human-readable ASCII string. A document's contents are a mapping from
+case-sensitive field names to values. Valid types for field values are:
+ - string,
+ - search.Atom,
+ - search.HTML,
+ - time.Time (stored with millisecond precision),
+ - float64 (value between -2,147,483,647 and 2,147,483,647 inclusive),
+ - appengine.GeoPoint.
+
+The Get and Put methods on an Index load and save a document.
+A document's contents are typically represented by a struct pointer.
+
+Example code:
+
+ type Doc struct {
+ Author string
+ Comment string
+ Creation time.Time
+ }
+
+ index, err := search.Open("comments")
+ if err != nil {
+ return err
+ }
+ newID, err := index.Put(ctx, "", &Doc{
+ Author: "gopher",
+ Comment: "the truth of the matter",
+ Creation: time.Now(),
+ })
+ if err != nil {
+ return err
+ }
+
+A single document can be retrieved by its ID. Pass a destination struct
+to Get to hold the resulting document.
+
+ var doc Doc
+ err := index.Get(ctx, id, &doc)
+ if err != nil {
+ return err
+ }
+
+
+Search and Listing Documents
+
+Indexes have two methods for retrieving multiple documents at once: Search and
+List.
+
+Searching an index for a query will result in an iterator. As with an iterator
+from package datastore, pass a destination struct to Next to decode the next
+result. Next will return Done when the iterator is exhausted.
+
+ for t := index.Search(ctx, "Comment:truth", nil); ; {
+ var doc Doc
+ id, err := t.Next(&doc)
+ if err == search.Done {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+ }
+
+Search takes a string query to determine which documents to return. The query
+can be simple, such as a single word to match, or complex. The query
+language is described at
+https://cloud.google.com/appengine/docs/standard/go/search/query_strings
+
+Search also takes an optional SearchOptions struct which gives much more
+control over how results are calculated and returned.
+
+Call List to iterate over all documents in an index.
+
+ for t := index.List(ctx, nil); ; {
+ var doc Doc
+ id, err := t.Next(&doc)
+ if err == search.Done {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+ }
+
+
+Fields and Facets
+
+A document's contents can be represented by a variety of types. These are
+typically struct pointers, but they can also be represented by any type
+implementing the FieldLoadSaver interface. The FieldLoadSaver allows metadata
+to be set for the document with the DocumentMetadata type. Struct pointers are
+more strongly typed and are easier to use; FieldLoadSavers are more flexible.
+
+A document's contents can be expressed in two ways: fields and facets.
+
+Fields are the most common way of providing content for documents. Fields can
+store data in multiple types and can be matched in searches using query
+strings.
+
+Facets provide a way to attach categorical information to a document. The only
+valid types for facets are search.Atom and float64. Facets allow search
+results to contain summaries of the categories matched in a search, and to
+restrict searches to only match against specific categories.
+
+By default, for struct pointers, all of the struct fields are used as document
+fields, and the field name used is the same as on the struct (and hence must
+start with an upper case letter). Struct fields may have a
+`search:"name,options"` tag. The name must start with a letter and be
+composed only of word characters. A "-" tag name means that the field will be
+ignored. If options is "facet" then the struct field will be used as a
+document facet. If options is "" then the comma may be omitted. There are no
+other recognized options.
+
+Example code:
+
+ // A and B are renamed to a and b.
+ // A, C and I are facets.
+ // D's tag is equivalent to having no tag at all (E).
+ // F and G are ignored entirely by the search package.
+ // I has tag information for both the search and json packages.
+ type TaggedStruct struct {
+ A float64 `search:"a,facet"`
+ B float64 `search:"b"`
+ C float64 `search:",facet"`
+ D float64 `search:""`
+ E float64
+ F float64 `search:"-"`
+ G float64 `search:"-,facet"`
+ I float64 `search:",facet" json:"i"`
+ }
+
+
+The FieldLoadSaver Interface
+
+A document's contents can also be represented by any type that implements the
+FieldLoadSaver interface. This type may be a struct pointer, but it
+does not have to be. The search package will call Load when loading the
+document's contents, and Save when saving them. In addition to a slice of
+Fields, the Load and Save methods also use the DocumentMetadata type to
+provide additional information about a document (such as its Rank, or set of
+Facets). Possible uses for this interface include deriving non-stored fields,
+verifying fields or setting specific languages for string and HTML fields.
+
+Example code:
+
+ type CustomFieldsExample struct {
+ // Item's title and which language it is in.
+ Title string
+ Lang string
+ // Mass, in grams.
+ Mass int
+ }
+
+ func (x *CustomFieldsExample) Load(fields []search.Field, meta *search.DocumentMetadata) error {
+ // Load the title field, failing if any other field is found.
+ for _, f := range fields {
+ if f.Name != "title" {
+ return fmt.Errorf("unknown field %q", f.Name)
+ }
+ s, ok := f.Value.(string)
+ if !ok {
+ return fmt.Errorf("unsupported type %T for field %q", f.Value, f.Name)
+ }
+ x.Title = s
+ x.Lang = f.Language
+ }
+ // Load the mass facet, failing if any other facet is found.
+ for _, f := range meta.Facets {
+ if f.Name != "mass" {
+ return fmt.Errorf("unknown facet %q", f.Name)
+ }
+ m, ok := f.Value.(float64)
+ if !ok {
+ return fmt.Errorf("unsupported type %T for facet %q", f.Value, f.Name)
+ }
+ x.Mass = int(m)
+ }
+ return nil
+ }
+
+ func (x *CustomFieldsExample) Save() ([]search.Field, *search.DocumentMetadata, error) {
+ fields := []search.Field{
+ {Name: "title", Value: x.Title, Language: x.Lang},
+ }
+ meta := &search.DocumentMetadata{
+ Facets: []search.Facet{
+ {Name: "mass", Value: float64(x.Mass)},
+ },
+ }
+ return fields, meta, nil
+ }
+*/
+package search
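
The package comment above describes facets but stops short of a facet query; a minimal sketch under the same assumptions as the earlier examples (an open index and a ctx):

    opts := &search.SearchOptions{
        Facets: []search.FacetSearchOption{
            search.AutoFacetDiscovery(0, 0), // zero limits mean service defaults
        },
    }
    t := index.Search(ctx, "Comment:truth", opts)
    facets, err := t.Facets() // one []search.FacetResult per discovered facet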
diff --git a/vendor/google.golang.org/appengine/search/field.go b/vendor/google.golang.org/appengine/search/field.go
new file mode 100644
index 000000000..707c2d8c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/field.go
@@ -0,0 +1,82 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+// Field is a name/value pair. A search index's document can be loaded and
+// saved as a sequence of Fields.
+type Field struct {
+ // Name is the field name. A valid field name matches /[A-Za-z][A-Za-z0-9_]*/.
+ Name string
+ // Value is the field value. The valid types are:
+ // - string,
+ // - search.Atom,
+ // - search.HTML,
+ // - time.Time (stored with millisecond precision),
+ // - float64,
+ // - GeoPoint.
+ Value interface{}
+ // Language is a two-letter ISO 639-1 code for the field's language,
+ // defaulting to "en" if nothing is specified. It may only be specified for
+ // fields of type string and search.HTML.
+ Language string
+ // Derived marks fields that were calculated as a result of a
+ // FieldExpression provided to Search. This field is ignored when saving a
+ // document.
+ Derived bool
+}
+
+// Facet is a name/value pair which is used to add categorical information to a
+// document.
+type Facet struct {
+ // Name is the facet name. A valid facet name matches /[A-Za-z][A-Za-z0-9_]*/.
+ // A facet name cannot be longer than 500 characters.
+ Name string
+ // Value is the facet value.
+ //
+ // When being used in documents (for example, in
+ // DocumentMetadata.Facets), the valid types are:
+ // - search.Atom,
+ // - float64.
+ //
+ // When being used in SearchOptions.Refinements or being returned
+ // in FacetResult, the valid types are:
+ // - search.Atom,
+ // - search.Range.
+ Value interface{}
+}
+
+// DocumentMetadata is a struct containing information describing a given document.
+type DocumentMetadata struct {
+ // Rank is an integer specifying the order the document will be returned in
+ // search results. If zero, the rank will be set to the number of seconds since
+ // 2011-01-01 00:00:00 UTC when being Put into an index.
+ Rank int
+ // Facets is the set of facets for this document.
+ Facets []Facet
+}
+
+// FieldLoadSaver can be converted from and to a slice of Fields
+// with additional document metadata.
+type FieldLoadSaver interface {
+ Load([]Field, *DocumentMetadata) error
+ Save() ([]Field, *DocumentMetadata, error)
+}
+
+// FieldList converts a []Field to implement FieldLoadSaver.
+type FieldList []Field
+
+// Load loads all of the provided fields into l.
+// It does not first reset *l to an empty slice.
+func (l *FieldList) Load(f []Field, _ *DocumentMetadata) error {
+ *l = append(*l, f...)
+ return nil
+}
+
+// Save returns all of l's fields as a slice of Fields.
+func (l *FieldList) Save() ([]Field, *DocumentMetadata, error) {
+ return *l, nil, nil
+}
+
+var _ FieldLoadSaver = (*FieldList)(nil)
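
Since FieldList implements FieldLoadSaver, it can stand in for a destination struct when the document schema is not known in advance; a minimal sketch, assuming it is an *Iterator obtained from Search or List:

    var fl search.FieldList
    id, err := it.Next(&fl)
    for _, f := range fl {
        fmt.Printf("%s: %s = %v\n", id, f.Name, f.Value)
    }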
diff --git a/vendor/google.golang.org/appengine/search/search.go b/vendor/google.golang.org/appengine/search/search.go
new file mode 100644
index 000000000..35a567d62
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/search.go
@@ -0,0 +1,1189 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search // import "google.golang.org/appengine/search"
+
+// TODO: let Put specify the document language: "en", "fr", etc. Also: order_id?? storage??
+// TODO: Index.GetAll (or Iterator.GetAll)?
+// TODO: struct <-> protobuf tests.
+// TODO: enforce Python's MIN_NUMBER_VALUE and MIN_DATE (which would disallow a zero
+// time.Time)? _MAXIMUM_STRING_LENGTH?
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/search"
+)
+
+const maxDocumentsPerPutDelete = 200
+
+var (
+ // ErrInvalidDocumentType is returned when methods like Put, Get or Next
+ // are passed a dst or src argument of invalid type.
+ ErrInvalidDocumentType = errors.New("search: invalid document type")
+
+ // ErrNoSuchDocument is returned when no document was found for a given ID.
+ ErrNoSuchDocument = errors.New("search: no such document")
+
+ // ErrTooManyDocuments is returned when the user passes too many documents to
+ // PutMulti or DeleteMulti.
+ ErrTooManyDocuments = fmt.Errorf("search: too many documents given to put or delete (max is %d)", maxDocumentsPerPutDelete)
+)
+
+// Atom is a document field whose contents are indexed as a single indivisible
+// string.
+type Atom string
+
+// HTML is a document field whose contents are indexed as HTML. Only text nodes
+// are indexed: "foo<b>bar" will be treated as "foobar".
+type HTML string
+
+// validIndexNameOrDocID is the Go equivalent of Python's
+// _ValidateVisiblePrintableAsciiNotReserved.
+func validIndexNameOrDocID(s string) bool {
+ if strings.HasPrefix(s, "!") {
+ return false
+ }
+ for _, c := range s {
+ if c < 0x21 || 0x7f <= c {
+ return false
+ }
+ }
+ return true
+}
+
+var (
+ fieldNameRE = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`)
+ languageRE = regexp.MustCompile(`^[a-z]{2}$`)
+)
+
+// validFieldName is the Go equivalent of Python's _CheckFieldName. It checks
+// the validity of both field and facet names.
+func validFieldName(s string) bool {
+ return len(s) <= 500 && fieldNameRE.MatchString(s)
+}
+
+// validDocRank checks that the rank is in the range [0, 2^31).
+func validDocRank(r int) bool {
+ return 0 <= r && r <= (1<<31-1)
+}
+
+// validLanguage checks that a language looks like ISO 639-1.
+func validLanguage(s string) bool {
+ return languageRE.MatchString(s)
+}
+
+// validFloat checks that f is in the range [-2147483647, 2147483647].
+func validFloat(f float64) bool {
+ return -(1<<31-1) <= f && f <= (1<<31-1)
+}
+
+// Index is an index of documents.
+type Index struct {
+ spec pb.IndexSpec
+}
+
+// orderIDEpoch forms the basis for populating OrderId on documents.
+var orderIDEpoch = time.Date(2011, 1, 1, 0, 0, 0, 0, time.UTC)
+
+// Open opens the index with the given name. The index is created if it does
+// not already exist.
+//
+// The name is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+func Open(name string) (*Index, error) {
+ if !validIndexNameOrDocID(name) {
+ return nil, fmt.Errorf("search: invalid index name %q", name)
+ }
+ return &Index{
+ spec: pb.IndexSpec{
+ Name: &name,
+ },
+ }, nil
+}
+
+// Put saves src to the index. If id is empty, a new ID is allocated by the
+// service and returned. If id is not empty, any existing index entry for that
+// ID is replaced.
+//
+// The ID is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+//
+// src must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+func (x *Index) Put(c context.Context, id string, src interface{}) (string, error) {
+ ids, err := x.PutMulti(c, []string{id}, []interface{}{src})
+ if err != nil {
+ return "", err
+ }
+ return ids[0], nil
+}
+
+// PutMulti is like Put, but is more efficient for adding multiple documents to
+// the index at once.
+//
+// Up to 200 documents can be added at once. ErrTooManyDocuments is returned if
+// you try to add more.
+//
+// ids can either be an empty slice (which means new IDs will be allocated for
+// each of the documents added) or a slice the same size as srcs.
+//
+// The error may be an instance of appengine.MultiError, in which case it will
+// be the same size as srcs and the individual errors inside will correspond
+// with the items in srcs.
+func (x *Index) PutMulti(c context.Context, ids []string, srcs []interface{}) ([]string, error) {
+ if len(ids) != 0 && len(srcs) != len(ids) {
+ return nil, fmt.Errorf("search: PutMulti expects ids and srcs slices of the same length")
+ }
+ if len(srcs) > maxDocumentsPerPutDelete {
+ return nil, ErrTooManyDocuments
+ }
+
+ docs := make([]*pb.Document, len(srcs))
+ for i, s := range srcs {
+ var err error
+ docs[i], err = saveDoc(s)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(ids) != 0 && ids[i] != "" {
+ if !validIndexNameOrDocID(ids[i]) {
+ return nil, fmt.Errorf("search: invalid ID %q", ids[i])
+ }
+ docs[i].Id = proto.String(ids[i])
+ }
+ }
+
+ // spec is modified by Call when applying the current Namespace, so copy it to
+ // avoid retaining the namespace beyond the scope of the Call.
+ spec := x.spec
+ req := &pb.IndexDocumentRequest{
+ Params: &pb.IndexDocumentParams{
+ Document: docs,
+ IndexSpec: &spec,
+ },
+ }
+ res := &pb.IndexDocumentResponse{}
+ if err := internal.Call(c, "search", "IndexDocument", req, res); err != nil {
+ return nil, err
+ }
+ multiErr, hasErr := make(appengine.MultiError, len(res.Status)), false
+ for i, s := range res.Status {
+ if s.GetCode() != pb.SearchServiceError_OK {
+ multiErr[i] = fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+ hasErr = true
+ }
+ }
+ if hasErr {
+ return res.DocId, multiErr
+ }
+
+ if len(res.Status) != len(docs) || len(res.DocId) != len(docs) {
+ return nil, fmt.Errorf("search: internal error: wrong number of results (%d Statuses, %d DocIDs, expected %d)",
+ len(res.Status), len(res.DocId), len(docs))
+ }
+ return res.DocId, nil
+}
+
+// Get loads the document with the given ID into dst.
+//
+// The ID is a human-readable ASCII string. It must be non-empty, contain no
+// whitespace characters and not start with "!".
+//
+// dst must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer. It is up to the caller to decide whether this error
+// is fatal, recoverable, or ignorable.
+func (x *Index) Get(c context.Context, id string, dst interface{}) error {
+ if id == "" || !validIndexNameOrDocID(id) {
+ return fmt.Errorf("search: invalid ID %q", id)
+ }
+ req := &pb.ListDocumentsRequest{
+ Params: &pb.ListDocumentsParams{
+ IndexSpec: &x.spec,
+ StartDocId: proto.String(id),
+ Limit: proto.Int32(1),
+ },
+ }
+ res := &pb.ListDocumentsResponse{}
+ if err := internal.Call(c, "search", "ListDocuments", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ if len(res.Document) != 1 || res.Document[0].GetId() != id {
+ return ErrNoSuchDocument
+ }
+ return loadDoc(dst, res.Document[0], nil)
+}
+
+// Delete deletes a document from the index.
+func (x *Index) Delete(c context.Context, id string) error {
+ return x.DeleteMulti(c, []string{id})
+}
+
+// DeleteMulti deletes multiple documents from the index.
+//
+// The returned error may be an instance of appengine.MultiError, in which case
+// it will be the same size as ids and the individual errors inside will
+// correspond with the items in ids.
+func (x *Index) DeleteMulti(c context.Context, ids []string) error {
+ if len(ids) > maxDocumentsPerPutDelete {
+ return ErrTooManyDocuments
+ }
+
+ req := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: ids,
+ IndexSpec: &x.spec,
+ },
+ }
+ res := &pb.DeleteDocumentResponse{}
+ if err := internal.Call(c, "search", "DeleteDocument", req, res); err != nil {
+ return err
+ }
+ if len(res.Status) != len(ids) {
+ return fmt.Errorf("search: internal error: wrong number of results (%d, expected %d)",
+ len(res.Status), len(ids))
+ }
+ multiErr, hasErr := make(appengine.MultiError, len(ids)), false
+ for i, s := range res.Status {
+ if s.GetCode() != pb.SearchServiceError_OK {
+ multiErr[i] = fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+ hasErr = true
+ }
+ }
+ if hasErr {
+ return multiErr
+ }
+ return nil
+}
+
+// List lists all of the documents in an index. The documents are returned in
+// increasing ID order.
+func (x *Index) List(c context.Context, opts *ListOptions) *Iterator {
+ t := &Iterator{
+ c: c,
+ index: x,
+ count: -1,
+ listInclusive: true,
+ more: moreList,
+ }
+ if opts != nil {
+ t.listStartID = opts.StartID
+ t.limit = opts.Limit
+ t.idsOnly = opts.IDsOnly
+ }
+ return t
+}
+
+func moreList(t *Iterator) error {
+ req := &pb.ListDocumentsRequest{
+ Params: &pb.ListDocumentsParams{
+ IndexSpec: &t.index.spec,
+ },
+ }
+ if t.listStartID != "" {
+ req.Params.StartDocId = &t.listStartID
+ req.Params.IncludeStartDoc = &t.listInclusive
+ }
+ if t.limit > 0 {
+ req.Params.Limit = proto.Int32(int32(t.limit))
+ }
+ if t.idsOnly {
+ req.Params.KeysOnly = &t.idsOnly
+ }
+
+ res := &pb.ListDocumentsResponse{}
+ if err := internal.Call(t.c, "search", "ListDocuments", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ t.listRes = res.Document
+ t.listStartID, t.listInclusive, t.more = "", false, nil
+ if len(res.Document) != 0 && t.limit <= 0 {
+ if id := res.Document[len(res.Document)-1].GetId(); id != "" {
+ t.listStartID, t.more = id, moreList
+ }
+ }
+ return nil
+}
+
+// ListOptions are the options for listing documents in an index. Passing a nil
+// *ListOptions is equivalent to using the default values.
+type ListOptions struct {
+ // StartID is the inclusive lower bound for the ID of the returned
+ // documents. The zero value means all documents will be returned.
+ StartID string
+
+ // Limit is the maximum number of documents to return. The zero value
+ // indicates no limit.
+ Limit int
+
+ // IDsOnly indicates that only document IDs should be returned for the list
+ // operation; no document fields are populated.
+ IDsOnly bool
+}
+
+// Search searches the index for the given query.
+func (x *Index) Search(c context.Context, query string, opts *SearchOptions) *Iterator {
+ t := &Iterator{
+ c: c,
+ index: x,
+ searchQuery: query,
+ more: moreSearch,
+ }
+ if opts != nil {
+ if opts.Cursor != "" {
+ if opts.Offset != 0 {
+ return errIter("at most one of Cursor and Offset may be specified")
+ }
+ t.searchCursor = proto.String(string(opts.Cursor))
+ }
+ t.limit = opts.Limit
+ t.fields = opts.Fields
+ t.idsOnly = opts.IDsOnly
+ t.sort = opts.Sort
+ t.exprs = opts.Expressions
+ t.refinements = opts.Refinements
+ t.facetOpts = opts.Facets
+ t.searchOffset = opts.Offset
+ t.countAccuracy = opts.CountAccuracy
+ }
+ return t
+}
+
+func moreSearch(t *Iterator) error {
+ // We use per-result (rather than single/per-page) cursors since this
+ // lets us return a Cursor for every iterator document. The two cursor
+ // types are largely interchangeable: a page cursor is the same as the
+ // last per-result cursor in a given search response.
+ req := &pb.SearchRequest{
+ Params: &pb.SearchParams{
+ IndexSpec: &t.index.spec,
+ Query: &t.searchQuery,
+ Cursor: t.searchCursor,
+ CursorType: pb.SearchParams_PER_RESULT.Enum(),
+ FieldSpec: &pb.FieldSpec{
+ Name: t.fields,
+ },
+ },
+ }
+ if t.limit > 0 {
+ req.Params.Limit = proto.Int32(int32(t.limit))
+ }
+ if t.searchOffset > 0 {
+ req.Params.Offset = proto.Int32(int32(t.searchOffset))
+ t.searchOffset = 0
+ }
+ if t.countAccuracy > 0 {
+ req.Params.MatchedCountAccuracy = proto.Int32(int32(t.countAccuracy))
+ }
+ if t.idsOnly {
+ req.Params.KeysOnly = &t.idsOnly
+ }
+ if t.sort != nil {
+ if err := sortToProto(t.sort, req.Params); err != nil {
+ return err
+ }
+ }
+ if t.refinements != nil {
+ if err := refinementsToProto(t.refinements, req.Params); err != nil {
+ return err
+ }
+ }
+ for _, e := range t.exprs {
+ req.Params.FieldSpec.Expression = append(req.Params.FieldSpec.Expression, &pb.FieldSpec_Expression{
+ Name: proto.String(e.Name),
+ Expression: proto.String(e.Expr),
+ })
+ }
+ for _, f := range t.facetOpts {
+ if err := f.setParams(req.Params); err != nil {
+ return fmt.Errorf("bad FacetSearchOption: %v", err)
+ }
+ }
+ // Don't repeat facet search.
+ t.facetOpts = nil
+
+ res := &pb.SearchResponse{}
+ if err := internal.Call(t.c, "search", "Search", req, res); err != nil {
+ return err
+ }
+ if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+ return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+ }
+ t.searchRes = res.Result
+ if len(res.FacetResult) > 0 {
+ t.facetRes = res.FacetResult
+ }
+ t.count = int(*res.MatchedCount)
+ if t.limit > 0 {
+ t.more = nil
+ } else {
+ t.more = moreSearch
+ }
+ return nil
+}
+
+// SearchOptions are the options for searching an index. Passing a nil
+// *SearchOptions is equivalent to using the default values.
+type SearchOptions struct {
+ // Limit is the maximum number of documents to return. The zero value
+ // indicates no limit.
+ Limit int
+
+ // IDsOnly indicates that only document IDs should be returned for the search
+ // operation; no document fields are populated.
+ IDsOnly bool
+
+ // Sort controls the ordering of search results.
+ Sort *SortOptions
+
+ // Fields specifies which document fields to include in the results. If omitted,
+ // all document fields are returned. No more than 100 fields may be specified.
+ Fields []string
+
+ // Expressions specifies additional computed fields to add to each returned
+ // document.
+ Expressions []FieldExpression
+
+ // Facets controls what facet information is returned for these search results.
+ // If no options are specified, no facet results will be returned.
+ Facets []FacetSearchOption
+
+ // Refinements filters the returned documents by requiring them to contain facets
+ // with specific values. Refinements are applied in conjunction for facets with
+ // different names, and in disjunction otherwise.
+ Refinements []Facet
+
+ // Cursor causes the results to commence with the first document after
+ // the document associated with the cursor.
+ Cursor Cursor
+
+ // Offset specifies the number of documents to skip over before returning results.
+ // When specified, Cursor must be empty.
+ Offset int
+
+ // CountAccuracy specifies the maximum result count that can be expected to
+ // be accurate. If zero, the count accuracy defaults to 20.
+ CountAccuracy int
+}
+
+// Cursor represents an iterator's position.
+//
+// The string value of a cursor is web-safe. It can be saved and restored
+// for later use.
+type Cursor string
+
+// FieldExpression defines a custom expression to evaluate for each result.
+type FieldExpression struct {
+ // Name is the name to use for the computed field.
+ Name string
+
+ // Expr is evaluated to provide a custom content snippet for each document.
+ // See https://cloud.google.com/appengine/docs/standard/go/search/options for
+ // the supported expression syntax.
+ Expr string
+}
+
+// FacetSearchOption controls what facet information is returned in search results.
+type FacetSearchOption interface {
+ setParams(*pb.SearchParams) error
+}
+
+// AutoFacetDiscovery returns a FacetSearchOption which enables automatic facet
+// discovery for the search. Automatic facet discovery looks for the facets
+// which appear the most often in the aggregate in the matched documents.
+//
+// The maximum number of facets returned is controlled by facetLimit, and the
+// maximum number of values per facet by valueLimit. A limit of zero indicates
+// a default limit should be used.
+func AutoFacetDiscovery(facetLimit, valueLimit int) FacetSearchOption {
+ return &autoFacetOpt{facetLimit, valueLimit}
+}
+
+type autoFacetOpt struct {
+ facetLimit, valueLimit int
+}
+
+const defaultAutoFacetLimit = 10 // As per python runtime search.py.
+
+func (o *autoFacetOpt) setParams(params *pb.SearchParams) error {
+ lim := int32(o.facetLimit)
+ if lim == 0 {
+ lim = defaultAutoFacetLimit
+ }
+ params.AutoDiscoverFacetCount = &lim
+ if o.valueLimit > 0 {
+ params.FacetAutoDetectParam = &pb.FacetAutoDetectParam{
+ ValueLimit: proto.Int32(int32(o.valueLimit)),
+ }
+ }
+ return nil
+}
+
+// FacetDiscovery returns a FacetSearchOption which selects a facet to be
+// returned with the search results. By default, the most frequently
+// occurring values for that facet will be returned. However, you can also
+// specify a list of particular Atoms or specific Ranges to return.
+func FacetDiscovery(name string, value ...interface{}) FacetSearchOption {
+ return &facetOpt{name, value}
+}
+
+type facetOpt struct {
+ name string
+ values []interface{}
+}
+
+func (o *facetOpt) setParams(params *pb.SearchParams) error {
+ req := &pb.FacetRequest{Name: &o.name}
+ params.IncludeFacet = append(params.IncludeFacet, req)
+ if len(o.values) == 0 {
+ return nil
+ }
+ vtype := reflect.TypeOf(o.values[0])
+ reqParam := &pb.FacetRequestParam{}
+ for _, v := range o.values {
+ if reflect.TypeOf(v) != vtype {
+ return errors.New("values must all be Atom, or must all be Range")
+ }
+ switch v := v.(type) {
+ case Atom:
+ reqParam.ValueConstraint = append(reqParam.ValueConstraint, string(v))
+ case Range:
+ rng, err := rangeToProto(v)
+ if err != nil {
+ return fmt.Errorf("invalid range: %v", err)
+ }
+ reqParam.Range = append(reqParam.Range, rng)
+ default:
+ return fmt.Errorf("unsupported value type %T", v)
+ }
+ }
+ req.Params = reqParam
+ return nil
+}
+
+// FacetDocumentDepth returns a FacetSearchOption which controls the number of
+// documents to be evaluated when preparing facet results.
+func FacetDocumentDepth(depth int) FacetSearchOption {
+ return facetDepthOpt(depth)
+}
+
+type facetDepthOpt int
+
+func (o facetDepthOpt) setParams(params *pb.SearchParams) error {
+ params.FacetDepth = proto.Int32(int32(o))
+ return nil
+}
+
+// FacetResult represents the number of times a particular facet and value
+// appeared in the documents matching a search request.
+type FacetResult struct {
+ Facet
+
+ // Count is the number of times this specific facet and value appeared in the
+ // matching documents.
+ Count int
+}
+
+// Range represents a numeric range with inclusive start and exclusive end.
+// Start may be specified as math.Inf(-1) to indicate there is no minimum
+// value, and End may similarly be specified as math.Inf(1); at least one of
+// Start or End must be a finite number.
+type Range struct {
+ Start, End float64
+}
+
+var (
+ negInf = math.Inf(-1)
+ posInf = math.Inf(1)
+)
+
+// AtLeast returns a Range matching any value greater than, or equal to, min.
+func AtLeast(min float64) Range {
+ return Range{Start: min, End: posInf}
+}
+
+// LessThan returns a Range matching any value less than max.
+func LessThan(max float64) Range {
+ return Range{Start: negInf, End: max}
+}
+
+// SortOptions control the ordering and scoring of search results.
+type SortOptions struct {
+ // Expressions is a slice of expressions representing a multi-dimensional
+ // sort.
+ Expressions []SortExpression
+
+ // Scorer, when specified, will cause the documents to be scored according to
+ // search term frequency.
+ Scorer Scorer
+
+ // Limit is the maximum number of objects to score and/or sort. Limit cannot
+ // be more than 10,000. The zero value indicates a default limit.
+ Limit int
+}
+
+// SortExpression defines a single dimension for sorting a document.
+type SortExpression struct {
+ // Expr is evaluated to provide a sorting value for each document.
+ // See https://cloud.google.com/appengine/docs/standard/go/search/options for
+ // the supported expression syntax.
+ Expr string
+
+ // Reverse causes the documents to be sorted in ascending order rather
+ // than the default descending order.
+ Reverse bool
+
+ // The default value to use when no field is present or the expression
+ // cannot be calculated for a document. For text sorts, Default must
+ // be of type string; for numeric sorts, float64.
+ Default interface{}
+}
+
+// A Scorer defines how a document is scored.
+type Scorer interface {
+ toProto(*pb.ScorerSpec)
+}
+
+type enumScorer struct {
+ enum pb.ScorerSpec_Scorer
+}
+
+func (e enumScorer) toProto(spec *pb.ScorerSpec) {
+ spec.Scorer = e.enum.Enum()
+}
+
+var (
+ // MatchScorer assigns a score based on term frequency in a document.
+ MatchScorer Scorer = enumScorer{pb.ScorerSpec_MATCH_SCORER}
+
+ // RescoringMatchScorer assigns a score based on the quality of the query
+ // match. It is similar to a MatchScorer but uses a more complex scoring
+ // algorithm based on match term frequency and other factors like field type.
+ // Please be aware that this algorithm is continually refined and can change
+ // over time without notice. This means that the ordering of search results
+ // that use this scorer can also change without notice.
+ RescoringMatchScorer Scorer = enumScorer{pb.ScorerSpec_RESCORING_MATCH_SCORER}
+)
+
+func sortToProto(sort *SortOptions, params *pb.SearchParams) error {
+ for _, e := range sort.Expressions {
+ spec := &pb.SortSpec{
+ SortExpression: proto.String(e.Expr),
+ }
+ if e.Reverse {
+ spec.SortDescending = proto.Bool(false)
+ }
+ if e.Default != nil {
+ switch d := e.Default.(type) {
+ case float64:
+ spec.DefaultValueNumeric = &d
+ case string:
+ spec.DefaultValueText = &d
+ default:
+ return fmt.Errorf("search: invalid Default type %T for expression %q", d, e.Expr)
+ }
+ }
+ params.SortSpec = append(params.SortSpec, spec)
+ }
+
+ spec := &pb.ScorerSpec{}
+ if sort.Limit > 0 {
+ spec.Limit = proto.Int32(int32(sort.Limit))
+ params.ScorerSpec = spec
+ }
+ if sort.Scorer != nil {
+ sort.Scorer.toProto(spec)
+ params.ScorerSpec = spec
+ }
+
+ return nil
+}
+
+func refinementsToProto(refinements []Facet, params *pb.SearchParams) error {
+ for _, r := range refinements {
+ ref := &pb.FacetRefinement{
+ Name: proto.String(r.Name),
+ }
+ switch v := r.Value.(type) {
+ case Atom:
+ ref.Value = proto.String(string(v))
+ case Range:
+ rng, err := rangeToProto(v)
+ if err != nil {
+ return fmt.Errorf("search: refinement for facet %q: %v", r.Name, err)
+ }
+ // Unfortunately there are two identical messages for identifying Facet ranges.
+ ref.Range = &pb.FacetRefinement_Range{Start: rng.Start, End: rng.End}
+ default:
+ return fmt.Errorf("search: unsupported refinement for facet %q of type %T", r.Name, v)
+ }
+ params.FacetRefinement = append(params.FacetRefinement, ref)
+ }
+ return nil
+}
+
+func rangeToProto(r Range) (*pb.FacetRange, error) {
+ rng := &pb.FacetRange{}
+ if r.Start != negInf {
+ if !validFloat(r.Start) {
+ return nil, errors.New("invalid value for Start")
+ }
+ rng.Start = proto.String(strconv.FormatFloat(r.Start, 'e', -1, 64))
+ } else if r.End == posInf {
+ return nil, errors.New("either Start or End must be finite")
+ }
+ if r.End != posInf {
+ if !validFloat(r.End) {
+ return nil, errors.New("invalid value for End")
+ }
+ rng.End = proto.String(strconv.FormatFloat(r.End, 'e', -1, 64))
+ }
+ return rng, nil
+}
+
+func protoToRange(rng *pb.FacetRefinement_Range) Range {
+ r := Range{Start: negInf, End: posInf}
+ if x, err := strconv.ParseFloat(rng.GetStart(), 64); err == nil {
+ r.Start = x
+ }
+ if x, err := strconv.ParseFloat(rng.GetEnd(), 64); err == nil {
+ r.End = x
+ }
+ return r
+}
+
+// Iterator is the result of searching an index for a query or listing an
+// index.
+type Iterator struct {
+ c context.Context
+ index *Index
+ err error
+
+ listRes []*pb.Document
+ listStartID string
+ listInclusive bool
+
+ searchRes []*pb.SearchResult
+ facetRes []*pb.FacetResult
+ searchQuery string
+ searchCursor *string
+ searchOffset int
+ sort *SortOptions
+
+ fields []string
+ exprs []FieldExpression
+ refinements []Facet
+ facetOpts []FacetSearchOption
+
+ more func(*Iterator) error
+
+ count int
+ countAccuracy int
+ limit int // items left to return; 0 for unlimited.
+ idsOnly bool
+}
+
+// errIter returns an iterator that only returns the given error.
+func errIter(err string) *Iterator {
+ return &Iterator{
+ err: errors.New(err),
+ }
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("search: query has no more results")
+
+// Count returns an approximation of the number of documents matched by the
+// query. It is only valid to call for iterators returned by Search.
+func (t *Iterator) Count() int { return t.count }
+
+// fetchMore retrieves more results, if there are no errors or pending results.
+func (t *Iterator) fetchMore() {
+ if t.err == nil && len(t.listRes)+len(t.searchRes) == 0 && t.more != nil {
+ t.err = t.more(t)
+ }
+}
+
+// Next returns the ID of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// dst must be a non-nil struct pointer, implement the FieldLoadSaver
+// interface, or be a nil interface value. If a non-nil dst is provided, it
+// will be filled with the indexed fields. dst is ignored if this iterator was
+// created with an IDsOnly option.
+func (t *Iterator) Next(dst interface{}) (string, error) {
+ t.fetchMore()
+ if t.err != nil {
+ return "", t.err
+ }
+
+ var doc *pb.Document
+ var exprs []*pb.Field
+ switch {
+ case len(t.listRes) != 0:
+ doc = t.listRes[0]
+ t.listRes = t.listRes[1:]
+ case len(t.searchRes) != 0:
+ doc = t.searchRes[0].Document
+ exprs = t.searchRes[0].Expression
+ t.searchCursor = t.searchRes[0].Cursor
+ t.searchRes = t.searchRes[1:]
+ default:
+ return "", Done
+ }
+ if doc == nil {
+ return "", errors.New("search: internal error: no document returned")
+ }
+ if !t.idsOnly && dst != nil {
+ if err := loadDoc(dst, doc, exprs); err != nil {
+ return "", err
+ }
+ }
+ return doc.GetId(), nil
+}
+
+// Cursor returns the cursor associated with the current document (that is,
+// the document most recently returned by a call to Next).
+//
+// Passing this cursor in a future call to Search will cause those results
+// to commence with the first document after the current document.
+func (t *Iterator) Cursor() Cursor {
+ if t.searchCursor == nil {
+ return ""
+ }
+ return Cursor(*t.searchCursor)
+}
+
+// Facets returns the facets found within the search results, if any facets
+// were requested in the SearchOptions.
+func (t *Iterator) Facets() ([][]FacetResult, error) {
+ t.fetchMore()
+ if t.err != nil && t.err != Done {
+ return nil, t.err
+ }
+
+ var facets [][]FacetResult
+ for _, f := range t.facetRes {
+ fres := make([]FacetResult, 0, len(f.Value))
+ for _, v := range f.Value {
+ ref := v.Refinement
+ facet := FacetResult{
+ Facet: Facet{Name: ref.GetName()},
+ Count: int(v.GetCount()),
+ }
+ if ref.Value != nil {
+ facet.Value = Atom(*ref.Value)
+ } else {
+ facet.Value = protoToRange(ref.Range)
+ }
+ fres = append(fres, facet)
+ }
+ facets = append(facets, fres)
+ }
+ return facets, nil
+}
+
+// saveDoc converts from a struct pointer or
+// FieldLoadSaver to the Document protobuf.
+func saveDoc(src interface{}) (*pb.Document, error) {
+ var err error
+ var fields []Field
+ var meta *DocumentMetadata
+ switch x := src.(type) {
+ case FieldLoadSaver:
+ fields, meta, err = x.Save()
+ default:
+ fields, meta, err = saveStructWithMeta(src)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ fieldsProto, err := fieldsToProto(fields)
+ if err != nil {
+ return nil, err
+ }
+ d := &pb.Document{
+ Field: fieldsProto,
+ OrderId: proto.Int32(int32(time.Since(orderIDEpoch).Seconds())),
+ OrderIdSource: pb.Document_DEFAULTED.Enum(),
+ }
+ if meta != nil {
+ if meta.Rank != 0 {
+ if !validDocRank(meta.Rank) {
+ return nil, fmt.Errorf("search: invalid rank %d, must be [0, 2^31)", meta.Rank)
+ }
+ *d.OrderId = int32(meta.Rank)
+ d.OrderIdSource = pb.Document_SUPPLIED.Enum()
+ }
+ if len(meta.Facets) > 0 {
+ facets, err := facetsToProto(meta.Facets)
+ if err != nil {
+ return nil, err
+ }
+ d.Facet = facets
+ }
+ }
+ return d, nil
+}
+
+func fieldsToProto(src []Field) ([]*pb.Field, error) {
+ // Maps to catch duplicate time or numeric fields.
+ timeFields, numericFields := make(map[string]bool), make(map[string]bool)
+ dst := make([]*pb.Field, 0, len(src))
+ for _, f := range src {
+ if !validFieldName(f.Name) {
+ return nil, fmt.Errorf("search: invalid field name %q", f.Name)
+ }
+ fieldValue := &pb.FieldValue{}
+ switch x := f.Value.(type) {
+ case string:
+ fieldValue.Type = pb.FieldValue_TEXT.Enum()
+ fieldValue.StringValue = proto.String(x)
+ case Atom:
+ fieldValue.Type = pb.FieldValue_ATOM.Enum()
+ fieldValue.StringValue = proto.String(string(x))
+ case HTML:
+ fieldValue.Type = pb.FieldValue_HTML.Enum()
+ fieldValue.StringValue = proto.String(string(x))
+ case time.Time:
+ if timeFields[f.Name] {
+ return nil, fmt.Errorf("search: duplicate time field %q", f.Name)
+ }
+ timeFields[f.Name] = true
+ fieldValue.Type = pb.FieldValue_DATE.Enum()
+ fieldValue.StringValue = proto.String(strconv.FormatInt(x.UnixNano()/1e6, 10))
+ case float64:
+ if numericFields[f.Name] {
+ return nil, fmt.Errorf("search: duplicate numeric field %q", f.Name)
+ }
+ if !validFloat(x) {
+ return nil, fmt.Errorf("search: numeric field %q with invalid value %f", f.Name, x)
+ }
+ numericFields[f.Name] = true
+ fieldValue.Type = pb.FieldValue_NUMBER.Enum()
+ fieldValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))
+ case appengine.GeoPoint:
+ if !x.Valid() {
+ return nil, fmt.Errorf(
+ "search: GeoPoint field %q with invalid value %v",
+ f.Name, x)
+ }
+ fieldValue.Type = pb.FieldValue_GEO.Enum()
+ fieldValue.Geo = &pb.FieldValue_Geo{
+ Lat: proto.Float64(x.Lat),
+ Lng: proto.Float64(x.Lng),
+ }
+ default:
+ return nil, fmt.Errorf("search: unsupported field type: %v", reflect.TypeOf(f.Value))
+ }
+ if f.Language != "" {
+ switch f.Value.(type) {
+ case string, HTML:
+ if !validLanguage(f.Language) {
+ return nil, fmt.Errorf("search: invalid language for field %q: %q", f.Name, f.Language)
+ }
+ fieldValue.Language = proto.String(f.Language)
+ default:
+ return nil, fmt.Errorf("search: setting language not supported for field %q of type %T", f.Name, f.Value)
+ }
+ }
+ if p := fieldValue.StringValue; p != nil && !utf8.ValidString(*p) {
+ return nil, fmt.Errorf("search: %q field is invalid UTF-8: %q", f.Name, *p)
+ }
+ dst = append(dst, &pb.Field{
+ Name: proto.String(f.Name),
+ Value: fieldValue,
+ })
+ }
+ return dst, nil
+}
+
+func facetsToProto(src []Facet) ([]*pb.Facet, error) {
+ dst := make([]*pb.Facet, 0, len(src))
+ for _, f := range src {
+ if !validFieldName(f.Name) {
+ return nil, fmt.Errorf("search: invalid facet name %q", f.Name)
+ }
+ facetValue := &pb.FacetValue{}
+ switch x := f.Value.(type) {
+ case Atom:
+ if !utf8.ValidString(string(x)) {
+ return nil, fmt.Errorf("search: %q facet is invalid UTF-8: %q", f.Name, x)
+ }
+ facetValue.Type = pb.FacetValue_ATOM.Enum()
+ facetValue.StringValue = proto.String(string(x))
+ case float64:
+ if !validFloat(x) {
+ return nil, fmt.Errorf("search: numeric facet %q with invalid value %f", f.Name, x)
+ }
+ facetValue.Type = pb.FacetValue_NUMBER.Enum()
+ facetValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))
+ default:
+ return nil, fmt.Errorf("search: unsupported facet type: %v", reflect.TypeOf(f.Value))
+ }
+ dst = append(dst, &pb.Facet{
+ Name: proto.String(f.Name),
+ Value: facetValue,
+ })
+ }
+ return dst, nil
+}
+
+// loadDoc converts from protobufs to a struct pointer or
+// FieldLoadSaver. The src param provides the document's
+// stored fields and facets, and any document metadata. An additional slice of
+// fields, exprs, may optionally be provided to contain any derived expressions
+// requested by the developer.
+func loadDoc(dst interface{}, src *pb.Document, exprs []*pb.Field) (err error) {
+ fields, err := protoToFields(src.Field)
+ if err != nil {
+ return err
+ }
+ facets, err := protoToFacets(src.Facet)
+ if err != nil {
+ return err
+ }
+ if len(exprs) > 0 {
+ exprFields, err := protoToFields(exprs)
+ if err != nil {
+ return err
+ }
+ // Mark each field as derived.
+ for i := range exprFields {
+ exprFields[i].Derived = true
+ }
+ fields = append(fields, exprFields...)
+ }
+ meta := &DocumentMetadata{
+ Rank: int(src.GetOrderId()),
+ Facets: facets,
+ }
+ switch x := dst.(type) {
+ case FieldLoadSaver:
+ return x.Load(fields, meta)
+ default:
+ return loadStructWithMeta(dst, fields, meta)
+ }
+}
+
+func protoToFields(fields []*pb.Field) ([]Field, error) {
+ dst := make([]Field, 0, len(fields))
+ for _, field := range fields {
+ fieldValue := field.GetValue()
+ f := Field{
+ Name: field.GetName(),
+ }
+ switch fieldValue.GetType() {
+ case pb.FieldValue_TEXT:
+ f.Value = fieldValue.GetStringValue()
+ f.Language = fieldValue.GetLanguage()
+ case pb.FieldValue_ATOM:
+ f.Value = Atom(fieldValue.GetStringValue())
+ case pb.FieldValue_HTML:
+ f.Value = HTML(fieldValue.GetStringValue())
+ f.Language = fieldValue.GetLanguage()
+ case pb.FieldValue_DATE:
+ sv := fieldValue.GetStringValue()
+ millis, err := strconv.ParseInt(sv, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("search: internal error: bad time.Time encoding %q: %v", sv, err)
+ }
+ f.Value = time.Unix(0, millis*1e6)
+ case pb.FieldValue_NUMBER:
+ sv := fieldValue.GetStringValue()
+ x, err := strconv.ParseFloat(sv, 64)
+ if err != nil {
+ return nil, err
+ }
+ f.Value = x
+ case pb.FieldValue_GEO:
+ geoValue := fieldValue.GetGeo()
+ geoPoint := appengine.GeoPoint{Lat: geoValue.GetLat(), Lng: geoValue.GetLng()}
+ if !geoPoint.Valid() {
+ return nil, fmt.Errorf("search: internal error: invalid GeoPoint encoding: %v", geoPoint)
+ }
+ f.Value = geoPoint
+ default:
+ return nil, fmt.Errorf("search: internal error: unknown data type %s", fieldValue.GetType())
+ }
+ dst = append(dst, f)
+ }
+ return dst, nil
+}
+
+func protoToFacets(facets []*pb.Facet) ([]Facet, error) {
+ if len(facets) == 0 {
+ return nil, nil
+ }
+ dst := make([]Facet, 0, len(facets))
+ for _, facet := range facets {
+ facetValue := facet.GetValue()
+ f := Facet{
+ Name: facet.GetName(),
+ }
+ switch facetValue.GetType() {
+ case pb.FacetValue_ATOM:
+ f.Value = Atom(facetValue.GetStringValue())
+ case pb.FacetValue_NUMBER:
+ sv := facetValue.GetStringValue()
+ x, err := strconv.ParseFloat(sv, 64)
+ if err != nil {
+ return nil, err
+ }
+ f.Value = x
+ default:
+ return nil, fmt.Errorf("search: internal error: unknown data type %s", facetValue.GetType())
+ }
+ dst = append(dst, f)
+ }
+ return dst, nil
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ set := func(s **string) {
+ if *s == nil {
+ *s = &namespace
+ }
+ }
+ switch m := m.(type) {
+ case *pb.IndexDocumentRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.ListDocumentsRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.DeleteDocumentRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ case *pb.SearchRequest:
+ set(&m.Params.IndexSpec.Namespace)
+ }
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("search", pb.SearchServiceError_ErrorCode_name)
+ internal.NamespaceMods["search"] = namespaceMod
+}
diff --git a/vendor/google.golang.org/appengine/search/search_test.go b/vendor/google.golang.org/appengine/search/search_test.go
new file mode 100644
index 000000000..ef1409c19
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/search_test.go
@@ -0,0 +1,1270 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/search"
+)
+
+type TestDoc struct {
+ String string
+ Atom Atom
+ HTML HTML
+ Float float64
+ Location appengine.GeoPoint
+ Time time.Time
+}
+
+type FieldListWithMeta struct {
+ Fields FieldList
+ Meta *DocumentMetadata
+}
+
+func (f *FieldListWithMeta) Load(fields []Field, meta *DocumentMetadata) error {
+ f.Meta = meta
+ return f.Fields.Load(fields, nil)
+}
+
+func (f *FieldListWithMeta) Save() ([]Field, *DocumentMetadata, error) {
+ fields, _, err := f.Fields.Save()
+ return fields, f.Meta, err
+}
+
+// Assert that FieldListWithMeta satisfies FieldLoadSaver
+var _ FieldLoadSaver = &FieldListWithMeta{}
+
+var (
+ float = 3.14159
+ floatOut = "3.14159e+00"
+ latitude = 37.3894
+ longitude = 122.0819
+ testGeo = appengine.GeoPoint{latitude, longitude}
+ testString = "foo<b>bar"
+ testTime = time.Unix(1337324400, 0)
+ testTimeOut = "1337324400000"
+ searchMeta = &DocumentMetadata{
+ Rank: 42,
+ }
+ searchDoc = TestDoc{
+ String: testString,
+ Atom: Atom(testString),
+ HTML: HTML(testString),
+ Float: float,
+ Location: testGeo,
+ Time: testTime,
+ }
+ searchFields = FieldList{
+ Field{Name: "String", Value: testString},
+ Field{Name: "Atom", Value: Atom(testString)},
+ Field{Name: "HTML", Value: HTML(testString)},
+ Field{Name: "Float", Value: float},
+ Field{Name: "Location", Value: testGeo},
+ Field{Name: "Time", Value: testTime},
+ }
+ // searchFieldsWithLang is a copy of the searchFields with the Language field
+ // set on text/HTML Fields.
+ searchFieldsWithLang = FieldList{}
+ protoFields = []*pb.Field{
+ newStringValueField("String", testString, pb.FieldValue_TEXT),
+ newStringValueField("Atom", testString, pb.FieldValue_ATOM),
+ newStringValueField("HTML", testString, pb.FieldValue_HTML),
+ newStringValueField("Float", floatOut, pb.FieldValue_NUMBER),
+ {
+ Name: proto.String("Location"),
+ Value: &pb.FieldValue{
+ Geo: &pb.FieldValue_Geo{
+ Lat: proto.Float64(latitude),
+ Lng: proto.Float64(longitude),
+ },
+ Type: pb.FieldValue_GEO.Enum(),
+ },
+ },
+ newStringValueField("Time", testTimeOut, pb.FieldValue_DATE),
+ }
+)
+
+func init() {
+ for _, f := range searchFields {
+ if f.Name == "String" || f.Name == "HTML" {
+ f.Language = "en"
+ }
+ searchFieldsWithLang = append(searchFieldsWithLang, f)
+ }
+}
+
+func newStringValueField(name, value string, valueType pb.FieldValue_ContentType) *pb.Field {
+ return &pb.Field{
+ Name: proto.String(name),
+ Value: &pb.FieldValue{
+ StringValue: proto.String(value),
+ Type: valueType.Enum(),
+ },
+ }
+}
+
+func newFacet(name, value string, valueType pb.FacetValue_ContentType) *pb.Facet {
+ return &pb.Facet{
+ Name: proto.String(name),
+ Value: &pb.FacetValue{
+ StringValue: proto.String(value),
+ Type: valueType.Enum(),
+ },
+ }
+}
+
+func TestValidIndexNameOrDocID(t *testing.T) {
+ testCases := []struct {
+ s string
+ want bool
+ }{
+ {"", true},
+ {"!", false},
+ {"$", true},
+ {"!bad", false},
+ {"good!", true},
+ {"alsoGood", true},
+ {"has spaces", false},
+ {"is_inva\xffid_UTF-8", false},
+ {"is_non-ASCïI", false},
+ {"underscores_are_ok", true},
+ }
+ for _, tc := range testCases {
+ if got := validIndexNameOrDocID(tc.s); got != tc.want {
+ t.Errorf("%q: got %v, want %v", tc.s, got, tc.want)
+ }
+ }
+}
+
+func TestLoadDoc(t *testing.T) {
+ got, want := TestDoc{}, searchDoc
+ if err := loadDoc(&got, &pb.Document{Field: protoFields}, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if got != want {
+ t.Errorf("loadDoc: got %v, wanted %v", got, want)
+ }
+}
+
+func TestSaveDoc(t *testing.T) {
+ got, err := saveDoc(&searchDoc)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := protoFields
+ if !reflect.DeepEqual(got.Field, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveDocUsesDefaultedRankIfNotSpecified(t *testing.T) {
+ got, err := saveDoc(&searchDoc)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ orderIdSource := got.GetOrderIdSource()
+ if orderIdSource != pb.Document_DEFAULTED {
+ t.Errorf("OrderIdSource: got %v, wanted DEFAULTED", orderIdSource)
+ }
+}
+
+func TestLoadFieldList(t *testing.T) {
+ var got FieldList
+ want := searchFieldsWithLang
+ if err := loadDoc(&got, &pb.Document{Field: protoFields}, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLangFields(t *testing.T) {
+ fl := &FieldList{
+ {Name: "Foo", Value: "I am English", Language: "en"},
+ {Name: "Bar", Value: "私は日本人だ", Language: "jp"},
+ }
+ var got FieldList
+ doc, err := saveDoc(fl)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ if err := loadDoc(&got, doc, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if want := fl; !reflect.DeepEqual(&got, want) {
+ t.Errorf("got %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveFieldList(t *testing.T) {
+ got, err := saveDoc(&searchFields)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := protoFields
+ if !reflect.DeepEqual(got.Field, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadFieldAndExprList(t *testing.T) {
+ var got, want FieldList
+ for i, f := range searchFieldsWithLang {
+ f.Derived = (i >= 2) // First 2 elements are "fields", next are "expressions".
+ want = append(want, f)
+ }
+ doc, expr := &pb.Document{Field: protoFields[:2]}, protoFields[2:]
+ if err := loadDoc(&got, doc, expr); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("got %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadMeta(t *testing.T) {
+ var got FieldListWithMeta
+ want := FieldListWithMeta{
+ Meta: searchMeta,
+ Fields: searchFieldsWithLang,
+ }
+ doc := &pb.Document{
+ Field: protoFields,
+ OrderId: proto.Int32(42),
+ OrderIdSource: pb.Document_SUPPLIED.Enum(),
+ }
+ if err := loadDoc(&got, doc, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveMeta(t *testing.T) {
+ got, err := saveDoc(&FieldListWithMeta{
+ Meta: searchMeta,
+ Fields: searchFields,
+ })
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := &pb.Document{
+ Field: protoFields,
+ OrderId: proto.Int32(42),
+ OrderIdSource: pb.Document_SUPPLIED.Enum(),
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveMetaWithDefaultedRank(t *testing.T) {
+ metaWithoutRank := &DocumentMetadata{
+ Rank: 0,
+ }
+ got, err := saveDoc(&FieldListWithMeta{
+ Meta: metaWithoutRank,
+ Fields: searchFields,
+ })
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := &pb.Document{
+ Field: protoFields,
+ OrderId: got.OrderId,
+ OrderIdSource: pb.Document_DEFAULTED.Enum(),
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveWithoutMetaUsesDefaultedRank(t *testing.T) {
+ got, err := saveDoc(&FieldListWithMeta{
+ Fields: searchFields,
+ })
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := &pb.Document{
+ Field: protoFields,
+ OrderId: got.OrderId,
+ OrderIdSource: pb.Document_DEFAULTED.Enum(),
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadSaveWithStruct(t *testing.T) {
+ type gopher struct {
+ Name string
+ Info string `search:"about"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"Fur,facet"`
+ }
+
+ doc := gopher{"Gopher", "Likes slide rules.", 4, Atom("furry")}
+ pb := &pb.Document{
+ Field: []*pb.Field{
+ newStringValueField("Name", "Gopher", pb.FieldValue_TEXT),
+ newStringValueField("about", "Likes slide rules.", pb.FieldValue_TEXT),
+ },
+ Facet: []*pb.Facet{
+ newFacet("Legs", "4e+00", pb.FacetValue_NUMBER),
+ newFacet("Fur", "furry", pb.FacetValue_ATOM),
+ },
+ }
+
+ var gotDoc gopher
+ if err := loadDoc(&gotDoc, pb, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(gotDoc, doc) {
+ t.Errorf("loading doc\ngot %v\nwant %v", gotDoc, doc)
+ }
+
+ gotPB, err := saveDoc(&doc)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ gotPB.OrderId = nil // Don't test: it's time dependent.
+ gotPB.OrderIdSource = nil // Don't test because it's contingent on OrderId.
+ if !proto.Equal(gotPB, pb) {
+ t.Errorf("saving doc\ngot %v\nwant %v", gotPB, pb)
+ }
+}
+
+func TestValidFieldNames(t *testing.T) {
+ testCases := []struct {
+ name string
+ valid bool
+ }{
+ {"Normal", true},
+ {"Also_OK_123", true},
+ {"Not so great", false},
+ {"lower_case", true},
+ {"Exclaim!", false},
+ {"Hello세상아 안녕", false},
+ {"", false},
+ {"Hεllo", false},
+ {strings.Repeat("A", 500), true},
+ {strings.Repeat("A", 501), false},
+ }
+
+ for _, tc := range testCases {
+ _, err := saveDoc(&FieldList{
+ Field{Name: tc.name, Value: "val"},
+ })
+ if err != nil && !strings.Contains(err.Error(), "invalid field name") {
+ t.Errorf("unexpected err %q for field name %q", err, tc.name)
+ }
+ if (err == nil) != tc.valid {
+ t.Errorf("field %q: expected valid %t, received err %v", tc.name, tc.valid, err)
+ }
+ }
+}
+
+func TestValidLangs(t *testing.T) {
+ testCases := []struct {
+ field Field
+ valid bool
+ }{
+ {Field{Name: "Foo", Value: "String", Language: ""}, true},
+ {Field{Name: "Foo", Value: "String", Language: "en"}, true},
+ {Field{Name: "Foo", Value: "String", Language: "aussie"}, false},
+ {Field{Name: "Foo", Value: "String", Language: "12"}, false},
+ {Field{Name: "Foo", Value: HTML("String"), Language: "en"}, true},
+ {Field{Name: "Foo", Value: Atom("String"), Language: "en"}, false},
+ {Field{Name: "Foo", Value: 42, Language: "en"}, false},
+ }
+
+ for _, tt := range testCases {
+ _, err := saveDoc(&FieldList{tt.field})
+ if err == nil != tt.valid {
+ t.Errorf("Field %v, got error %v, wanted valid %t", tt.field, err, tt.valid)
+ }
+ }
+}
+
+func TestDuplicateFields(t *testing.T) {
+ testCases := []struct {
+ desc string
+ fields FieldList
+ errMsg string // Non-empty if we expect an error
+ }{
+ {
+ desc: "multi string",
+ fields: FieldList{{Name: "FieldA", Value: "val1"}, {Name: "FieldA", Value: "val2"}, {Name: "FieldA", Value: "val3"}},
+ },
+ {
+ desc: "multi atom",
+ fields: FieldList{{Name: "FieldA", Value: Atom("val1")}, {Name: "FieldA", Value: Atom("val2")}, {Name: "FieldA", Value: Atom("val3")}},
+ },
+ {
+ desc: "mixed",
+ fields: FieldList{{Name: "FieldA", Value: testString}, {Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: float}},
+ },
+ {
+ desc: "multi time",
+ fields: FieldList{{Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: testTime}},
+ errMsg: `duplicate time field "FieldA"`,
+ },
+ {
+ desc: "multi num",
+ fields: FieldList{{Name: "FieldA", Value: float}, {Name: "FieldA", Value: float}},
+ errMsg: `duplicate numeric field "FieldA"`,
+ },
+ }
+ for _, tc := range testCases {
+ _, err := saveDoc(&tc.fields)
+ if (err == nil) != (tc.errMsg == "") || (err != nil && !strings.Contains(err.Error(), tc.errMsg)) {
+ t.Errorf("%s: got err %v, wanted %q", tc.desc, err, tc.errMsg)
+ }
+ }
+}
+
+func TestLoadErrFieldMismatch(t *testing.T) {
+ testCases := []struct {
+ desc string
+ dst interface{}
+ src []*pb.Field
+ err error
+ }{
+ {
+ desc: "missing",
+ dst: &struct{ One string }{},
+ src: []*pb.Field{newStringValueField("Two", "woop!", pb.FieldValue_TEXT)},
+ err: &ErrFieldMismatch{
+ FieldName: "Two",
+ Reason: "no such struct field",
+ },
+ },
+ {
+ desc: "wrong type",
+ dst: &struct{ Num float64 }{},
+ src: []*pb.Field{newStringValueField("Num", "woop!", pb.FieldValue_TEXT)},
+ err: &ErrFieldMismatch{
+ FieldName: "Num",
+ Reason: "type mismatch: float64 for string data",
+ },
+ },
+ {
+ desc: "unsettable",
+ dst: &struct{ lower string }{},
+ src: []*pb.Field{newStringValueField("lower", "woop!", pb.FieldValue_TEXT)},
+ err: &ErrFieldMismatch{
+ FieldName: "lower",
+ Reason: "cannot set struct field",
+ },
+ },
+ }
+ for _, tc := range testCases {
+ err := loadDoc(tc.dst, &pb.Document{Field: tc.src}, nil)
+ if !reflect.DeepEqual(err, tc.err) {
+ t.Errorf("%s, got err %v, wanted %v", tc.desc, err, tc.err)
+ }
+ }
+}
+
+func TestLimit(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, res *pb.SearchResponse) error {
+ limit := 20 // Default per page.
+ if req.Params.Limit != nil {
+ limit = int(*req.Params.Limit)
+ }
+ res.Status = &pb.RequestStatus{Code: pb.SearchServiceError_OK.Enum()}
+ res.MatchedCount = proto.Int64(int64(limit))
+ for i := 0; i < limit; i++ {
+ res.Result = append(res.Result, &pb.SearchResult{Document: &pb.Document{}})
+ res.Cursor = proto.String("moreresults")
+ }
+ return nil
+ })
+
+ const maxDocs = 500 // Limit maximum number of docs.
+ testCases := []struct {
+ limit, want int
+ }{
+ {limit: 0, want: maxDocs},
+ {limit: 42, want: 42},
+ {limit: 100, want: 100},
+ {limit: 1000, want: maxDocs},
+ }
+
+ for _, tt := range testCases {
+ it := index.Search(c, "gopher", &SearchOptions{Limit: tt.limit, IDsOnly: true})
+ count := 0
+ for ; count < maxDocs; count++ {
+ _, err := it.Next(nil)
+ if err == Done {
+ break
+ }
+ if err != nil {
+ t.Fatalf("err after %d: %v", count, err)
+ }
+ }
+ if count != tt.want {
+ t.Errorf("got %d results, expected %d", count, tt.want)
+ }
+ }
+}
+
+func TestPut(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ expectedIn := &pb.IndexDocumentRequest{
+ Params: &pb.IndexDocumentParams{
+ Document: []*pb.Document{
+ {Field: protoFields, OrderId: proto.Int32(42), OrderIdSource: pb.Document_SUPPLIED.Enum()},
+ },
+ IndexSpec: &pb.IndexSpec{
+ Name: proto.String("Doc"),
+ },
+ },
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn)
+ }
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ DocId: []string{
+ "doc_id",
+ },
+ }
+ return nil
+ })
+
+ id, err := index.Put(c, "", &FieldListWithMeta{
+ Meta: searchMeta,
+ Fields: searchFields,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want := "doc_id"; id != want {
+ t.Errorf("Got doc ID %q, want %q", id, want)
+ }
+}
+
+func TestPutAutoOrderID(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ if len(in.Params.GetDocument()) < 1 {
+ return fmt.Errorf("expected at least one Document, got %v", in)
+ }
+ got, want := in.Params.Document[0].GetOrderId(), int32(time.Since(orderIDEpoch).Seconds())
+ if d := got - want; -5 > d || d > 5 {
+ return fmt.Errorf("got OrderId %d, want near %d", got, want)
+ }
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ DocId: []string{
+ "doc_id",
+ },
+ }
+ return nil
+ })
+
+ if _, err := index.Put(c, "", &searchFields); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestPutBadStatus(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(_ *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {
+ Code: pb.SearchServiceError_INVALID_REQUEST.Enum(),
+ ErrorDetail: proto.String("insufficient gophers"),
+ },
+ },
+ }
+ return nil
+ })
+
+ wantErr := "search: INVALID_REQUEST: insufficient gophers"
+ if _, err := index.Put(c, "", &searchFields); err == nil || err.Error() != wantErr {
+ t.Fatalf("Put: got %v error, want %q", err, wantErr)
+ }
+}
+
+func TestPutMultiNilIDSlice(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ if len(in.Params.GetDocument()) < 1 {
+ return fmt.Errorf("got %v, want at least 1 document", in)
+ }
+ got, want := in.Params.Document[0].GetOrderId(), int32(time.Since(orderIDEpoch).Seconds())
+ if d := got - want; -5 > d || d > 5 {
+ return fmt.Errorf("got OrderId %d, want near %d", got, want)
+ }
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ DocId: []string{
+ "doc_id",
+ },
+ }
+ return nil
+ })
+
+ if _, err := index.PutMulti(c, nil, []interface{}{&searchFields}); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestPutMultiError(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ *out = pb.IndexDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ {Code: pb.SearchServiceError_PERMISSION_DENIED.Enum(), ErrorDetail: proto.String("foo")},
+ },
+ DocId: []string{
+ "id1",
+ "",
+ },
+ }
+ return nil
+ })
+
+ switch _, err := index.PutMulti(c, nil, []interface{}{&searchFields, &searchFields}); {
+ case err == nil:
+ t.Fatalf("got nil, want error")
+ case err.(appengine.MultiError)[0] != nil:
+ t.Fatalf("got %v, want nil MultiError[0]", err.(appengine.MultiError)[0])
+ case err.(appengine.MultiError)[1] == nil:
+ t.Fatalf("got nil, want non-nil MultiError[1]")
+ }
+}
+
+func TestPutMultiWrongNumberOfIDs(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ return nil
+ })
+
+ if _, err := index.PutMulti(c, []string{"a"}, []interface{}{&searchFields, &searchFields}); err == nil {
+ t.Fatal("got success, want error")
+ }
+}
+
+func TestPutMultiTooManyDocs(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+ return nil
+ })
+
+ srcs := make([]interface{}, 201)
+ for i := range srcs {
+ srcs[i] = &searchFields
+ }
+
+ if _, err := index.PutMulti(c, nil, srcs); err != ErrTooManyDocuments {
+ t.Fatalf("got %v, want ErrTooManyDocuments", err)
+ }
+}
+
+func TestSortOptions(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ noErr := errors.New("") // Sentinel err to return to prevent sending request.
+
+ testCases := []struct {
+ desc string
+ sort *SortOptions
+ wantSort []*pb.SortSpec
+ wantScorer *pb.ScorerSpec
+ wantErr string
+ }{
+ {
+ desc: "No SortOptions",
+ },
+ {
+ desc: "Basic",
+ sort: &SortOptions{
+ Expressions: []SortExpression{
+ {Expr: "dog"},
+ {Expr: "cat", Reverse: true},
+ {Expr: "gopher", Default: "blue"},
+ {Expr: "fish", Default: 2.0},
+ },
+ Limit: 42,
+ Scorer: MatchScorer,
+ },
+ wantSort: []*pb.SortSpec{
+ {SortExpression: proto.String("dog")},
+ {SortExpression: proto.String("cat"), SortDescending: proto.Bool(false)},
+ {SortExpression: proto.String("gopher"), DefaultValueText: proto.String("blue")},
+ {SortExpression: proto.String("fish"), DefaultValueNumeric: proto.Float64(2)},
+ },
+ wantScorer: &pb.ScorerSpec{
+ Limit: proto.Int32(42),
+ Scorer: pb.ScorerSpec_MATCH_SCORER.Enum(),
+ },
+ },
+ {
+ desc: "Bad expression default",
+ sort: &SortOptions{
+ Expressions: []SortExpression{
+ {Expr: "dog", Default: true},
+ },
+ },
+ wantErr: `search: invalid Default type bool for expression "dog"`,
+ },
+ {
+ desc: "RescoringMatchScorer",
+ sort: &SortOptions{Scorer: RescoringMatchScorer},
+ wantScorer: &pb.ScorerSpec{Scorer: pb.ScorerSpec_RESCORING_MATCH_SCORER.Enum()},
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ params := req.Params
+ if !reflect.DeepEqual(params.SortSpec, tt.wantSort) {
+ t.Errorf("%s: params.SortSpec=%v; want %v", tt.desc, params.SortSpec, tt.wantSort)
+ }
+ if !reflect.DeepEqual(params.ScorerSpec, tt.wantScorer) {
+ t.Errorf("%s: params.ScorerSpec=%v; want %v", tt.desc, params.ScorerSpec, tt.wantScorer)
+ }
+ return noErr // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", &SearchOptions{Sort: tt.sort})
+ _, err := it.Next(nil)
+ if err == nil {
+ t.Fatalf("%s: err==nil; should not happen", tt.desc)
+ }
+ if err.Error() != tt.wantErr {
+ t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr)
+ }
+ }
+}
+
+func TestFieldSpec(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ errFoo := errors.New("foo") // Sentinel error to return to prevent response parsing.
+
+ testCases := []struct {
+ desc string
+ opts *SearchOptions
+ want *pb.FieldSpec
+ }{
+ {
+ desc: "No options",
+ want: &pb.FieldSpec{},
+ },
+ {
+ desc: "Fields",
+ opts: &SearchOptions{
+ Fields: []string{"one", "two"},
+ },
+ want: &pb.FieldSpec{
+ Name: []string{"one", "two"},
+ },
+ },
+ {
+ desc: "Expressions",
+ opts: &SearchOptions{
+ Expressions: []FieldExpression{
+ {Name: "one", Expr: "price * quantity"},
+ {Name: "two", Expr: "min(daily_use, 10) * rate"},
+ },
+ },
+ want: &pb.FieldSpec{
+ Expression: []*pb.FieldSpec_Expression{
+ {Name: proto.String("one"), Expression: proto.String("price * quantity")},
+ {Name: proto.String("two"), Expression: proto.String("min(daily_use, 10) * rate")},
+ },
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ params := req.Params
+ if !reflect.DeepEqual(params.FieldSpec, tt.want) {
+ t.Errorf("%s: params.FieldSpec=%v; want %v", tt.desc, params.FieldSpec, tt.want)
+ }
+ return errFoo // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", tt.opts)
+ if _, err := it.Next(nil); err != errFoo {
+ t.Fatalf("%s: got error %v; want %v", tt.desc, err, errFoo)
+ }
+ }
+}
+
+func TestBasicSearchOpts(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ noErr := errors.New("") // Sentinel err to return to prevent sending request.
+
+ testCases := []struct {
+ desc string
+ facetOpts []FacetSearchOption
+ cursor Cursor
+ offset int
+ countAccuracy int
+ want *pb.SearchParams
+ wantErr string
+ }{
+ {
+ desc: "No options",
+ want: &pb.SearchParams{},
+ },
+ {
+ desc: "Default auto discovery",
+ facetOpts: []FacetSearchOption{
+ AutoFacetDiscovery(0, 0),
+ },
+ want: &pb.SearchParams{
+ AutoDiscoverFacetCount: proto.Int32(10),
+ },
+ },
+ {
+ desc: "Auto discovery",
+ facetOpts: []FacetSearchOption{
+ AutoFacetDiscovery(7, 12),
+ },
+ want: &pb.SearchParams{
+ AutoDiscoverFacetCount: proto.Int32(7),
+ FacetAutoDetectParam: &pb.FacetAutoDetectParam{
+ ValueLimit: proto.Int32(12),
+ },
+ },
+ },
+ {
+ desc: "Param Depth",
+ facetOpts: []FacetSearchOption{
+ AutoFacetDiscovery(7, 12),
+ },
+ want: &pb.SearchParams{
+ AutoDiscoverFacetCount: proto.Int32(7),
+ FacetAutoDetectParam: &pb.FacetAutoDetectParam{
+ ValueLimit: proto.Int32(12),
+ },
+ },
+ },
+ {
+ desc: "Doc depth",
+ facetOpts: []FacetSearchOption{
+ FacetDocumentDepth(123),
+ },
+ want: &pb.SearchParams{
+ FacetDepth: proto.Int32(123),
+ },
+ },
+ {
+ desc: "Facet discovery",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour"),
+ FacetDiscovery("size", Atom("M"), Atom("L")),
+ FacetDiscovery("price", LessThan(7), Range{7, 14}, AtLeast(14)),
+ },
+ want: &pb.SearchParams{
+ IncludeFacet: []*pb.FacetRequest{
+ {Name: proto.String("colour")},
+ {Name: proto.String("size"), Params: &pb.FacetRequestParam{
+ ValueConstraint: []string{"M", "L"},
+ }},
+ {Name: proto.String("price"), Params: &pb.FacetRequestParam{
+ Range: []*pb.FacetRange{
+ {End: proto.String("7e+00")},
+ {Start: proto.String("7e+00"), End: proto.String("1.4e+01")},
+ {Start: proto.String("1.4e+01")},
+ },
+ }},
+ },
+ },
+ },
+ {
+ desc: "Facet discovery - bad value",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour", true),
+ },
+ wantErr: "bad FacetSearchOption: unsupported value type bool",
+ },
+ {
+ desc: "Facet discovery - mix value types",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour", Atom("blue"), AtLeast(7)),
+ },
+ wantErr: "bad FacetSearchOption: values must all be Atom, or must all be Range",
+ },
+ {
+ desc: "Facet discovery - invalid range",
+ facetOpts: []FacetSearchOption{
+ FacetDiscovery("colour", Range{negInf, posInf}),
+ },
+ wantErr: "bad FacetSearchOption: invalid range: either Start or End must be finite",
+ },
+ {
+ desc: "Cursor",
+ cursor: Cursor("mycursor"),
+ want: &pb.SearchParams{
+ Cursor: proto.String("mycursor"),
+ },
+ },
+ {
+ desc: "Offset",
+ offset: 121,
+ want: &pb.SearchParams{
+ Offset: proto.Int32(121),
+ },
+ },
+ {
+ desc: "Cursor and Offset set",
+ cursor: Cursor("mycursor"),
+ offset: 121,
+ wantErr: "at most one of Cursor and Offset may be specified",
+ },
+ {
+ desc: "Count accuracy",
+ countAccuracy: 100,
+ want: &pb.SearchParams{
+ MatchedCountAccuracy: proto.Int32(100),
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ if tt.want == nil {
+ t.Errorf("%s: expected call to fail", tt.desc)
+ return nil
+ }
+ // Set default fields.
+ tt.want.Query = proto.String("gopher")
+ tt.want.IndexSpec = &pb.IndexSpec{Name: proto.String("Doc")}
+ tt.want.CursorType = pb.SearchParams_PER_RESULT.Enum()
+ tt.want.FieldSpec = &pb.FieldSpec{}
+ if got := req.Params; !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%s: params=%v; want %v", tt.desc, got, tt.want)
+ }
+ return noErr // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", &SearchOptions{
+ Facets: tt.facetOpts,
+ Cursor: tt.cursor,
+ Offset: tt.offset,
+ CountAccuracy: tt.countAccuracy,
+ })
+ _, err := it.Next(nil)
+ if err == nil {
+ t.Fatalf("%s: err==nil; should not happen", tt.desc)
+ }
+ if err.Error() != tt.wantErr {
+ t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr)
+ }
+ }
+}
+
+func TestFacetRefinements(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ noErr := errors.New("") // Sentinel err to return to prevent sending request.
+
+ testCases := []struct {
+ desc string
+ refine []Facet
+ want []*pb.FacetRefinement
+ wantErr string
+ }{
+ {
+ desc: "No refinements",
+ },
+ {
+ desc: "Basic",
+ refine: []Facet{
+ {Name: "fur", Value: Atom("fluffy")},
+ {Name: "age", Value: LessThan(123)},
+ {Name: "age", Value: AtLeast(0)},
+ {Name: "legs", Value: Range{Start: 3, End: 5}},
+ },
+ want: []*pb.FacetRefinement{
+ {Name: proto.String("fur"), Value: proto.String("fluffy")},
+ {Name: proto.String("age"), Range: &pb.FacetRefinement_Range{End: proto.String("1.23e+02")}},
+ {Name: proto.String("age"), Range: &pb.FacetRefinement_Range{Start: proto.String("0e+00")}},
+ {Name: proto.String("legs"), Range: &pb.FacetRefinement_Range{Start: proto.String("3e+00"), End: proto.String("5e+00")}},
+ },
+ },
+ {
+ desc: "Infinite range",
+ refine: []Facet{
+ {Name: "age", Value: Range{Start: negInf, End: posInf}},
+ },
+ wantErr: `search: refinement for facet "age": either Start or End must be finite`,
+ },
+ {
+ desc: "Bad End value in range",
+ refine: []Facet{
+ {Name: "age", Value: LessThan(2147483648)},
+ },
+ wantErr: `search: refinement for facet "age": invalid value for End`,
+ },
+ {
+ desc: "Bad Start value in range",
+ refine: []Facet{
+ {Name: "age", Value: AtLeast(-2147483649)},
+ },
+ wantErr: `search: refinement for facet "age": invalid value for Start`,
+ },
+ {
+ desc: "Unknown value type",
+ refine: []Facet{
+ {Name: "age", Value: "you can't use strings!"},
+ },
+ wantErr: `search: unsupported refinement for facet "age" of type string`,
+ },
+ }
+
+ for _, tt := range testCases {
+ c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+ if got := req.Params.FacetRefinement; !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%s: params.FacetRefinement=%v; want %v", tt.desc, got, tt.want)
+ }
+ return noErr // Always return some error to prevent response parsing.
+ })
+
+ it := index.Search(c, "gopher", &SearchOptions{Refinements: tt.refine})
+ _, err := it.Next(nil)
+ if err == nil {
+ t.Fatalf("%s: err==nil; should not happen", tt.desc)
+ }
+ if err.Error() != tt.wantErr {
+ t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr)
+ }
+ }
+}
+
+func TestNamespaceResetting(t *testing.T) {
+ namec := make(chan *string, 1)
+ c0 := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(req *pb.IndexDocumentRequest, res *pb.IndexDocumentResponse) error {
+ namec <- req.Params.IndexSpec.Namespace
+ return fmt.Errorf("RPC error")
+ })
+
+ // Check that wrapping c0 in a namespace twice works correctly.
+ c1, err := appengine.Namespace(c0, "A")
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+ c2, err := appengine.Namespace(c1, "") // should act as the original context
+ if err != nil {
+ t.Fatalf("appengine.Namespace: %v", err)
+ }
+
+ i := (&Index{})
+
+ i.Put(c0, "something", &searchDoc)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Put with c0: ns = %q, want nil`, *ns)
+ }
+
+ i.Put(c1, "something", &searchDoc)
+ if ns := <-namec; ns == nil {
+ t.Error(`Put with c1: ns = nil, want "A"`)
+ } else if *ns != "A" {
+ t.Errorf(`Put with c1: ns = %q, want "A"`, *ns)
+ }
+
+ i.Put(c2, "something", &searchDoc)
+ if ns := <-namec; ns != nil {
+ t.Errorf(`Put with c2: ns = %q, want nil`, *ns)
+ }
+}
+
+func TestDelete(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "DeleteDocument", func(in *pb.DeleteDocumentRequest, out *pb.DeleteDocumentResponse) error {
+ expectedIn := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: []string{"id"},
+ IndexSpec: &pb.IndexSpec{Name: proto.String("Doc")},
+ },
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn)
+ }
+ *out = pb.DeleteDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ }
+ return nil
+ })
+
+ if err := index.Delete(c, "id"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestDeleteMulti(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "DeleteDocument", func(in *pb.DeleteDocumentRequest, out *pb.DeleteDocumentResponse) error {
+ expectedIn := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: []string{"id1", "id2"},
+ IndexSpec: &pb.IndexSpec{Name: proto.String("Doc")},
+ },
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn)
+ }
+ *out = pb.DeleteDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ }
+ return nil
+ })
+
+ if err := index.DeleteMulti(c, []string{"id1", "id2"}); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestDeleteWrongNumberOfResults(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "DeleteDocument", func(in *pb.DeleteDocumentRequest, out *pb.DeleteDocumentResponse) error {
+ expectedIn := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: []string{"id1", "id2"},
+ IndexSpec: &pb.IndexSpec{Name: proto.String("Doc")},
+ },
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn)
+ }
+ *out = pb.DeleteDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ },
+ }
+ return nil
+ })
+
+ if err := index.DeleteMulti(c, []string{"id1", "id2"}); err == nil {
+ t.Fatalf("got nil, want error")
+ }
+}
+
+func TestDeleteMultiError(t *testing.T) {
+ index, err := Open("Doc")
+ if err != nil {
+ t.Fatalf("err from Open: %v", err)
+ }
+
+ c := aetesting.FakeSingleContext(t, "search", "DeleteDocument", func(in *pb.DeleteDocumentRequest, out *pb.DeleteDocumentResponse) error {
+ expectedIn := &pb.DeleteDocumentRequest{
+ Params: &pb.DeleteDocumentParams{
+ DocId: []string{"id1", "id2"},
+ IndexSpec: &pb.IndexSpec{Name: proto.String("Doc")},
+ },
+ }
+ if !proto.Equal(in, expectedIn) {
+ return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn)
+ }
+ *out = pb.DeleteDocumentResponse{
+ Status: []*pb.RequestStatus{
+ {Code: pb.SearchServiceError_OK.Enum()},
+ {Code: pb.SearchServiceError_PERMISSION_DENIED.Enum(), ErrorDetail: proto.String("foo")},
+ },
+ }
+ return nil
+ })
+
+ switch err := index.DeleteMulti(c, []string{"id1", "id2"}); {
+ case err == nil:
+ t.Fatalf("got nil, want error")
+ case err.(appengine.MultiError)[0] != nil:
+ t.Fatalf("got %v, want nil MultiError[0]", err.(appengine.MultiError)[0])
+ case err.(appengine.MultiError)[1] == nil:
+ t.Fatalf("got nil, want non-nil MultiError[1]")
+ }
+}
diff --git a/vendor/google.golang.org/appengine/search/struct.go b/vendor/google.golang.org/appengine/search/struct.go
new file mode 100644
index 000000000..e73d2f2ef
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/struct.go
@@ -0,0 +1,251 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+type ErrFieldMismatch struct {
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("search: cannot load field %q: %s", e.FieldName, e.Reason)
+}
+
+// ErrFacetMismatch is returned when a facet is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. StructType is the type of the struct
+// pointed to by the destination argument passed to Iterator.Next.
+type ErrFacetMismatch struct {
+ StructType reflect.Type
+ FacetName string
+ Reason string
+}
+
+func (e *ErrFacetMismatch) Error() string {
+ return fmt.Sprintf("search: cannot load facet %q into a %q: %s", e.FacetName, e.StructType, e.Reason)
+}
+
+// structCodec defines how to convert a given struct to/from a search document.
+type structCodec struct {
+ // byIndex returns the struct tag for the i'th struct field.
+ byIndex []structTag
+
+ // fieldByName returns the index of the struct field for the given field name.
+ fieldByName map[string]int
+
+ // facetByName returns the index of the struct field for the given facet name.
+ facetByName map[string]int
+}
+
+// structTag holds a structured version of each struct field's parsed tag.
+type structTag struct {
+ name string
+ facet bool
+ ignore bool
+}
+
+var (
+ codecsMu sync.RWMutex
+ codecs = map[reflect.Type]*structCodec{}
+)
+
+func loadCodec(t reflect.Type) (*structCodec, error) {
+ codecsMu.RLock()
+ codec, ok := codecs[t]
+ codecsMu.RUnlock()
+ if ok {
+ return codec, nil
+ }
+
+ codecsMu.Lock()
+ defer codecsMu.Unlock()
+ if codec, ok := codecs[t]; ok {
+ return codec, nil
+ }
+
+ codec = &structCodec{
+ fieldByName: make(map[string]int),
+ facetByName: make(map[string]int),
+ }
+
+ for i, n := 0, t.NumField(); i < n; i++ {
+ f := t.Field(i)
+ name, opts := f.Tag.Get("search"), ""
+ if i := strings.Index(name, ","); i != -1 {
+ name, opts = name[:i], name[i+1:]
+ }
+ ignore := false
+ if name == "-" {
+ ignore = true
+ } else if name == "" {
+ name = f.Name
+ } else if !validFieldName(name) {
+ return nil, fmt.Errorf("search: struct tag has invalid field name: %q", name)
+ }
+ facet := opts == "facet"
+ codec.byIndex = append(codec.byIndex, structTag{name: name, facet: facet, ignore: ignore})
+ if facet {
+ codec.facetByName[name] = i
+ } else {
+ codec.fieldByName[name] = i
+ }
+ }
+
+ codecs[t] = codec
+ return codec, nil
+}
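+
+// Editor's sketch of the tag handling above (not upstream code):
+//
+//	Info string  `search:"about"`  // structTag{name: "about"}
+//	Legs float64 `search:",facet"` // structTag{name: "Legs", facet: true}
+//	Skip string  `search:"-"`      // structTag{name: "-", ignore: true}
+//
+// An empty tag falls back to the Go field name.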
+
+// structFLS adapts a struct to be a FieldLoadSaver.
+type structFLS struct {
+ v reflect.Value
+ codec *structCodec
+}
+
+func (s structFLS) Load(fields []Field, meta *DocumentMetadata) error {
+ var err error
+ for _, field := range fields {
+ i, ok := s.codec.fieldByName[field.Name]
+ if !ok {
+ // Note the error, but keep going.
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: "no such struct field",
+ }
+ continue
+ }
+ f := s.v.Field(i)
+ if !f.CanSet() {
+ // Note the error, but keep going.
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: "cannot set struct field",
+ }
+ continue
+ }
+ v := reflect.ValueOf(field.Value)
+ if ft, vt := f.Type(), v.Type(); ft != vt {
+ err = &ErrFieldMismatch{
+ FieldName: field.Name,
+ Reason: fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+ }
+ continue
+ }
+ f.Set(v)
+ }
+ if meta == nil {
+ return err
+ }
+ for _, facet := range meta.Facets {
+ i, ok := s.codec.facetByName[facet.Name]
+ if !ok {
+ // Note the error, but keep going.
+ if err == nil {
+ err = &ErrFacetMismatch{
+ StructType: s.v.Type(),
+ FacetName: facet.Name,
+ Reason: "no matching field found",
+ }
+ }
+ continue
+ }
+ f := s.v.Field(i)
+ if !f.CanSet() {
+ // Note the error, but keep going.
+ if err == nil {
+ err = &ErrFacetMismatch{
+ StructType: s.v.Type(),
+ FacetName: facet.Name,
+ Reason: "unable to set unexported field of struct",
+ }
+ }
+ continue
+ }
+ v := reflect.ValueOf(facet.Value)
+ if ft, vt := f.Type(), v.Type(); ft != vt {
+ // Note the first error, but always skip the Set to avoid
+ // panicking on a mismatched type.
+ if err == nil {
+ err = &ErrFacetMismatch{
+ StructType: s.v.Type(),
+ FacetName: facet.Name,
+ Reason: fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+ }
+ }
+ continue
+ }
+ f.Set(v)
+ }
+ return err
+}
+
+func (s structFLS) Save() ([]Field, *DocumentMetadata, error) {
+ fields := make([]Field, 0, len(s.codec.fieldByName))
+ var facets []Facet
+ for i, tag := range s.codec.byIndex {
+ if tag.ignore {
+ continue
+ }
+ f := s.v.Field(i)
+ if !f.CanSet() {
+ continue
+ }
+ if tag.facet {
+ facets = append(facets, Facet{Name: tag.name, Value: f.Interface()})
+ } else {
+ fields = append(fields, Field{Name: tag.name, Value: f.Interface()})
+ }
+ }
+ return fields, &DocumentMetadata{Facets: facets}, nil
+}
+
+// newStructFLS returns a FieldLoadSaver for the struct pointer p.
+func newStructFLS(p interface{}) (FieldLoadSaver, error) {
+ v := reflect.ValueOf(p)
+ if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct {
+ return nil, ErrInvalidDocumentType
+ }
+ codec, err := loadCodec(v.Elem().Type())
+ if err != nil {
+ return nil, err
+ }
+ return structFLS{v.Elem(), codec}, nil
+}
+
+func loadStructWithMeta(dst interface{}, f []Field, meta *DocumentMetadata) error {
+ x, err := newStructFLS(dst)
+ if err != nil {
+ return err
+ }
+ return x.Load(f, meta)
+}
+
+func saveStructWithMeta(src interface{}) ([]Field, *DocumentMetadata, error) {
+ x, err := newStructFLS(src)
+ if err != nil {
+ return nil, nil, err
+ }
+ return x.Save()
+}
+
+// LoadStruct loads the fields from f to dst. dst must be a struct pointer.
+func LoadStruct(dst interface{}, f []Field) error {
+ return loadStructWithMeta(dst, f, nil)
+}
+
+// SaveStruct returns the fields from src as a slice of Field.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Field, error) {
+ f, _, err := saveStructWithMeta(src)
+ return f, err
+}
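+
+// Editor's sketch of a round trip (not upstream code; the gopher type mirrors
+// the examples in the package tests):
+//
+//	type gopher struct {
+//		Name string
+//		Info string `search:"about"`
+//	}
+//	g := gopher{"Gopher", "Likes slide rules."}
+//	fields, err := SaveStruct(&g) // fields named "Name" and "about"
+//	var g2 gopher
+//	err = LoadStruct(&g2, fields) // g2 now equals g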
diff --git a/vendor/google.golang.org/appengine/search/struct_test.go b/vendor/google.golang.org/appengine/search/struct_test.go
new file mode 100644
index 000000000..4e5b5d1b8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/struct_test.go
@@ -0,0 +1,213 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestLoadingStruct(t *testing.T) {
+ testCases := []struct {
+ desc string
+ fields []Field
+ meta *DocumentMetadata
+ want interface{}
+ wantErr bool
+ }{
+ {
+ desc: "Basic struct",
+ fields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "Legs", Value: float64(4)},
+ },
+ want: &struct {
+ Name string
+ Legs float64
+ }{"Gopher", 4},
+ },
+ {
+ desc: "Struct with tags",
+ fields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "about", Value: "Likes slide rules."},
+ },
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ {Name: "Fur", Value: Atom("furry")},
+ }},
+ want: &struct {
+ Name string
+ Info string `search:"about"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"Fur,facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ },
+ {
+ desc: "Bad field from tag",
+ want: &struct {
+ AlphaBeta string `search:"αβ"`
+ }{},
+ wantErr: true,
+ },
+ {
+ desc: "Ignore missing field",
+ fields: []Field{
+ {Name: "Meaning", Value: float64(42)},
+ },
+ want: &struct{}{},
+ wantErr: true,
+ },
+ {
+ desc: "Ignore unsettable field",
+ fields: []Field{
+ {Name: "meaning", Value: float64(42)},
+ },
+ want: &struct{ meaning float64 }{}, // field not populated.
+ wantErr: true,
+ },
+ {
+ desc: "Error on missing facet",
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Set", Value: Atom("yes")},
+ {Name: "Missing", Value: Atom("no")},
+ }},
+ want: &struct {
+ Set Atom `search:",facet"`
+ }{Atom("yes")},
+ wantErr: true,
+ },
+ {
+ desc: "Error on unsettable facet",
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Set", Value: Atom("yes")},
+ {Name: "unset", Value: Atom("no")},
+ }},
+ want: &struct {
+ Set Atom `search:",facet"`
+ }{Atom("yes")},
+ wantErr: true,
+ },
+ {
+ desc: "Error setting ignored field",
+ fields: []Field{
+ {Name: "Set", Value: "yes"},
+ {Name: "Ignored", Value: "no"},
+ },
+ want: &struct {
+ Set string
+ Ignored string `search:"-"`
+ }{Set: "yes"},
+ wantErr: true,
+ },
+ {
+ desc: "Error setting ignored facet",
+ meta: &DocumentMetadata{Facets: []Facet{
+ {Name: "Set", Value: Atom("yes")},
+ {Name: "Ignored", Value: Atom("no")},
+ }},
+ want: &struct {
+ Set Atom `search:",facet"`
+ Ignored Atom `search:"-,facet"`
+ }{Set: Atom("yes")},
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range testCases {
+ // Make a pointer to an empty version of what want points to.
+ dst := reflect.New(reflect.TypeOf(tt.want).Elem()).Interface()
+ err := loadStructWithMeta(dst, tt.fields, tt.meta)
+ if err != nil != tt.wantErr {
+ t.Errorf("%s: got err %v; want err %t", tt.desc, err, tt.wantErr)
+ continue
+ }
+ if !reflect.DeepEqual(dst, tt.want) {
+ t.Errorf("%s: doesn't match\ngot: %v\nwant: %v", tt.desc, dst, tt.want)
+ }
+ }
+}
+
+func TestSavingStruct(t *testing.T) {
+ testCases := []struct {
+ desc string
+ doc interface{}
+ wantFields []Field
+ wantFacets []Facet
+ }{
+ {
+ desc: "Basic struct",
+ doc: &struct {
+ Name string
+ Legs float64
+ }{"Gopher", 4},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "Legs", Value: float64(4)},
+ },
+ },
+ {
+ desc: "Struct with tags",
+ doc: &struct {
+ Name string
+ Info string `search:"about"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"Fur,facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ {Name: "about", Value: "Likes slide rules."},
+ },
+ wantFacets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ {Name: "Fur", Value: Atom("furry")},
+ },
+ },
+ {
+ desc: "Ignore unexported struct fields",
+ doc: &struct {
+ Name string
+ info string
+ Legs float64 `search:",facet"`
+ fuzz Atom `search:",facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ },
+ wantFacets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ },
+ },
+ {
+ desc: "Ignore fields marked -",
+ doc: &struct {
+ Name string
+ Info string `search:"-"`
+ Legs float64 `search:",facet"`
+ Fuzz Atom `search:"-,facet"`
+ }{"Gopher", "Likes slide rules.", 4, Atom("furry")},
+ wantFields: []Field{
+ {Name: "Name", Value: "Gopher"},
+ },
+ wantFacets: []Facet{
+ {Name: "Legs", Value: float64(4)},
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ fields, meta, err := saveStructWithMeta(tt.doc)
+ if err != nil {
+ t.Errorf("%s: got err %v; want nil", tt.desc, err)
+ continue
+ }
+ if !reflect.DeepEqual(fields, tt.wantFields) {
+ t.Errorf("%s: fields don't match\ngot: %v\nwant: %v", tt.desc, fields, tt.wantFields)
+ }
+ if facets := meta.Facets; !reflect.DeepEqual(facets, tt.wantFacets) {
+ t.Errorf("%s: facets don't match\ngot: %v\nwant: %v", tt.desc, facets, tt.wantFacets)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go
new file mode 100644
index 000000000..3de46df82
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package socket provides outbound network sockets.
+//
+// This package is only required in the classic App Engine environment.
+// Applications running only in App Engine "flexible environment" should
+// use the standard library's net package.
+package socket
diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go
new file mode 100644
index 000000000..0ad50e2d3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/socket_classic.go
@@ -0,0 +1,290 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package socket
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/internal"
+
+ pb "google.golang.org/appengine/internal/socket"
+)
+
+// Dial connects to the address addr on the network protocol.
+// The address format is host:port, where host may be a hostname or an IP address.
+// Known protocols are "tcp" and "udp".
+// The returned connection satisfies net.Conn, and is valid while ctx is valid;
+// if the connection is to be used after ctx becomes invalid, invoke SetContext
+// with the new context.
+func Dial(ctx context.Context, protocol, addr string) (*Conn, error) {
+ return DialTimeout(ctx, protocol, addr, 0)
+}
+
+var ipFamilies = []pb.CreateSocketRequest_SocketFamily{
+ pb.CreateSocketRequest_IPv4,
+ pb.CreateSocketRequest_IPv6,
+}
+
+// DialTimeout is like Dial but takes a timeout.
+// The timeout includes name resolution, if required.
+func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {
+ dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn.
+ if timeout > 0 {
+ var cancel context.CancelFunc
+ dialCtx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+ }
+
+ host, portStr, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ port, err := strconv.Atoi(portStr)
+ if err != nil {
+ return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err)
+ }
+
+ var prot pb.CreateSocketRequest_SocketProtocol
+ switch protocol {
+ case "tcp":
+ prot = pb.CreateSocketRequest_TCP
+ case "udp":
+ prot = pb.CreateSocketRequest_UDP
+ default:
+ return nil, fmt.Errorf("socket: unknown protocol %q", protocol)
+ }
+
+ packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host)
+ if err != nil {
+ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
+ }
+ if len(packedAddrs) == 0 {
+ return nil, fmt.Errorf("no addresses for %q", host)
+ }
+
+ packedAddr := packedAddrs[0] // use first address
+ fam := pb.CreateSocketRequest_IPv4
+ if len(packedAddr) == net.IPv6len {
+ fam = pb.CreateSocketRequest_IPv6
+ }
+
+ req := &pb.CreateSocketRequest{
+ Family: fam.Enum(),
+ Protocol: prot.Enum(),
+ RemoteIp: &pb.AddressPort{
+ Port: proto.Int32(int32(port)),
+ PackedAddress: packedAddr,
+ },
+ }
+ if resolved {
+ req.RemoteIp.HostnameHint = &host
+ }
+ res := &pb.CreateSocketReply{}
+ if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil {
+ return nil, err
+ }
+
+ return &Conn{
+ ctx: ctx,
+ desc: res.GetSocketDescriptor(),
+ prot: prot,
+ local: res.ProxyExternalIp,
+ remote: req.RemoteIp,
+ }, nil
+}
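+
+// Minimal usage sketch (editor's illustration, not upstream code;
+// example.com is a placeholder host):
+//
+//	conn, err := socket.DialTimeout(ctx, "tcp", "example.com:80", 10*time.Second)
+//	if err != nil {
+//		return err
+//	}
+//	defer conn.Close()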
+
+// LookupIP returns the given host's IP addresses.
+func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {
+ packedAddrs, _, err := resolve(ctx, ipFamilies, host)
+ if err != nil {
+ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
+ }
+ addrs = make([]net.IP, len(packedAddrs))
+ for i, pa := range packedAddrs {
+ addrs[i] = net.IP(pa)
+ }
+ return addrs, nil
+}
+
+func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) {
+ // Check if it's an IP address; if so, no RPC resolution is needed.
+ if ip := net.ParseIP(host); ip != nil {
+ if ip := ip.To4(); ip != nil {
+ return [][]byte{ip}, false, nil // 4-byte packed form for IPv4
+ }
+ return [][]byte{ip}, false, nil // 16-byte packed form for IPv6
+ }
+
+ req := &pb.ResolveRequest{
+ Name: &host,
+ AddressFamilies: fams,
+ }
+ res := &pb.ResolveReply{}
+ if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil {
+ // XXX: need to map to pb.ResolveReply_ErrorCode?
+ return nil, false, err
+ }
+ return res.PackedAddress, true, nil
+}
+
+// withDeadline is like context.WithDeadline, except it ignores the zero deadline.
+func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) {
+ if deadline.IsZero() {
+ return parent, func() {}
+ }
+ return context.WithDeadline(parent, deadline)
+}
+
+// Conn represents a socket connection.
+// It implements net.Conn.
+type Conn struct {
+ ctx context.Context
+ desc string
+ offset int64
+
+ prot pb.CreateSocketRequest_SocketProtocol
+ local, remote *pb.AddressPort
+
+ readDeadline, writeDeadline time.Time // optional
+}
+
+// SetContext sets the context that is used by this Conn.
+// It is usually used only when using a Conn that was created in a different context,
+// such as when a connection is created during a warmup request but used while
+// servicing a user request.
+func (cn *Conn) SetContext(ctx context.Context) {
+ cn.ctx = ctx
+}
+
+func (cn *Conn) Read(b []byte) (n int, err error) {
+ const maxRead = 1 << 20 // cap each Receive RPC at 1MB
+ if len(b) > maxRead {
+ b = b[:maxRead]
+ }
+
+ req := &pb.ReceiveRequest{
+ SocketDescriptor: &cn.desc,
+ DataSize: proto.Int32(int32(len(b))),
+ }
+ res := &pb.ReceiveReply{}
+ if !cn.readDeadline.IsZero() {
+ req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds())
+ }
+ ctx, cancel := withDeadline(cn.ctx, cn.readDeadline)
+ defer cancel()
+ if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil {
+ return 0, err
+ }
+ if len(res.Data) == 0 {
+ return 0, io.EOF
+ }
+ if len(res.Data) > len(b) {
+ return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b))
+ }
+ return copy(b, res.Data), nil
+}
+
+func (cn *Conn) Write(b []byte) (n int, err error) {
+ const lim = 1 << 20 // max per chunk
+
+ for n < len(b) {
+ chunk := b[n:]
+ if len(chunk) > lim {
+ chunk = chunk[:lim]
+ }
+
+ req := &pb.SendRequest{
+ SocketDescriptor: &cn.desc,
+ Data: chunk,
+ StreamOffset: &cn.offset,
+ }
+ res := &pb.SendReply{}
+ if !cn.writeDeadline.IsZero() {
+ req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds())
+ }
+ ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline)
+ err = internal.Call(ctx, "remote_socket", "Send", req, res)
+ cancel() // release the deadline timer now; a defer would pile up across loop iterations
+ if err != nil {
+ // assume zero bytes were sent in this RPC
+ break
+ }
+ n += int(res.GetDataSent())
+ cn.offset += int64(res.GetDataSent())
+ }
+
+ return
+}
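+
+// Editor's note (illustrative): with lim = 1<<20, a 2.5MB buffer is sent as
+// three Send RPCs of 1MB, 1MB and 0.5MB; cn.offset accumulates DataSent after
+// each call and is passed as StreamOffset on the next chunk.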
+
+func (cn *Conn) Close() error {
+ req := &pb.CloseRequest{
+ SocketDescriptor: &cn.desc,
+ }
+ res := &pb.CloseReply{}
+ if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil {
+ return err
+ }
+ cn.desc = "CLOSED"
+ return nil
+}
+
+func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr {
+ if ap == nil {
+ return nil
+ }
+ switch prot {
+ case pb.CreateSocketRequest_TCP:
+ return &net.TCPAddr{
+ IP: net.IP(ap.PackedAddress),
+ Port: int(*ap.Port),
+ }
+ case pb.CreateSocketRequest_UDP:
+ return &net.UDPAddr{
+ IP: net.IP(ap.PackedAddress),
+ Port: int(*ap.Port),
+ }
+ }
+ panic("unknown protocol " + prot.String())
+}
+
+func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) }
+func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) }
+
+func (cn *Conn) SetDeadline(t time.Time) error {
+ cn.readDeadline = t
+ cn.writeDeadline = t
+ return nil
+}
+
+func (cn *Conn) SetReadDeadline(t time.Time) error {
+ cn.readDeadline = t
+ return nil
+}
+
+func (cn *Conn) SetWriteDeadline(t time.Time) error {
+ cn.writeDeadline = t
+ return nil
+}
+
+// KeepAlive signals that the connection is still in use.
+// It may be called to prevent the socket being closed due to inactivity.
+func (cn *Conn) KeepAlive() error {
+ req := &pb.GetSocketNameRequest{
+ SocketDescriptor: &cn.desc,
+ }
+ res := &pb.GetSocketNameReply{}
+ return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res)
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go
new file mode 100644
index 000000000..c804169a1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/socket/socket_vm.go
@@ -0,0 +1,64 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package socket
+
+import (
+ "net"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Dial connects to the address addr on the network protocol.
+// The address format is host:port, where host may be a hostname or an IP address.
+// Known protocols are "tcp" and "udp".
+// The returned connection satisfies net.Conn, and is valid while ctx is valid;
+// if the connection is to be used after ctx becomes invalid, invoke SetContext
+// with the new context.
+func Dial(ctx context.Context, protocol, addr string) (*Conn, error) {
+ conn, err := net.Dial(protocol, addr)
+ if err != nil {
+ return nil, err
+ }
+ return &Conn{conn}, nil
+}
+
+// DialTimeout is like Dial but takes a timeout.
+// The timeout includes name resolution, if required.
+func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {
+ conn, err := net.DialTimeout(protocol, addr, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return &Conn{conn}, nil
+}
+
+// LookupIP returns the given host's IP addresses.
+func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {
+ return net.LookupIP(host)
+}
+
+// Conn represents a socket connection.
+// It implements net.Conn.
+type Conn struct {
+ net.Conn
+}
+
+// SetContext sets the context that is used by this Conn.
+// It is usually used only when using a Conn that was created in a different context,
+// such as when a connection is created during a warmup request but used while
+// servicing a user request.
+func (cn *Conn) SetContext(ctx context.Context) {
+ // This function is not required in App Engine "flexible environment".
+}
+
+// KeepAlive signals that the connection is still in use.
+// It may be called to prevent the socket being closed due to inactivity.
+func (cn *Conn) KeepAlive() error {
+ // This function is not required in App Engine "flexible environment".
+ return nil
+}
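
On the flexible environment these wrappers delegate to the net package, so a sketch of DialTimeout reduces to ordinary Go networking; the 5-second budget below (which covers name resolution too) is illustrative:

	import (
		"time"

		"golang.org/x/net/context"
		"google.golang.org/appengine/socket"
	)

	func quickProbe(ctx context.Context) error {
		// Fail fast if the host cannot be reached within 5s, DNS included.
		cn, err := socket.DialTimeout(ctx, "tcp", "example.com:80", 5*time.Second)
		if err != nil {
			return err
		}
		return cn.Close()
	}
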
diff --git a/vendor/google.golang.org/appengine/taskqueue/taskqueue.go b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go
new file mode 100644
index 000000000..965c5ab4c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go
@@ -0,0 +1,541 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package taskqueue provides a client for App Engine's taskqueue service.
+Using this service, applications may perform work outside a user's request.
+
+A Task may be constructed manually; alternatively, since the most common
+taskqueue operation is to add a single POST task, NewPOSTTask makes it easy.
+
+ t := taskqueue.NewPOSTTask("/worker", url.Values{
+ "key": {key},
+ })
+ taskqueue.Add(c, t, "") // add t to the default queue
+*/
+package taskqueue // import "google.golang.org/appengine/taskqueue"
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ dspb "google.golang.org/appengine/internal/datastore"
+ pb "google.golang.org/appengine/internal/taskqueue"
+)
+
+var (
+ // ErrTaskAlreadyAdded is the error returned by Add and AddMulti when a task has already been added with a particular name.
+ ErrTaskAlreadyAdded = errors.New("taskqueue: task has already been added")
+)
+
+// RetryOptions let you control whether to retry a task and the backoff intervals between tries.
+type RetryOptions struct {
+ // Number of tries/leases after which the task fails permanently and is deleted.
+ // If AgeLimit is also set, both limits must be exceeded for the task to fail permanently.
+ RetryLimit int32
+
+ // Maximum time allowed since the task's first try before the task fails permanently and is deleted (only for push tasks).
+ // If RetryLimit is also set, both limits must be exceeded for the task to fail permanently.
+ AgeLimit time.Duration
+
+ // Minimum time between successive tries (only for push tasks).
+ MinBackoff time.Duration
+
+ // Maximum time between successive tries (only for push tasks).
+ MaxBackoff time.Duration
+
+ // Maximum number of times to double the interval between successive tries before the intervals increase linearly (only for push tasks).
+ MaxDoublings int32
+
+ // If MaxDoublings is zero, set ApplyZeroMaxDoublings to true to override the default non-zero value.
+ // Otherwise a zero MaxDoublings is ignored and the default is used.
+ ApplyZeroMaxDoublings bool
+}
+
+// toRetryParameters converts RetryOptions to pb.TaskQueueRetryParameters.
+func (opt *RetryOptions) toRetryParameters() *pb.TaskQueueRetryParameters {
+ params := &pb.TaskQueueRetryParameters{}
+ if opt.RetryLimit > 0 {
+ params.RetryLimit = proto.Int32(opt.RetryLimit)
+ }
+ if opt.AgeLimit > 0 {
+ params.AgeLimitSec = proto.Int64(int64(opt.AgeLimit.Seconds()))
+ }
+ if opt.MinBackoff > 0 {
+ params.MinBackoffSec = proto.Float64(opt.MinBackoff.Seconds())
+ }
+ if opt.MaxBackoff > 0 {
+ params.MaxBackoffSec = proto.Float64(opt.MaxBackoff.Seconds())
+ }
+ if opt.MaxDoublings > 0 || (opt.MaxDoublings == 0 && opt.ApplyZeroMaxDoublings) {
+ params.MaxDoublings = proto.Int32(opt.MaxDoublings)
+ }
+ return params
+}
+
+// A Task represents a task to be executed.
+type Task struct {
+ // Path is the worker URL for the task.
+ // If unset, it will default to /_ah/queue/<queue_name>.
+ Path string
+
+ // Payload is the data for the task.
+ // This will be delivered as the HTTP request body.
+ // It is only used when Method is POST, PUT or PULL.
+ // url.Values' Encode method may be used to generate this for POST requests.
+ Payload []byte
+
+ // Additional HTTP headers to pass at the task's execution time.
+ // To schedule the task to be run with an alternate app version
+ // or backend, set the "Host" header.
+ Header http.Header
+
+ // Method is the HTTP method for the task ("GET", "POST", etc.),
+ // or "PULL" if this is task is destined for a pull-based queue.
+ // If empty, this defaults to "POST".
+ Method string
+
+ // A name for the task.
+ // If empty, a name will be chosen.
+ Name string
+
+ // Delay specifies the duration the task queue service must wait
+ // before executing the task.
+ // Either Delay or ETA may be set, but not both.
+ Delay time.Duration
+
+ // ETA specifies the earliest time a task may be executed (push queues)
+ // or leased (pull queues).
+ // Either Delay or ETA may be set, but not both.
+ ETA time.Time
+
+ // The number of times the task has been dispatched or leased.
+ RetryCount int32
+
+ // Tag for the task. Only used when Method is PULL.
+ Tag string
+
+ // Retry options for this task. May be nil.
+ RetryOptions *RetryOptions
+}
+
+func (t *Task) method() string {
+ if t.Method == "" {
+ return "POST"
+ }
+ return t.Method
+}
+
+// NewPOSTTask creates a Task that will POST to a path with the given form data.
+func NewPOSTTask(path string, params url.Values) *Task {
+ h := make(http.Header)
+ h.Set("Content-Type", "application/x-www-form-urlencoded")
+ return &Task{
+ Path: path,
+ Payload: []byte(params.Encode()),
+ Header: h,
+ Method: "POST",
+ }
+}
+
+// RequestHeaders are the special HTTP request headers available to push task
+// HTTP request handlers. These headers are set internally by App Engine.
+// See https://cloud.google.com/appengine/docs/standard/go/taskqueue/push/creating-handlers#reading_request_headers
+// for a description of the fields.
+type RequestHeaders struct {
+ QueueName string
+ TaskName string
+ TaskRetryCount int64
+ TaskExecutionCount int64
+ TaskETA time.Time
+
+ TaskPreviousResponse int
+ TaskRetryReason string
+ FailFast bool
+}
+
+// ParseRequestHeaders parses the special HTTP request headers available to push
+// task request handlers. This function silently ignores values of the wrong
+// format.
+func ParseRequestHeaders(h http.Header) *RequestHeaders {
+ ret := &RequestHeaders{
+ QueueName: h.Get("X-AppEngine-QueueName"),
+ TaskName: h.Get("X-AppEngine-TaskName"),
+ }
+
+ ret.TaskRetryCount, _ = strconv.ParseInt(h.Get("X-AppEngine-TaskRetryCount"), 10, 64)
+ ret.TaskExecutionCount, _ = strconv.ParseInt(h.Get("X-AppEngine-TaskExecutionCount"), 10, 64)
+
+ etaSecs, _ := strconv.ParseInt(h.Get("X-AppEngine-TaskETA"), 10, 64)
+ if etaSecs != 0 {
+ ret.TaskETA = time.Unix(etaSecs, 0)
+ }
+
+ ret.TaskPreviousResponse, _ = strconv.Atoi(h.Get("X-AppEngine-TaskPreviousResponse"))
+ ret.TaskRetryReason = h.Get("X-AppEngine-TaskRetryReason")
+ if h.Get("X-AppEngine-FailFast") != "" {
+ ret.FailFast = true
+ }
+
+ return ret
+}
+
+var (
+ currentNamespace = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ defaultNamespace = http.CanonicalHeaderKey("X-AppEngine-Default-Namespace")
+)
+
+func getDefaultNamespace(ctx context.Context) string {
+ return internal.IncomingHeaders(ctx).Get(defaultNamespace)
+}
+
+func newAddReq(c context.Context, task *Task, queueName string) (*pb.TaskQueueAddRequest, error) {
+ if queueName == "" {
+ queueName = "default"
+ }
+ path := task.Path
+ if path == "" {
+ path = "/_ah/queue/" + queueName
+ }
+ eta := task.ETA
+ if eta.IsZero() {
+ eta = time.Now().Add(task.Delay)
+ } else if task.Delay != 0 {
+ panic("taskqueue: both Delay and ETA are set")
+ }
+ req := &pb.TaskQueueAddRequest{
+ QueueName: []byte(queueName),
+ TaskName: []byte(task.Name),
+ EtaUsec: proto.Int64(eta.UnixNano() / 1e3),
+ }
+ method := task.method()
+ if method == "PULL" {
+ // Pull-based task
+ req.Body = task.Payload
+ req.Mode = pb.TaskQueueMode_PULL.Enum()
+ if task.Tag != "" {
+ req.Tag = []byte(task.Tag)
+ }
+ } else {
+ // HTTP-based task
+ if v, ok := pb.TaskQueueAddRequest_RequestMethod_value[method]; ok {
+ req.Method = pb.TaskQueueAddRequest_RequestMethod(v).Enum()
+ } else {
+ return nil, fmt.Errorf("taskqueue: bad method %q", method)
+ }
+ req.Url = []byte(path)
+ for k, vs := range task.Header {
+ for _, v := range vs {
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(k),
+ Value: []byte(v),
+ })
+ }
+ }
+ if method == "POST" || method == "PUT" {
+ req.Body = task.Payload
+ }
+
+ // Namespace headers.
+ if _, ok := task.Header[currentNamespace]; !ok {
+ // Fetch the current namespace of this request.
+ ns := internal.NamespaceFromContext(c)
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(currentNamespace),
+ Value: []byte(ns),
+ })
+ }
+ if _, ok := task.Header[defaultNamespace]; !ok {
+ // Fetch the X-AppEngine-Default-Namespace header of this request.
+ if ns := getDefaultNamespace(c); ns != "" {
+ req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+ Key: []byte(defaultNamespace),
+ Value: []byte(ns),
+ })
+ }
+ }
+ }
+
+ if task.RetryOptions != nil {
+ req.RetryParameters = task.RetryOptions.toRetryParameters()
+ }
+
+ return req, nil
+}
+
+var alreadyAddedErrors = map[pb.TaskQueueServiceError_ErrorCode]bool{
+ pb.TaskQueueServiceError_TASK_ALREADY_EXISTS: true,
+ pb.TaskQueueServiceError_TOMBSTONED_TASK: true,
+}
+
+// Add adds the task to a named queue.
+// An empty queue name means that the default queue will be used.
+// Add returns an equivalent Task with defaults filled in, including setting
+// the task's Name field to the chosen name if the original was empty.
+func Add(c context.Context, task *Task, queueName string) (*Task, error) {
+ req, err := newAddReq(c, task, queueName)
+ if err != nil {
+ return nil, err
+ }
+ res := &pb.TaskQueueAddResponse{}
+ if err := internal.Call(c, "taskqueue", "Add", req, res); err != nil {
+ apiErr, ok := err.(*internal.APIError)
+ if ok && alreadyAddedErrors[pb.TaskQueueServiceError_ErrorCode(apiErr.Code)] {
+ return nil, ErrTaskAlreadyAdded
+ }
+ return nil, err
+ }
+ resultTask := *task
+ resultTask.Method = task.method()
+ if task.Name == "" {
+ resultTask.Name = string(res.ChosenTaskName)
+ }
+ return &resultTask, nil
+}
+
+// AddMulti adds multiple tasks to a named queue.
+// An empty queue name means that the default queue will be used.
+// AddMulti returns a slice of equivalent tasks with defaults filled in, including setting
+// each task's Name field to the chosen name if the original was empty.
+// If a given task is badly formed or could not be added, an appengine.MultiError is returned.
+func AddMulti(c context.Context, tasks []*Task, queueName string) ([]*Task, error) {
+ req := &pb.TaskQueueBulkAddRequest{
+ AddRequest: make([]*pb.TaskQueueAddRequest, len(tasks)),
+ }
+ me, any := make(appengine.MultiError, len(tasks)), false
+ for i, t := range tasks {
+ req.AddRequest[i], me[i] = newAddReq(c, t, queueName)
+ any = any || me[i] != nil
+ }
+ if any {
+ return nil, me
+ }
+ res := &pb.TaskQueueBulkAddResponse{}
+ if err := internal.Call(c, "taskqueue", "BulkAdd", req, res); err != nil {
+ return nil, err
+ }
+ if len(res.Taskresult) != len(tasks) {
+ return nil, errors.New("taskqueue: server error")
+ }
+ tasksOut := make([]*Task, len(tasks))
+ for i, tr := range res.Taskresult {
+ tasksOut[i] = new(Task)
+ *tasksOut[i] = *tasks[i]
+ tasksOut[i].Method = tasksOut[i].method()
+ if tasksOut[i].Name == "" {
+ tasksOut[i].Name = string(tr.ChosenTaskName)
+ }
+ if *tr.Result != pb.TaskQueueServiceError_OK {
+ if alreadyAddedErrors[*tr.Result] {
+ me[i] = ErrTaskAlreadyAdded
+ } else {
+ me[i] = &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(*tr.Result),
+ }
+ }
+ any = true
+ }
+ }
+ if any {
+ return tasksOut, me
+ }
+ return tasksOut, nil
+}
+
+// Delete deletes a task from a named queue.
+func Delete(c context.Context, task *Task, queueName string) error {
+ err := DeleteMulti(c, []*Task{task}, queueName)
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// DeleteMulti deletes multiple tasks from a named queue.
+// If a given task could not be deleted, an appengine.MultiError is returned.
+// Each task is deleted independently; one may fail to delete while the others
+// are successfully deleted.
+func DeleteMulti(c context.Context, tasks []*Task, queueName string) error {
+ taskNames := make([][]byte, len(tasks))
+ for i, t := range tasks {
+ taskNames[i] = []byte(t.Name)
+ }
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueDeleteRequest{
+ QueueName: []byte(queueName),
+ TaskName: taskNames,
+ }
+ res := &pb.TaskQueueDeleteResponse{}
+ if err := internal.Call(c, "taskqueue", "Delete", req, res); err != nil {
+ return err
+ }
+ if a, b := len(req.TaskName), len(res.Result); a != b {
+ return fmt.Errorf("taskqueue: internal error: requested deletion of %d tasks, got %d results", a, b)
+ }
+ me, any := make(appengine.MultiError, len(res.Result)), false
+ for i, ec := range res.Result {
+ if ec != pb.TaskQueueServiceError_OK {
+ me[i] = &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(ec),
+ }
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+func lease(c context.Context, maxTasks int, queueName string, leaseTime int, groupByTag bool, tag []byte) ([]*Task, error) {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueQueryAndOwnTasksRequest{
+ QueueName: []byte(queueName),
+ LeaseSeconds: proto.Float64(float64(leaseTime)),
+ MaxTasks: proto.Int64(int64(maxTasks)),
+ GroupByTag: proto.Bool(groupByTag),
+ Tag: tag,
+ }
+ res := &pb.TaskQueueQueryAndOwnTasksResponse{}
+ if err := internal.Call(c, "taskqueue", "QueryAndOwnTasks", req, res); err != nil {
+ return nil, err
+ }
+ tasks := make([]*Task, len(res.Task))
+ for i, t := range res.Task {
+ tasks[i] = &Task{
+ Payload: t.Body,
+ Name: string(t.TaskName),
+ Method: "PULL",
+ ETA: time.Unix(0, *t.EtaUsec*1e3),
+ RetryCount: *t.RetryCount,
+ Tag: string(t.Tag),
+ }
+ }
+ return tasks, nil
+}
+
+// Lease leases tasks from a queue.
+// leaseTime is in seconds.
+// The number of tasks fetched will be at most maxTasks.
+func Lease(c context.Context, maxTasks int, queueName string, leaseTime int) ([]*Task, error) {
+ return lease(c, maxTasks, queueName, leaseTime, false, nil)
+}
+
+// LeaseByTag leases tasks from a queue, grouped by tag.
+// If tag is empty, then the returned tasks are grouped by the tag of the task with earliest ETA.
+// leaseTime is in seconds.
+// The number of tasks fetched will be at most maxTasks.
+func LeaseByTag(c context.Context, maxTasks int, queueName string, leaseTime int, tag string) ([]*Task, error) {
+ return lease(c, maxTasks, queueName, leaseTime, true, []byte(tag))
+}
+
+// Purge removes all tasks from a queue.
+func Purge(c context.Context, queueName string) error {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueuePurgeQueueRequest{
+ QueueName: []byte(queueName),
+ }
+ res := &pb.TaskQueuePurgeQueueResponse{}
+ return internal.Call(c, "taskqueue", "PurgeQueue", req, res)
+}
+
+// ModifyLease modifies the lease of a task.
+// Used to request more processing time, or to abandon processing.
+// leaseTime is in seconds and must not be negative.
+func ModifyLease(c context.Context, task *Task, queueName string, leaseTime int) error {
+ if queueName == "" {
+ queueName = "default"
+ }
+ req := &pb.TaskQueueModifyTaskLeaseRequest{
+ QueueName: []byte(queueName),
+ TaskName: []byte(task.Name),
+ EtaUsec: proto.Int64(task.ETA.UnixNano() / 1e3), // Used to verify ownership.
+ LeaseSeconds: proto.Float64(float64(leaseTime)),
+ }
+ res := &pb.TaskQueueModifyTaskLeaseResponse{}
+ if err := internal.Call(c, "taskqueue", "ModifyTaskLease", req, res); err != nil {
+ return err
+ }
+ task.ETA = time.Unix(0, *res.UpdatedEtaUsec*1e3)
+ return nil
+}
+
+// QueueStatistics represents statistics about a single task queue.
+type QueueStatistics struct {
+ Tasks int // may be an approximation
+ OldestETA time.Time // zero if there are no pending tasks
+
+ Executed1Minute int // tasks executed in the last minute
+ InFlight int // tasks executing now
+ EnforcedRate float64 // requests per second
+}
+
+// QueueStats retrieves statistics about queues.
+func QueueStats(c context.Context, queueNames []string) ([]QueueStatistics, error) {
+ req := &pb.TaskQueueFetchQueueStatsRequest{
+ QueueName: make([][]byte, len(queueNames)),
+ }
+ for i, q := range queueNames {
+ if q == "" {
+ q = "default"
+ }
+ req.QueueName[i] = []byte(q)
+ }
+ res := &pb.TaskQueueFetchQueueStatsResponse{}
+ if err := internal.Call(c, "taskqueue", "FetchQueueStats", req, res); err != nil {
+ return nil, err
+ }
+ qs := make([]QueueStatistics, len(res.Queuestats))
+ for i, qsg := range res.Queuestats {
+ qs[i] = QueueStatistics{
+ Tasks: int(*qsg.NumTasks),
+ }
+ if eta := *qsg.OldestEtaUsec; eta > -1 {
+ qs[i].OldestETA = time.Unix(0, eta*1e3)
+ }
+ if si := qsg.ScannerInfo; si != nil {
+ qs[i].Executed1Minute = int(*si.ExecutedLastMinute)
+ qs[i].InFlight = int(si.GetRequestsInFlight())
+ qs[i].EnforcedRate = si.GetEnforcedRate()
+ }
+ }
+ return qs, nil
+}
+
+func setTransaction(x *pb.TaskQueueAddRequest, t *dspb.Transaction) {
+ x.Transaction = t
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("taskqueue", pb.TaskQueueServiceError_ErrorCode_name)
+
+ // Datastore error codes are shifted by DATASTORE_ERROR when presented through taskqueue.
+ dsCode := int32(pb.TaskQueueServiceError_DATASTORE_ERROR) + int32(dspb.Error_TIMEOUT)
+ internal.RegisterTimeoutErrorCode("taskqueue", dsCode)
+
+ // Transaction registration.
+ internal.RegisterTransactionSetter(setTransaction)
+ internal.RegisterTransactionSetter(func(x *pb.TaskQueueBulkAddRequest, t *dspb.Transaction) {
+ for _, req := range x.AddRequest {
+ setTransaction(req, t)
+ }
+ })
+}
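
For orientation, a hedged sketch tying the pieces above together: enqueuing a named POST task idempotently, reading the push headers in the worker, and draining a pull queue. Handler paths, queue names, and helper names are illustrative, not part of the vendored package:

	import (
		"net/http"
		"net/url"

		"golang.org/x/net/context"
		"google.golang.org/appengine/taskqueue"
	)

	func enqueueOnce(c context.Context, key string) error {
		t := taskqueue.NewPOSTTask("/worker", url.Values{"key": {key}})
		// Naming the task makes Add idempotent: a duplicate name yields
		// ErrTaskAlreadyAdded, which callers can treat as success.
		// (Assumes key only contains characters valid in task names.)
		t.Name = "process-" + key
		_, err := taskqueue.Add(c, t, "")
		if err == taskqueue.ErrTaskAlreadyAdded {
			return nil
		}
		return err
	}

	func worker(w http.ResponseWriter, r *http.Request) {
		h := taskqueue.ParseRequestHeaders(r.Header)
		if h.TaskRetryCount > 5 {
			// Returning 2xx acknowledges the task and stops retries.
			return
		}
		// ... do the work using r.FormValue("key") ...
	}

	func drainPullQueue(c context.Context) error {
		// Lease up to 10 tasks for 60 seconds from a pull queue.
		tasks, err := taskqueue.Lease(c, 10, "pull-queue", 60)
		if err != nil {
			return err
		}
		for _, t := range tasks {
			// ... process t.Payload ...
			if err := taskqueue.Delete(c, t, "pull-queue"); err != nil {
				return err
			}
		}
		return nil
	}
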
diff --git a/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go b/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go
new file mode 100644
index 000000000..d9eec50b7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go
@@ -0,0 +1,173 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package taskqueue
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "reflect"
+ "testing"
+ "time"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/taskqueue"
+)
+
+func TestAddErrors(t *testing.T) {
+ var tests = []struct {
+ err, want error
+ sameErr bool // if true, should return err exactly
+ }{
+ {
+ err: &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_TASK_ALREADY_EXISTS),
+ },
+ want: ErrTaskAlreadyAdded,
+ },
+ {
+ err: &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_TOMBSTONED_TASK),
+ },
+ want: ErrTaskAlreadyAdded,
+ },
+ {
+ err: &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_UNKNOWN_QUEUE),
+ },
+ want: errors.New("not used"),
+ sameErr: true,
+ },
+ }
+ for _, tc := range tests {
+ c := aetesting.FakeSingleContext(t, "taskqueue", "Add", func(req *pb.TaskQueueAddRequest, res *pb.TaskQueueAddResponse) error {
+ // don't fill in any of the response
+ return tc.err
+ })
+ task := &Task{Path: "/worker", Method: "PULL"}
+ _, err := Add(c, task, "a-queue")
+ want := tc.want
+ if tc.sameErr {
+ want = tc.err
+ }
+ if err != want {
+ t.Errorf("Add with tc.err = %v, got %#v, want = %#v", tc.err, err, want)
+ }
+ }
+}
+
+func TestAddMulti(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "taskqueue", "BulkAdd", func(req *pb.TaskQueueBulkAddRequest, res *pb.TaskQueueBulkAddResponse) error {
+ res.Taskresult = []*pb.TaskQueueBulkAddResponse_TaskResult{
+ {
+ Result: pb.TaskQueueServiceError_OK.Enum(),
+ },
+ {
+ Result: pb.TaskQueueServiceError_TASK_ALREADY_EXISTS.Enum(),
+ },
+ {
+ Result: pb.TaskQueueServiceError_TOMBSTONED_TASK.Enum(),
+ },
+ {
+ Result: pb.TaskQueueServiceError_INTERNAL_ERROR.Enum(),
+ },
+ }
+ return nil
+ })
+ tasks := []*Task{
+ {Path: "/worker", Method: "PULL"},
+ {Path: "/worker", Method: "PULL"},
+ {Path: "/worker", Method: "PULL"},
+ {Path: "/worker", Method: "PULL"},
+ }
+ r, err := AddMulti(c, tasks, "a-queue")
+ if len(r) != len(tasks) {
+ t.Fatalf("AddMulti returned %d tasks, want %d", len(r), len(tasks))
+ }
+ want := appengine.MultiError{
+ nil,
+ ErrTaskAlreadyAdded,
+ ErrTaskAlreadyAdded,
+ &internal.APIError{
+ Service: "taskqueue",
+ Code: int32(pb.TaskQueueServiceError_INTERNAL_ERROR),
+ },
+ }
+ if !reflect.DeepEqual(err, want) {
+ t.Errorf("AddMulti got %v, wanted %v", err, want)
+ }
+}
+
+func TestAddWithEmptyPath(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "taskqueue", "Add", func(req *pb.TaskQueueAddRequest, res *pb.TaskQueueAddResponse) error {
+ if got, want := string(req.Url), "/_ah/queue/a-queue"; got != want {
+ return fmt.Errorf("req.Url = %q; want %q", got, want)
+ }
+ return nil
+ })
+ if _, err := Add(c, &Task{}, "a-queue"); err != nil {
+ t.Fatalf("Add: %v", err)
+ }
+}
+
+func TestParseRequestHeaders(t *testing.T) {
+ tests := []struct {
+ Header http.Header
+ Want RequestHeaders
+ }{
+ {
+ Header: map[string][]string{
+ "X-Appengine-Queuename": []string{"foo"},
+ "X-Appengine-Taskname": []string{"bar"},
+ "X-Appengine-Taskretrycount": []string{"4294967297"}, // 2^32 + 1
+ "X-Appengine-Taskexecutioncount": []string{"4294967298"}, // 2^32 + 2
+ "X-Appengine-Tasketa": []string{"1500000000"},
+ "X-Appengine-Taskpreviousresponse": []string{"404"},
+ "X-Appengine-Taskretryreason": []string{"baz"},
+ "X-Appengine-Failfast": []string{"yes"},
+ },
+ Want: RequestHeaders{
+ QueueName: "foo",
+ TaskName: "bar",
+ TaskRetryCount: 4294967297,
+ TaskExecutionCount: 4294967298,
+ TaskETA: time.Date(2017, time.July, 14, 2, 40, 0, 0, time.UTC),
+ TaskPreviousResponse: 404,
+ TaskRetryReason: "baz",
+ FailFast: true,
+ },
+ },
+ {
+ Header: map[string][]string{},
+ Want: RequestHeaders{
+ QueueName: "",
+ TaskName: "",
+ TaskRetryCount: 0,
+ TaskExecutionCount: 0,
+ TaskETA: time.Time{},
+ TaskPreviousResponse: 0,
+ TaskRetryReason: "",
+ FailFast: false,
+ },
+ },
+ }
+
+ for idx, test := range tests {
+ got := *ParseRequestHeaders(test.Header)
+ if got.TaskETA.UnixNano() != test.Want.TaskETA.UnixNano() {
+ t.Errorf("%d. ParseRequestHeaders got TaskETA %v, wanted %v", idx, got.TaskETA, test.Want.TaskETA)
+ }
+ got.TaskETA = time.Time{}
+ test.Want.TaskETA = time.Time{}
+ if !reflect.DeepEqual(got, test.Want) {
+ t.Errorf("%d. ParseRequestHeaders got %v, wanted %v", idx, got, test.Want)
+ }
+ }
+}
diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
new file mode 100644
index 000000000..05642a992
--- /dev/null
+++ b/vendor/google.golang.org/appengine/timeout.go
@@ -0,0 +1,20 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import "golang.org/x/net/context"
+
+// IsTimeoutError reports whether err is a timeout error.
+func IsTimeoutError(err error) bool {
+ if err == context.DeadlineExceeded {
+ return true
+ }
+ if t, ok := err.(interface {
+ IsTimeout() bool
+ }); ok {
+ return t.IsTimeout()
+ }
+ return false
+}
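
A small sketch of how a caller might use IsTimeoutError to separate deadline failures from other API errors; the single-retry policy is illustrative, not part of the package:

	import (
		"golang.org/x/net/context"

		"google.golang.org/appengine"
		"google.golang.org/appengine/taskqueue"
	)

	func purgeWithRetry(c context.Context, queue string) error {
		err := taskqueue.Purge(c, queue)
		if appengine.IsTimeoutError(err) {
			// Timeouts are often transient; retry once before giving up.
			err = taskqueue.Purge(c, queue)
		}
		return err
	}
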
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 000000000..6ffe1e6d9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,210 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package urlfetch provides an http.RoundTripper implementation
+// for fetching URLs via App Engine's urlfetch service.
+package urlfetch // import "google.golang.org/appengine/urlfetch"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/urlfetch"
+)
+
+// Transport is an implementation of http.RoundTripper for
+// App Engine. Users should generally create an http.Client using
+// this transport and use the Client rather than using this transport
+// directly.
+type Transport struct {
+ Context context.Context
+
+ // Controls whether the application checks the validity of SSL certificates
+ // over HTTPS connections. A value of false (the default) instructs the
+ // application to send a request to the server only if the certificate is
+ // valid and signed by a trusted certificate authority (CA), and also
+ // includes a hostname that matches the certificate. A value of true
+ // instructs the application to perform no certificate validation.
+ AllowInvalidServerCertificate bool
+}
+
+// Verify statically that *Transport implements http.RoundTripper.
+var _ http.RoundTripper = (*Transport)(nil)
+
+// Client returns an *http.Client using a default urlfetch Transport. This
+// client will have the default deadline of 5 seconds, and will check the
+// validity of SSL certificates.
+//
+// Any deadline of the provided context will be used for requests through this client;
+// if the client does not have a deadline then a 5 second default is used.
+func Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &Transport{
+ Context: ctx,
+ },
+ }
+}
+
+type bodyReader struct {
+ content []byte
+ truncated bool
+ closed bool
+}
+
+// ErrTruncatedBody is the error returned after the final Read() from a
+// response's Body if the body has been truncated by App Engine's proxy.
+var ErrTruncatedBody = errors.New("urlfetch: truncated body")
+
+func statusCodeToText(code int) string {
+ if t := http.StatusText(code); t != "" {
+ return t
+ }
+ return strconv.Itoa(code)
+}
+
+func (br *bodyReader) Read(p []byte) (n int, err error) {
+ if br.closed {
+ if br.truncated {
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, br.content)
+ if n > 0 {
+ br.content = br.content[n:]
+ return
+ }
+ if br.truncated {
+ br.closed = true
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+}
+
+func (br *bodyReader) Close() error {
+ br.closed = true
+ br.content = nil
+ return nil
+}
+
+// A map of the URL Fetch-accepted methods that take a request body.
+var methodAcceptsRequestBody = map[string]bool{
+ "POST": true,
+ "PUT": true,
+ "PATCH": true,
+}
+
+// urlString returns a valid string given a URL. This function is necessary because
+// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
+// See http://code.google.com/p/go/issues/detail?id=4860.
+func urlString(u *url.URL) string {
+ if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
+ return u.String()
+ }
+ aux := *u
+ aux.Opaque = "//" + aux.Host + aux.Opaque
+ return aux.String()
+}
+
+// RoundTrip issues a single HTTP request and returns its response. Per the
+// http.RoundTripper interface, RoundTrip only returns an error if there
+// was an unsupported request or the URL Fetch proxy fails.
+// Note that HTTP response codes such as 5xx, 403, and 404 are not
+// errors as far as the transport is concerned and will be returned
+// with err set to nil.
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
+ methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
+ if !ok {
+ return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
+ }
+
+ method := pb.URLFetchRequest_RequestMethod(methNum)
+
+ freq := &pb.URLFetchRequest{
+ Method: &method,
+ Url: proto.String(urlString(req.URL)),
+ FollowRedirects: proto.Bool(false), // http.Client's responsibility
+ MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
+ }
+ if deadline, ok := t.Context.Deadline(); ok {
+ freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
+ }
+
+ for k, vals := range req.Header {
+ for _, val := range vals {
+ freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
+ Key: proto.String(k),
+ Value: proto.String(val),
+ })
+ }
+ }
+ if methodAcceptsRequestBody[req.Method] && req.Body != nil {
+ // Avoid a []byte copy if req.Body has a Bytes method.
+ switch b := req.Body.(type) {
+ case interface {
+ Bytes() []byte
+ }:
+ freq.Payload = b.Bytes()
+ default:
+ freq.Payload, err = ioutil.ReadAll(req.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ fres := &pb.URLFetchResponse{}
+ if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
+ return nil, err
+ }
+
+ res = &http.Response{}
+ res.StatusCode = int(*fres.StatusCode)
+ res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
+ res.Header = make(http.Header)
+ res.Request = req
+
+ // Faked:
+ res.ProtoMajor = 1
+ res.ProtoMinor = 1
+ res.Proto = "HTTP/1.1"
+ res.Close = true
+
+ for _, h := range fres.Header {
+ hkey := http.CanonicalHeaderKey(*h.Key)
+ hval := *h.Value
+ if hkey == "Content-Length" {
+ // Will get filled in below for all but HEAD requests.
+ if req.Method == "HEAD" {
+ res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
+ }
+ continue
+ }
+ res.Header.Add(hkey, hval)
+ }
+
+ if req.Method != "HEAD" {
+ res.ContentLength = int64(len(fres.Content))
+ }
+
+ truncated := fres.GetContentWasTruncated()
+ res.Body = &bodyReader{content: fres.Content, truncated: truncated}
+ return
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
+}
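
A minimal sketch of the intended usage: derive a context with a deadline (which becomes the urlfetch deadline) and fetch through the returned client. The URL and 30-second budget are illustrative:

	import (
		"io/ioutil"
		"time"

		"golang.org/x/net/context"
		"google.golang.org/appengine/urlfetch"
	)

	func fetch(ctx context.Context) ([]byte, error) {
		// Without a deadline on ctx, the service default of 5s applies.
		ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
		defer cancel()
		resp, err := urlfetch.Client(ctx).Get("https://example.com/")
		if err != nil {
			return nil, err
		}
		defer resp.Body.Close()
		return ioutil.ReadAll(resp.Body)
	}
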
diff --git a/vendor/google.golang.org/appengine/user/oauth.go b/vendor/google.golang.org/appengine/user/oauth.go
new file mode 100644
index 000000000..ffad57182
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/oauth.go
@@ -0,0 +1,52 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package user
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/user"
+)
+
+// CurrentOAuth returns the user associated with the OAuth consumer making this
+// request. If the OAuth consumer did not make a valid OAuth request, or the
+// scopes list is non-empty and the current user does not have at least one of
+// the scopes, this method will return an error.
+func CurrentOAuth(c context.Context, scopes ...string) (*User, error) {
+ req := &pb.GetOAuthUserRequest{}
+ if len(scopes) != 1 || scopes[0] != "" {
+ // The signature for this function used to be CurrentOAuth(Context, string).
+ // Ignore the singular "" scope to preserve existing behavior.
+ req.Scopes = scopes
+ }
+
+ res := &pb.GetOAuthUserResponse{}
+
+ err := internal.Call(c, "user", "GetOAuthUser", req, res)
+ if err != nil {
+ return nil, err
+ }
+ return &User{
+ Email: *res.Email,
+ AuthDomain: *res.AuthDomain,
+ Admin: res.GetIsAdmin(),
+ ID: *res.UserId,
+ ClientID: res.GetClientId(),
+ }, nil
+}
+
+// OAuthConsumerKey returns the OAuth consumer key provided with the current
+// request. This method will return an error if the OAuth request was invalid.
+func OAuthConsumerKey(c context.Context) (string, error) {
+ req := &pb.CheckOAuthSignatureRequest{}
+ res := &pb.CheckOAuthSignatureResponse{}
+
+ err := internal.Call(c, "user", "CheckOAuthSignature", req, res)
+ if err != nil {
+ return "", err
+ }
+ return *res.OauthConsumerKey, err
+}
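
A hedged sketch of guarding an API endpoint with CurrentOAuth; the scope string and handler shape are illustrative:

	import (
		"net/http"

		"google.golang.org/appengine"
		"google.golang.org/appengine/user"
	)

	func apiHandler(w http.ResponseWriter, r *http.Request) {
		c := appengine.NewContext(r)
		// Reject callers that did not present a valid OAuth token
		// carrying at least one of the listed scopes.
		u, err := user.CurrentOAuth(c, "https://www.googleapis.com/auth/userinfo.email")
		if err != nil {
			http.Error(w, "OAuth authorization required", http.StatusUnauthorized)
			return
		}
		w.Write([]byte("hello, " + u.Email))
	}
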
diff --git a/vendor/google.golang.org/appengine/user/user.go b/vendor/google.golang.org/appengine/user/user.go
new file mode 100644
index 000000000..eb76f59b7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package user provides a client for App Engine's user authentication service.
+package user // import "google.golang.org/appengine/user"
+
+import (
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/user"
+)
+
+// User represents a user of the application.
+type User struct {
+ Email string
+ AuthDomain string
+ Admin bool
+
+ // ID is the unique permanent ID of the user.
+ // It is populated if the Email is associated
+ // with a Google account, or empty otherwise.
+ ID string
+
+ // ClientID is the ID of the pre-registered client so its identity can be verified.
+ // See https://developers.google.com/console/help/#generatingoauth2 for more information.
+ ClientID string
+
+ FederatedIdentity string
+ FederatedProvider string
+}
+
+// String returns a displayable name for the user.
+func (u *User) String() string {
+ if u.AuthDomain != "" && strings.HasSuffix(u.Email, "@"+u.AuthDomain) {
+ return u.Email[:len(u.Email)-len("@"+u.AuthDomain)]
+ }
+ if u.FederatedIdentity != "" {
+ return u.FederatedIdentity
+ }
+ return u.Email
+}
+
+// LoginURL returns a URL that, when visited, prompts the user to sign in,
+// then redirects the user to the URL specified by dest.
+func LoginURL(c context.Context, dest string) (string, error) {
+ return LoginURLFederated(c, dest, "")
+}
+
+// LoginURLFederated is like LoginURL but accepts a user's OpenID identifier.
+func LoginURLFederated(c context.Context, dest, identity string) (string, error) {
+ req := &pb.CreateLoginURLRequest{
+ DestinationUrl: proto.String(dest),
+ }
+ if identity != "" {
+ req.FederatedIdentity = proto.String(identity)
+ }
+ res := &pb.CreateLoginURLResponse{}
+ if err := internal.Call(c, "user", "CreateLoginURL", req, res); err != nil {
+ return "", err
+ }
+ return *res.LoginUrl, nil
+}
+
+// LogoutURL returns a URL that, when visited, signs the user out,
+// then redirects the user to the URL specified by dest.
+func LogoutURL(c context.Context, dest string) (string, error) {
+ req := &pb.CreateLogoutURLRequest{
+ DestinationUrl: proto.String(dest),
+ }
+ res := &pb.CreateLogoutURLResponse{}
+ if err := internal.Call(c, "user", "CreateLogoutURL", req, res); err != nil {
+ return "", err
+ }
+ return *res.LogoutUrl, nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("user", pb.UserServiceError_ErrorCode_name)
+}
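
For completeness, the login flow these helpers support, sketched as an HTTP handler (paths are illustrative):

	import (
		"net/http"

		"google.golang.org/appengine"
		"google.golang.org/appengine/user"
	)

	func welcome(w http.ResponseWriter, r *http.Request) {
		c := appengine.NewContext(r)
		u := user.Current(c)
		if u == nil {
			// Not signed in: bounce through login and return here.
			url, err := user.LoginURL(c, r.URL.String())
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			http.Redirect(w, r, url, http.StatusFound)
			return
		}
		w.Write([]byte("Hello, " + u.String()))
	}
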
diff --git a/vendor/google.golang.org/appengine/user/user_classic.go b/vendor/google.golang.org/appengine/user/user_classic.go
new file mode 100644
index 000000000..81315094c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_classic.go
@@ -0,0 +1,44 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package user
+
+import (
+ "appengine/user"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+func Current(ctx context.Context) *User {
+ c, err := internal.ClassicContextFromContext(ctx)
+ if err != nil {
+ panic(err)
+ }
+ u := user.Current(c)
+ if u == nil {
+ return nil
+ }
+ // Map appengine/user.User to this package's User type.
+ return &User{
+ Email: u.Email,
+ AuthDomain: u.AuthDomain,
+ Admin: u.Admin,
+ ID: u.ID,
+ FederatedIdentity: u.FederatedIdentity,
+ FederatedProvider: u.FederatedProvider,
+ }
+}
+
+func IsAdmin(ctx context.Context) bool {
+ c, err := internal.ClassicContextFromContext(ctx)
+ if err != nil {
+ panic(err)
+ }
+
+ return user.IsAdmin(c)
+}
diff --git a/vendor/google.golang.org/appengine/user/user_test.go b/vendor/google.golang.org/appengine/user/user_test.go
new file mode 100644
index 000000000..5fc5957a8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_test.go
@@ -0,0 +1,99 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package user
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/user"
+)
+
+func baseReq() *http.Request {
+ return &http.Request{
+ Header: http.Header{},
+ }
+}
+
+type basicUserTest struct {
+ nickname, email, authDomain, admin string
+ // expectations
+ isNil, isAdmin bool
+ displayName string
+}
+
+var basicUserTests = []basicUserTest{
+ {"", "", "", "0", true, false, ""},
+ {"ken", "ken@example.com", "example.com", "0", false, false, "ken"},
+ {"ken", "ken@example.com", "auth_domain.com", "1", false, true, "ken@example.com"},
+}
+
+func TestBasicUserAPI(t *testing.T) {
+ for i, tc := range basicUserTests {
+ req := baseReq()
+ req.Header.Set("X-AppEngine-User-Nickname", tc.nickname)
+ req.Header.Set("X-AppEngine-User-Email", tc.email)
+ req.Header.Set("X-AppEngine-Auth-Domain", tc.authDomain)
+ req.Header.Set("X-AppEngine-User-Is-Admin", tc.admin)
+
+ c := internal.ContextForTesting(req)
+
+ if ga := IsAdmin(c); ga != tc.isAdmin {
+ t.Errorf("test %d: expected IsAdmin(c) = %v, got %v", i, tc.isAdmin, ga)
+ }
+
+ u := Current(c)
+ if tc.isNil {
+ if u != nil {
+ t.Errorf("test %d: expected u == nil, got %+v", i, u)
+ }
+ continue
+ }
+ if u == nil {
+ t.Errorf("test %d: expected u != nil, got nil", i)
+ continue
+ }
+ if u.Email != tc.email {
+ t.Errorf("test %d: expected u.Email = %q, got %q", i, tc.email, u.Email)
+ }
+ if gs := u.String(); gs != tc.displayName {
+ t.Errorf("test %d: expected u.String() = %q, got %q", i, tc.displayName, gs)
+ }
+ if u.Admin != tc.isAdmin {
+ t.Errorf("test %d: expected u.Admin = %v, got %v", i, tc.isAdmin, u.Admin)
+ }
+ }
+}
+
+func TestLoginURL(t *testing.T) {
+ expectedQuery := &pb.CreateLoginURLRequest{
+ DestinationUrl: proto.String("/destination"),
+ }
+ const expectedDest = "/redir/dest"
+ c := aetesting.FakeSingleContext(t, "user", "CreateLoginURL", func(req *pb.CreateLoginURLRequest, res *pb.CreateLoginURLResponse) error {
+ if !proto.Equal(req, expectedQuery) {
+ return fmt.Errorf("got %v, want %v", req, expectedQuery)
+ }
+ res.LoginUrl = proto.String(expectedDest)
+ return nil
+ })
+
+ url, err := LoginURL(c, "/destination")
+ if err != nil {
+ t.Fatalf("LoginURL failed: %v", err)
+ }
+ if url != expectedDest {
+ t.Errorf("got %v, want %v", url, expectedDest)
+ }
+}
+
+// TODO(dsymonds): Add test for LogoutURL.
diff --git a/vendor/google.golang.org/appengine/user/user_vm.go b/vendor/google.golang.org/appengine/user/user_vm.go
new file mode 100644
index 000000000..8dc672e92
--- /dev/null
+++ b/vendor/google.golang.org/appengine/user/user_vm.go
@@ -0,0 +1,38 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package user
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Current returns the currently logged-in user,
+// or nil if the user is not signed in.
+func Current(c context.Context) *User {
+ h := internal.IncomingHeaders(c)
+ u := &User{
+ Email: h.Get("X-AppEngine-User-Email"),
+ AuthDomain: h.Get("X-AppEngine-Auth-Domain"),
+ ID: h.Get("X-AppEngine-User-Id"),
+ Admin: h.Get("X-AppEngine-User-Is-Admin") == "1",
+ FederatedIdentity: h.Get("X-AppEngine-Federated-Identity"),
+ FederatedProvider: h.Get("X-AppEngine-Federated-Provider"),
+ }
+ if u.Email == "" && u.FederatedIdentity == "" {
+ return nil
+ }
+ return u
+}
+
+// IsAdmin returns true if the current user is signed in and
+// is currently registered as an administrator of the application.
+func IsAdmin(c context.Context) bool {
+ h := internal.IncomingHeaders(c)
+ return h.Get("X-AppEngine-User-Is-Admin") == "1"
+}
diff --git a/vendor/google.golang.org/appengine/xmpp/xmpp.go b/vendor/google.golang.org/appengine/xmpp/xmpp.go
new file mode 100644
index 000000000..3a561fd53
--- /dev/null
+++ b/vendor/google.golang.org/appengine/xmpp/xmpp.go
@@ -0,0 +1,253 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package xmpp provides the means to send and receive instant messages
+to and from users of XMPP-compatible services.
+
+To send a message,
+ m := &xmpp.Message{
+ To: []string{"kaylee@example.com"},
+ Body: `Hi! How's the carrot?`,
+ }
+ err := m.Send(c)
+
+To receive messages,
+ func init() {
+ xmpp.Handle(handleChat)
+ }
+
+ func handleChat(c context.Context, m *xmpp.Message) {
+ // ...
+ }
+*/
+package xmpp // import "google.golang.org/appengine/xmpp"
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/xmpp"
+)
+
+// Message represents an incoming chat message.
+type Message struct {
+ // Sender is the JID of the sender.
+ // Optional for outgoing messages.
+ Sender string
+
+ // To is the intended recipients of the message.
+ // Incoming messages will have exactly one element.
+ To []string
+
+ // Body is the body of the message.
+ Body string
+
+ // Type is the message type, per RFC 3921.
+ // It defaults to "chat".
+ Type string
+
+ // RawXML is whether the body contains raw XML.
+ RawXML bool
+}
+
+// Presence represents an outgoing presence update.
+type Presence struct {
+ // Sender is the JID (optional).
+ Sender string
+
+ // The intended recipient of the presence update.
+ To string
+
+ // Type, per RFC 3921 (optional). Defaults to "available".
+ Type string
+
+ // State of presence (optional).
+ // Valid values: "away", "chat", "xa", "dnd" (RFC 3921).
+ State string
+
+ // Free text status message (optional).
+ Status string
+}
+
+var (
+ ErrPresenceUnavailable = errors.New("xmpp: presence unavailable")
+ ErrInvalidJID = errors.New("xmpp: invalid JID")
+)
+
+// Handle arranges for f to be called for incoming XMPP messages.
+// Only messages of type "chat" or "normal" will be handled.
+func Handle(f func(c context.Context, m *Message)) {
+ http.HandleFunc("/_ah/xmpp/message/chat/", func(_ http.ResponseWriter, r *http.Request) {
+ f(appengine.NewContext(r), &Message{
+ Sender: r.FormValue("from"),
+ To: []string{r.FormValue("to")},
+ Body: r.FormValue("body"),
+ })
+ })
+}
+
+// Send sends a message.
+// If any failures occur with specific recipients, the error will be an appengine.MultiError.
+func (m *Message) Send(c context.Context) error {
+ req := &pb.XmppMessageRequest{
+ Jid: m.To,
+ Body: &m.Body,
+ RawXml: &m.RawXML,
+ }
+ if m.Type != "" && m.Type != "chat" {
+ req.Type = &m.Type
+ }
+ if m.Sender != "" {
+ req.FromJid = &m.Sender
+ }
+ res := &pb.XmppMessageResponse{}
+ if err := internal.Call(c, "xmpp", "SendMessage", req, res); err != nil {
+ return err
+ }
+
+ if len(res.Status) != len(req.Jid) {
+ return fmt.Errorf("xmpp: sent message to %d JIDs, but only got %d statuses back", len(req.Jid), len(res.Status))
+ }
+ me, any := make(appengine.MultiError, len(req.Jid)), false
+ for i, st := range res.Status {
+ if st != pb.XmppMessageResponse_NO_ERROR {
+ me[i] = errors.New(st.String())
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+// Invite sends an invitation. If the from address is an empty string,
+// the default (yourapp@appspot.com/bot) will be used.
+func Invite(c context.Context, to, from string) error {
+ req := &pb.XmppInviteRequest{
+ Jid: &to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.XmppInviteResponse{}
+ return internal.Call(c, "xmpp", "SendInvite", req, res)
+}
+
+// Send sends a presence update.
+func (p *Presence) Send(c context.Context) error {
+ req := &pb.XmppSendPresenceRequest{
+ Jid: &p.To,
+ }
+ if p.State != "" {
+ req.Show = &p.State
+ }
+ if p.Type != "" {
+ req.Type = &p.Type
+ }
+ if p.Sender != "" {
+ req.FromJid = &p.Sender
+ }
+ if p.Status != "" {
+ req.Status = &p.Status
+ }
+ res := &pb.XmppSendPresenceResponse{}
+ return internal.Call(c, "xmpp", "SendPresence", req, res)
+}
+
+var presenceMap = map[pb.PresenceResponse_SHOW]string{
+ pb.PresenceResponse_NORMAL: "",
+ pb.PresenceResponse_AWAY: "away",
+ pb.PresenceResponse_DO_NOT_DISTURB: "dnd",
+ pb.PresenceResponse_CHAT: "chat",
+ pb.PresenceResponse_EXTENDED_AWAY: "xa",
+}
+
+// GetPresence retrieves a user's presence.
+// If the from address is an empty string, the default
+// (yourapp@appspot.com/bot) will be used.
+// Possible return values are "", "away", "dnd", "chat", "xa".
+// ErrPresenceUnavailable is returned if the presence is unavailable.
+func GetPresence(c context.Context, to string, from string) (string, error) {
+ req := &pb.PresenceRequest{
+ Jid: &to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.PresenceResponse{}
+ if err := internal.Call(c, "xmpp", "GetPresence", req, res); err != nil {
+ return "", err
+ }
+ if !*res.IsAvailable || res.Presence == nil {
+ return "", ErrPresenceUnavailable
+ }
+ presence, ok := presenceMap[*res.Presence]
+ if ok {
+ return presence, nil
+ }
+ return "", fmt.Errorf("xmpp: unknown presence %v", *res.Presence)
+}
+
+// GetPresenceMulti retrieves multiple users' presence.
+// If the from address is an empty string, the default
+// (yourapp@appspot.com/bot) will be used.
+// Possible return values are "", "away", "dnd", "chat", "xa".
+// If any presence is unavailable, an appengine.MultiError is returned.
+func GetPresenceMulti(c context.Context, to []string, from string) ([]string, error) {
+ req := &pb.BulkPresenceRequest{
+ Jid: to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.BulkPresenceResponse{}
+
+ if err := internal.Call(c, "xmpp", "BulkGetPresence", req, res); err != nil {
+ return nil, err
+ }
+
+ presences := make([]string, 0, len(res.PresenceResponse))
+ errs := appengine.MultiError{}
+
+ addResult := func(presence string, err error) {
+ presences = append(presences, presence)
+ errs = append(errs, err)
+ }
+
+ anyErr := false
+ for _, subres := range res.PresenceResponse {
+ if !subres.GetValid() {
+ anyErr = true
+ addResult("", ErrInvalidJID)
+ continue
+ }
+ if !*subres.IsAvailable || subres.Presence == nil {
+ anyErr = true
+ addResult("", ErrPresenceUnavailable)
+ continue
+ }
+ presence, ok := presenceMap[*subres.Presence]
+ if ok {
+ addResult(presence, nil)
+ } else {
+ anyErr = true
+ addResult("", fmt.Errorf("xmpp: unknown presence %q", *subres.Presence))
+ }
+ }
+ if anyErr {
+ return presences, errs
+ }
+ return presences, nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("xmpp", pb.XmppServiceError_ErrorCode_name)
+}
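
A sketch combining presence with send: skip recipients who are unavailable or busy before messaging them. The "dnd" policy and helper name are illustrative:

	import (
		"golang.org/x/net/context"

		"google.golang.org/appengine/xmpp"
	)

	func notify(c context.Context, jid, text string) error {
		p, err := xmpp.GetPresence(c, jid, "")
		if err != nil || p == "dnd" {
			// Unavailable (ErrPresenceUnavailable) or do-not-disturb:
			// don't send; surface the error, if any, to the caller.
			return err
		}
		m := &xmpp.Message{
			To:   []string{jid},
			Body: text,
		}
		return m.Send(c)
	}
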
diff --git a/vendor/google.golang.org/appengine/xmpp/xmpp_test.go b/vendor/google.golang.org/appengine/xmpp/xmpp_test.go
new file mode 100644
index 000000000..c3030d36d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/xmpp/xmpp_test.go
@@ -0,0 +1,173 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package xmpp
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/xmpp"
+)
+
+func newPresenceResponse(isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) *pb.PresenceResponse {
+ return &pb.PresenceResponse{
+ IsAvailable: proto.Bool(isAvailable),
+ Presence: presence.Enum(),
+ Valid: proto.Bool(valid),
+ }
+}
+
+func setPresenceResponse(m *pb.PresenceResponse, isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) {
+ m.IsAvailable = &isAvailable
+ m.Presence = presence.Enum()
+ m.Valid = &valid
+}
+
+func TestGetPresence(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "GetPresence", func(in *pb.PresenceRequest, out *pb.PresenceResponse) error {
+ if jid := in.GetJid(); jid != "user@example.com" {
+ return fmt.Errorf("bad jid %q", jid)
+ }
+ setPresenceResponse(out, true, pb.PresenceResponse_CHAT, true)
+ return nil
+ })
+
+ presence, err := GetPresence(c, "user@example.com", "")
+ if err != nil {
+ t.Fatalf("GetPresence: %v", err)
+ }
+
+ if presence != "chat" {
+ t.Errorf("GetPresence: got %#v, want %#v", presence, pb.PresenceResponse_CHAT)
+ }
+}
+
+func TestGetPresenceMultiSingleJID(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
+ }
+ return nil
+ })
+
+ presence, err := GetPresenceMulti(c, []string{"user@example.com"}, "")
+ if err != nil {
+ t.Fatalf("GetPresenceMulti: %v", err)
+ }
+ if !reflect.DeepEqual(presence, []string{""}) {
+ t.Errorf("GetPresenceMulti: got %s, want %s", presence, []string{""})
+ }
+}
+
+func TestGetPresenceMultiJID(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
+ newPresenceResponse(true, pb.PresenceResponse_AWAY, true),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "")
+ if err != nil {
+ t.Fatalf("GetPresenceMulti: %v", err)
+ }
+ want := []string{"", "away"}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %v, want %v", presence, want)
+ }
+}
+
+func TestGetPresenceMultiFromJID(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ if jid := in.GetFromJid(); jid != "bot@appspot.com" {
+ return fmt.Errorf("bad from jid %q", jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
+ newPresenceResponse(true, pb.PresenceResponse_CHAT, true),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "bot@appspot.com")
+ if err != nil {
+ t.Fatalf("GetPresenceMulti: %v", err)
+ }
+ want := []string{"", "chat"}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %v, want %v", presence, want)
+ }
+}
+
+func TestGetPresenceMultiInvalid(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(true, pb.PresenceResponse_EXTENDED_AWAY, true),
+ newPresenceResponse(true, pb.PresenceResponse_CHAT, false),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "")
+
+ wantErr := appengine.MultiError{nil, ErrInvalidJID}
+ if !reflect.DeepEqual(err, wantErr) {
+ t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr)
+ }
+
+ want := []string{"xa", ""}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want)
+ }
+}
+
+func TestGetPresenceMultiUnavailable(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
+ if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
+ return fmt.Errorf("bad request jids %#v", in.Jid)
+ }
+ out.PresenceResponse = []*pb.PresenceResponse{
+ newPresenceResponse(false, pb.PresenceResponse_AWAY, true),
+ newPresenceResponse(false, pb.PresenceResponse_DO_NOT_DISTURB, true),
+ }
+ return nil
+ })
+
+ jids := []string{"user@example.com", "user2@example.com"}
+ presence, err := GetPresenceMulti(c, jids, "")
+
+ wantErr := appengine.MultiError{
+ ErrPresenceUnavailable,
+ ErrPresenceUnavailable,
+ }
+ if !reflect.DeepEqual(err, wantErr) {
+ t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr)
+ }
+ want := []string{"", ""}
+ if !reflect.DeepEqual(presence, want) {
+ t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want)
+ }
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/.gitignore b/vendor/gopkg.in/olivere/elastic.v5/.gitignore
index 47140b99b..306ffbd83 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/.gitignore
+++ b/vendor/gopkg.in/olivere/elastic.v5/.gitignore
@@ -22,6 +22,7 @@ _testmain.go
*.exe
/.vscode/
+/debug.test
/generator
/cluster-test/cluster-test
/cluster-test/*.log
diff --git a/vendor/gopkg.in/olivere/elastic.v5/.travis.yml b/vendor/gopkg.in/olivere/elastic.v5/.travis.yml
index 6f718f66a..b4322c13c 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/.travis.yml
+++ b/vendor/gopkg.in/olivere/elastic.v5/.travis.yml
@@ -12,5 +12,4 @@ services:
- docker
before_install:
- sudo sysctl -w vm.max_map_count=262144
- - docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:5.6.3 elasticsearch -Expack.security.enabled=false -Escript.inline=true -Escript.stored=true -Escript.file=true -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_ >& /dev/null &
- - sleep 30
+ - docker run -d --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:6.1.2 elasticsearch -Expack.security.enabled=false -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_
diff --git a/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-6.0.md b/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-6.0.md
new file mode 100644
index 000000000..277925929
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-6.0.md
@@ -0,0 +1,18 @@
+# Changes from 5.0 to 6.0
+
+See [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-6.0.html).
+
+## _all removed
+
+6.0 has removed support for the `_all` field.
+
+## Boolean values coerced
+
+Only use `true` or `false` for boolean values, not `0` or `1` or `on` or `off`.
+
+## Single Type Indices
+
+Note that 6.0 and later versions default to single-type indices: you may no longer declare multiple mapping types when, for example, creating an index with a mapping.
+
+See [here for details](https://www.elastic.co/guide/en/elasticsearch/reference/6.x/removal-of-types.html#_what_are_mapping_types).
+
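
A hedged Go illustration of the single-type rule, using the client's CreateIndex service (the index name and mapping are illustrative, and the exact builder methods should be checked against the client version in use):

	import (
		"context"

		elastic "gopkg.in/olivere/elastic.v5"
	)

	const mapping = `{
		"mappings": {
			"doc": {
				"properties": {
					"message": {"type": "text"}
				}
			}
		}
	}`

	func createIndex(ctx context.Context, client *elastic.Client) error {
		// Against a 6.x cluster only one mapping type ("doc" here)
		// may be declared per index.
		_, err := client.CreateIndex("tweets").BodyString(mapping).Do(ctx)
		return err
	}
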
diff --git a/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS
index e3ded87cd..d7f7f780f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS
+++ b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS
@@ -12,6 +12,7 @@ Adam Weiner [@adamweiner](https://github.com/adamweiner)
Adrian Lungu [@AdrianLungu](https://github.com/AdrianLungu)
alehano [@alehano](https://github.com/alehano)
Alex [@akotlar](https://github.com/akotlar)
+Alexander Sack [@asac](https://github.com/asac)
Alexandre Olivier [@aliphen](https://github.com/aliphen)
Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
AndreKR [@AndreKR](https://github.com/AndreKR)
@@ -62,6 +63,7 @@ Jack Lindamood [@cep21](https://github.com/cep21)
Jacob [@jdelgad](https://github.com/jdelgad)
Jayme Rotsaert [@jrots](https://github.com/jrots)
Jeremy Canady [@jrmycanady](https://github.com/jrmycanady)
+Jim Berlage [@jimberlage](https://github.com/jimberlage)
Joe Buck [@four2five](https://github.com/four2five)
John Barker [@j16r](https://github.com/j16r)
John Goodall [@jgoodall](https://github.com/jgoodall)
@@ -82,6 +84,7 @@ Marcy Buccellato [@marcybuccellato](https://github.com/marcybuccellato)
Mark Costello [@mcos](https://github.com/mcos)
Martin Häger [@protomouse](https://github.com/protomouse)
Medhi Bechina [@mdzor](https://github.com/mdzor)
+mnpritula [@mnpritula](https://github.com/mnpritula)
mosa [@mosasiru](https://github.com/mosasiru)
naimulhaider [@naimulhaider](https://github.com/naimulhaider)
Naoya Yoshizawa [@azihsoyn](https://github.com/azihsoyn)
@@ -108,6 +111,7 @@ Take [ww24](https://github.com/ww24)
Tetsuya Morimoto [@t2y](https://github.com/t2y)
TimeEmit [@TimeEmit](https://github.com/timeemit)
TusharM [@tusharm](https://github.com/tusharm)
+zhangxin [@visaxin](https://github.com/visaxin)
wangtuo [@wangtuo](https://github.com/wangtuo)
Wédney Yuri [@wedneyyuri](https://github.com/wedneyyuri)
wolfkdy [@wolfkdy](https://github.com/wolfkdy)
diff --git a/vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md b/vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md
index c5eb690a7..88d66cc83 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md
+++ b/vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md
@@ -6,6 +6,7 @@ your issue/question without further inquiry. Thank you.
[ ] elastic.v2 (for Elasticsearch 1.x)
[ ] elastic.v3 (for Elasticsearch 2.x)
[ ] elastic.v5 (for Elasticsearch 5.x)
+[ ] elastic.v6 (for Elasticsearch 6.x)
### Please describe the expected behavior
diff --git a/vendor/gopkg.in/olivere/elastic.v5/README.md b/vendor/gopkg.in/olivere/elastic.v5/README.md
index 45b84dab8..f452b664d 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/README.md
+++ b/vendor/gopkg.in/olivere/elastic.v5/README.md
@@ -1,10 +1,12 @@
# Elastic
+**This is a development branch that is actively being worked on. DO NOT USE IN PRODUCTION!**
+
Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
[Go](http://www.golang.org/) programming language.
-[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v5)](https://travis-ci.org/olivere/elastic)
-[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/gopkg.in/olivere/elastic.v5)
+[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=release-branch.v6)](https://travis-ci.org/olivere/elastic)
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](http://godoc.org/github.com/olivere/elastic)
[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
@@ -12,35 +14,46 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for additional informati
## Releases
-**The release branches (e.g. [`release-branch.v5`](https://github.com/olivere/elastic/tree/release-branch.v5))
+**The release branches (e.g. [`release-branch.v6`](https://github.com/olivere/elastic/tree/release-branch.v6))
are actively being worked on and can break at any time.
-If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).**
+If you want to use stable versions of Elastic, please use a dependency manager like [dep](https://github.com/golang/dep).**
Here's the version matrix:
-Elasticsearch version | Elastic version | Package URL
-----------------------|------------------|------------
-5.x | 5.0 | [`gopkg.in/olivere/elastic.v5`](https://gopkg.in/olivere/elastic.v5) ([source](https://github.com/olivere/elastic/tree/release-branch.v5) [doc](http://godoc.org/gopkg.in/olivere/elastic.v5))
-2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3))
-1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
-0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))
+Elasticsearch version | Elastic version | Package URL | Remarks |
+----------------------|------------------|-------------|---------|
+6.x | 6.0 | [`github.com/olivere/elastic`](https://github.com/olivere/elastic) ([source](https://github.com/olivere/elastic/tree/release-branch.v6) [doc](http://godoc.org/github.com/olivere/elastic)) | Use a dependency manager (see below).
+5.x | 5.0 | [`gopkg.in/olivere/elastic.v5`](https://gopkg.in/olivere/elastic.v5) ([source](https://github.com/olivere/elastic/tree/release-branch.v5) [doc](http://godoc.org/gopkg.in/olivere/elastic.v5)) | Actively maintained.
+2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3)) | Deprecated. Please update.
+1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2)) | Deprecated. Please update.
+0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1)) | Deprecated. Please update.
**Example:**
-You have installed Elasticsearch 5.0.0 and want to use Elastic.
-As listed above, you should use Elastic 5.0.
-So you first install the stable release of Elastic 5.0 from gopkg.in.
+You have installed Elasticsearch 6.0.0 and want to use Elastic.
+As listed above, you should use Elastic 6.0.
-```sh
-$ go get gopkg.in/olivere/elastic.v5
-```
+To use the required version of Elastic in your application, it is strongly
+advised to use a tool like
+[dep](https://github.com/golang/dep)
+or
+[Glide](https://glide.sh/)
+to manage that dependency. Make sure to use a version such as `^6.0.0`.
-You then import it with this import path:
+To use Elastic, simply import:
```go
-import elastic "gopkg.in/olivere/elastic.v5"
+import "github.com/olivere/elastic"
```
+### Elastic 6.0
+
+Elastic 6.0 targets Elasticsearch 6.x which was [released on 14th November 2017](https://www.elastic.co/blog/elasticsearch-6-0-0-released).
+
+Notice that there are a lot of [breaking changes in Elasticsearch 6.0](https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-changes-6.0.html)
+and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v6/CHANGELOG-6.0.md)
+as we did in the transition from earlier versions of Elastic.
+
### Elastic 5.0
Elastic 5.0 targets Elasticsearch 5.0.0 and later. Elasticsearch 5.0.0 was
@@ -107,7 +120,7 @@ The client connects to Elasticsearch on `http://127.0.0.1:9200` by default.
You typically create one client for your app. Here's a complete example of
creating a client, creating an index, adding a document, executing a search etc.
-An example is available [here](https://olivere.github.io/elastic/)
+An example is available [here](https://olivere.github.io/elastic/).
Here's a [link to a complete working example for v3](https://gist.github.com/olivere/114347ff9d9cfdca7bdc0ecea8b82263).
@@ -143,11 +156,10 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
- [x] Context Suggester
- [x] Multi Search API
- [x] Count API
-- [ ] Search Exists API
- [ ] Validate API
- [x] Explain API
- [x] Profile API
-- [x] Field Stats API
+- [x] Field Capabilities API
### Aggregations
@@ -167,9 +179,11 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
- [x] Top Hits
- [x] Value Count
- Bucket Aggregations
+ - [ ] Adjacency Matrix
- [x] Children
- [x] Date Histogram
- [x] Date Range
+ - [ ] Diversified Sampler
- [x] Filter
- [x] Filters
- [x] Geo Distance
@@ -183,6 +197,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
- [x] Reverse Nested
- [x] Sampler
- [x] Significant Terms
+ - [x] Significant Text
- [x] Terms
- Pipeline Aggregations
- [x] Avg Bucket
@@ -222,7 +237,7 @@ See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
- [x] Index Templates
- [ ] Shadow Replica Indices
- [x] Indices Stats
-- [ ] Indices Segments
+- [x] Indices Segments
- [ ] Indices Recovery
- [ ] Indices Shard Stores
- [ ] Clear Cache
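To accompany the README's client-creation passage above, here is a hedged sketch of typical v6-style usage; the `tweets` index, `doc` type, and `Tweet` struct are illustrative only:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

type Tweet struct {
	User    string `json:"user"`
	Message string `json:"message"`
}

func main() {
	ctx := context.Background()
	client, err := elastic.NewClient() // http://127.0.0.1:9200 by default
	if err != nil {
		log.Fatal(err)
	}
	// Index a document into the (illustrative) "tweets" index.
	_, err = client.Index().
		Index("tweets").Type("doc").Id("1").
		BodyJson(Tweet{User: "olivere", Message: "Hello"}).
		Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Search it back.
	res, err := client.Search("tweets").
		Query(elastic.NewMatchQuery("user", "olivere")).
		Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d hits\n", res.TotalHits())
}
```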
diff --git a/vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go b/vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go
index 83f954f44..2045ab85e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go
@@ -7,5 +7,7 @@ package elastic
// AcknowledgedResponse is returned from various APIs. It simply indicates
// whether the operation is ack'd or not.
type AcknowledgedResponse struct {
- Acknowledged bool `json:"acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk.go b/vendor/gopkg.in/olivere/elastic.v5/bulk.go
index f2fa0ea73..f4228294f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/bulk.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk.go
@@ -11,7 +11,7 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// BulkService allows for batching bulk requests and sending them to
@@ -23,10 +23,11 @@ import (
// reuse BulkService to send many batches. You do not have to create a new
// BulkService for each batch.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
// for more details.
type BulkService struct {
- client *Client
+ client *Client
+ retrier Retrier
index string
typ string
@@ -57,6 +58,13 @@ func (s *BulkService) reset() {
s.sizeInBytesCursor = 0
}
+// Retrier allows setting specific retry logic for this BulkService.
+// If not specified, it will use the client's default retrier.
+func (s *BulkService) Retrier(retrier Retrier) *BulkService {
+ s.retrier = retrier
+ return s
+}
+
// Index specifies the index to use for all batches. You may also leave
// this blank and specify the index in the individual bulk requests.
func (s *BulkService) Index(index string) *BulkService {
@@ -159,7 +167,8 @@ func (s *BulkService) NumberOfActions() int {
}
func (s *BulkService) bodyAsString() (string, error) {
- var buf bytes.Buffer
+ // Pre-allocate to reduce allocs
+ buf := bytes.NewBuffer(make([]byte, 0, s.EstimatedSizeInBytes()))
for _, req := range s.requests {
source, err := req.Source()
@@ -234,7 +243,14 @@ func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error) {
}
// Get response
- res, err := s.client.PerformRequestWithContentType(ctx, "POST", path, params, body, "application/x-ndjson")
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ ContentType: "application/x-ndjson",
+ Retrier: s.retrier,
+ })
if err != nil {
return nil, err
}
@@ -304,11 +320,14 @@ type BulkResponseItem struct {
Type string `json:"_type,omitempty"`
Id string `json:"_id,omitempty"`
Version int64 `json:"_version,omitempty"`
- Status int `json:"status,omitempty"`
Result string `json:"result,omitempty"`
+ Shards *shardsInfo `json:"_shards,omitempty"`
+ SeqNo int64 `json:"_seq_no,omitempty"`
+ PrimaryTerm int64 `json:"_primary_term,omitempty"`
+ Status int `json:"status,omitempty"`
ForcedRefresh bool `json:"forced_refresh,omitempty"`
- Found bool `json:"found,omitempty"`
Error *ErrorDetails `json:"error,omitempty"`
+ GetResult *GetResult `json:"get,omitempty"`
}
// Indexed returns all bulk request results of "index" actions.
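The new per-request `Retrier` hook above can be wired up roughly as follows; this sketch assumes the library's `NewBackoffRetrier` and `NewExponentialBackoff` helpers and an already-constructed `client`:

```go
import (
	"context"
	"time"

	"github.com/olivere/elastic"
)

// bulkWithCustomRetrier runs one bulk call with its own retry policy;
// the client-wide default retrier is left untouched.
func bulkWithCustomRetrier(ctx context.Context, client *elastic.Client) error {
	retrier := elastic.NewBackoffRetrier(
		elastic.NewExponentialBackoff(100*time.Millisecond, 5*time.Second),
	)
	_, err := client.Bulk().
		Retrier(retrier).
		Add(elastic.NewBulkIndexRequest().
			Index("index1").Type("doc").Id("1").
			Doc(map[string]interface{}{"user": "olivere"})).
		Do(ctx)
	return err
}
```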
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go
index 5d4a2a5a7..e6c98c553 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go
@@ -4,6 +4,8 @@
package elastic
+//go:generate easyjson bulk_delete_request.go
+
import (
"encoding/json"
"fmt"
@@ -14,7 +16,7 @@ import (
// BulkDeleteRequest is a request to remove a document from Elasticsearch.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
// for details.
type BulkDeleteRequest struct {
BulkableRequest
@@ -27,6 +29,22 @@ type BulkDeleteRequest struct {
versionType string // default is "internal"
source []string
+
+ useEasyJSON bool
+}
+
+//easyjson:json
+type bulkDeleteRequestCommand map[string]bulkDeleteRequestCommandOp
+
+//easyjson:json
+type bulkDeleteRequestCommandOp struct {
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Parent string `json:"parent,omitempty"`
+ Routing string `json:"routing,omitempty"`
+ Version int64 `json:"version,omitempty"`
+ VersionType string `json:"version_type,omitempty"`
}
// NewBulkDeleteRequest returns a new BulkDeleteRequest.
@@ -34,6 +52,16 @@ func NewBulkDeleteRequest() *BulkDeleteRequest {
return &BulkDeleteRequest{}
}
+// UseEasyJSON is an experimental setting that enables serialization
+// with github.com/mailru/easyjson, which should result in faster serialization
+// and fewer allocations, but removes compatibility with encoding/json and
+// relies on usage of unsafe. See https://github.com/mailru/easyjson#issues-notes-and-limitations
+// for details. This setting is disabled by default.
+func (r *BulkDeleteRequest) UseEasyJSON(enable bool) *BulkDeleteRequest {
+ r.useEasyJSON = enable
+ return r
+}
+
// Index specifies the Elasticsearch index to use for this delete request.
// If unspecified, the index set on the BulkService will be used.
func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
@@ -81,7 +109,7 @@ func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
}
// VersionType can be "internal" (default), "external", "external_gte",
-// "external_gt", or "force".
+// or "external_gt".
func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
r.versionType = versionType
r.source = nil
@@ -100,45 +128,38 @@ func (r *BulkDeleteRequest) String() string {
// Source returns the on-wire representation of the delete request,
// split into an action-and-meta-data line and an (optional) source line.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
// for details.
func (r *BulkDeleteRequest) Source() ([]string, error) {
if r.source != nil {
return r.source, nil
}
- lines := make([]string, 1)
-
- source := make(map[string]interface{})
- deleteCommand := make(map[string]interface{})
- if r.index != "" {
- deleteCommand["_index"] = r.index
+ command := bulkDeleteRequestCommand{
+ "delete": bulkDeleteRequestCommandOp{
+ Index: r.index,
+ Type: r.typ,
+ Id: r.id,
+ Routing: r.routing,
+ Parent: r.parent,
+ Version: r.version,
+ VersionType: r.versionType,
+ },
}
- if r.typ != "" {
- deleteCommand["_type"] = r.typ
- }
- if r.id != "" {
- deleteCommand["_id"] = r.id
- }
- if r.parent != "" {
- deleteCommand["_parent"] = r.parent
- }
- if r.routing != "" {
- deleteCommand["_routing"] = r.routing
- }
- if r.version > 0 {
- deleteCommand["_version"] = r.version
- }
- if r.versionType != "" {
- deleteCommand["_version_type"] = r.versionType
- }
- source["delete"] = deleteCommand
- body, err := json.Marshal(source)
+ var err error
+ var body []byte
+ if r.useEasyJSON {
+ // easyjson
+ body, err = command.MarshalJSON()
+ } else {
+ // encoding/json
+ body, err = json.Marshal(command)
+ }
if err != nil {
return nil, err
}
- lines[0] = string(body)
+ lines := []string{string(body)}
r.source = lines
return lines, nil
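A short sketch of the experimental toggle above; the expected metadata line matches the serialization tests later in this diff:

```go
import (
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func printDeleteSource() {
	// Opt a single request into easyjson serialization (off by default).
	req := elastic.NewBulkDeleteRequest().
		Index("index1").Type("doc").Id("1").
		UseEasyJSON(true)
	lines, err := req.Source()
	if err != nil {
		log.Fatal(err)
	}
	// Prints: {"delete":{"_index":"index1","_type":"doc","_id":"1"}}
	fmt.Println(lines[0])
}
```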
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_easyjson.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_easyjson.go
new file mode 100644
index 000000000..df3452ce6
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_easyjson.go
@@ -0,0 +1,230 @@
+// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
+
+package elastic
+
+import (
+ json "encoding/json"
+ easyjson "github.com/mailru/easyjson"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+)
+
+// suppress unused package warning
+var (
+ _ *json.RawMessage
+ _ *jlexer.Lexer
+ _ *jwriter.Writer
+ _ easyjson.Marshaler
+)
+
+func easyjson8092efb6DecodeGithubComOlivereElastic(in *jlexer.Lexer, out *bulkDeleteRequestCommandOp) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeString()
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "_index":
+ out.Index = string(in.String())
+ case "_type":
+ out.Type = string(in.String())
+ case "_id":
+ out.Id = string(in.String())
+ case "parent":
+ out.Parent = string(in.String())
+ case "routing":
+ out.Routing = string(in.String())
+ case "version":
+ out.Version = int64(in.Int64())
+ case "version_type":
+ out.VersionType = string(in.String())
+ default:
+ in.SkipRecursive()
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson8092efb6EncodeGithubComOlivereElastic(out *jwriter.Writer, in bulkDeleteRequestCommandOp) {
+ out.RawByte('{')
+ first := true
+ _ = first
+ if in.Index != "" {
+ const prefix string = ",\"_index\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Index))
+ }
+ if in.Type != "" {
+ const prefix string = ",\"_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Type))
+ }
+ if in.Id != "" {
+ const prefix string = ",\"_id\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Id))
+ }
+ if in.Parent != "" {
+ const prefix string = ",\"parent\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Parent))
+ }
+ if in.Routing != "" {
+ const prefix string = ",\"routing\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Routing))
+ }
+ if in.Version != 0 {
+ const prefix string = ",\"version\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int64(int64(in.Version))
+ }
+ if in.VersionType != "" {
+ const prefix string = ",\"version_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.VersionType))
+ }
+ out.RawByte('}')
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkDeleteRequestCommandOp) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson8092efb6EncodeGithubComOlivereElastic(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkDeleteRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson8092efb6EncodeGithubComOlivereElastic(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkDeleteRequestCommandOp) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson8092efb6DecodeGithubComOlivereElastic(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkDeleteRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson8092efb6DecodeGithubComOlivereElastic(l, v)
+}
+func easyjson8092efb6DecodeGithubComOlivereElastic1(in *jlexer.Lexer, out *bulkDeleteRequestCommand) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ in.Skip()
+ } else {
+ in.Delim('{')
+ if !in.IsDelim('}') {
+ *out = make(bulkDeleteRequestCommand)
+ } else {
+ *out = nil
+ }
+ for !in.IsDelim('}') {
+ key := string(in.String())
+ in.WantColon()
+ var v1 bulkDeleteRequestCommandOp
+ (v1).UnmarshalEasyJSON(in)
+ (*out)[key] = v1
+ in.WantComma()
+ }
+ in.Delim('}')
+ }
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson8092efb6EncodeGithubComOlivereElastic1(out *jwriter.Writer, in bulkDeleteRequestCommand) {
+ if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 {
+ out.RawString(`null`)
+ } else {
+ out.RawByte('{')
+ v2First := true
+ for v2Name, v2Value := range in {
+ if v2First {
+ v2First = false
+ } else {
+ out.RawByte(',')
+ }
+ out.String(string(v2Name))
+ out.RawByte(':')
+ (v2Value).MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+ }
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkDeleteRequestCommand) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson8092efb6EncodeGithubComOlivereElastic1(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkDeleteRequestCommand) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson8092efb6EncodeGithubComOlivereElastic1(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkDeleteRequestCommand) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson8092efb6DecodeGithubComOlivereElastic1(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkDeleteRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson8092efb6DecodeGithubComOlivereElastic1(l, v)
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_test.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_test.go
index 6ac429d8b..8635e34d1 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_test.go
@@ -15,23 +15,23 @@ func TestBulkDeleteRequestSerialization(t *testing.T) {
}{
// #0
{
- Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1"),
+ Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1"),
Expected: []string{
- `{"delete":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"delete":{"_index":"index1","_type":"doc","_id":"1"}}`,
},
},
// #1
{
- Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1").Parent("2"),
+ Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1").Parent("2"),
Expected: []string{
- `{"delete":{"_id":"1","_index":"index1","_parent":"2","_type":"tweet"}}`,
+ `{"delete":{"_index":"index1","_type":"doc","_id":"1","parent":"2"}}`,
},
},
// #2
{
- Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1").Routing("3"),
+ Request: NewBulkDeleteRequest().Index("index1").Type("doc").Id("1").Routing("3"),
Expected: []string{
- `{"delete":{"_id":"1","_index":"index1","_routing":"3","_type":"tweet"}}`,
+ `{"delete":{"_index":"index1","_type":"doc","_id":"1","routing":"3"}}`,
},
},
}
@@ -58,11 +58,22 @@ func TestBulkDeleteRequestSerialization(t *testing.T) {
var bulkDeleteRequestSerializationResult string
func BenchmarkBulkDeleteRequestSerialization(b *testing.B) {
- r := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+ b.Run("stdlib", func(b *testing.B) {
+ r := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
+ benchmarkBulkDeleteRequestSerialization(b, r.UseEasyJSON(false))
+ })
+ b.Run("easyjson", func(b *testing.B) {
+ r := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
+ benchmarkBulkDeleteRequestSerialization(b, r.UseEasyJSON(true))
+ })
+}
+
+func benchmarkBulkDeleteRequestSerialization(b *testing.B, r *BulkDeleteRequest) {
var s string
for n := 0; n < b.N; n++ {
s = r.String()
r.source = nil // Don't let caching spoil the benchmark
}
bulkDeleteRequestSerializationResult = s // ensure the compiler doesn't optimize
+ b.ReportAllocs()
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go
index 1c9302881..321d2e25a 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go
@@ -4,6 +4,8 @@
package elastic
+//go:generate easyjson bulk_index_request.go
+
import (
"encoding/json"
"fmt"
@@ -12,7 +14,7 @@ import (
// BulkIndexRequest is a request to add a document to Elasticsearch.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
// for details.
type BulkIndexRequest struct {
BulkableRequest
@@ -27,9 +29,27 @@ type BulkIndexRequest struct {
doc interface{}
pipeline string
retryOnConflict *int
- ttl string
source []string
+
+ useEasyJSON bool
+}
+
+//easyjson:json
+type bulkIndexRequestCommand map[string]bulkIndexRequestCommandOp
+
+//easyjson:json
+type bulkIndexRequestCommandOp struct {
+ Index string `json:"_index,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Parent string `json:"parent,omitempty"`
+ // RetryOnConflict is "_retry_on_conflict" for 6.0 and "retry_on_conflict" for 6.1+.
+ RetryOnConflict *int `json:"retry_on_conflict,omitempty"`
+ Routing string `json:"routing,omitempty"`
+ Version int64 `json:"version,omitempty"`
+ VersionType string `json:"version_type,omitempty"`
+ Pipeline string `json:"pipeline,omitempty"`
}
// NewBulkIndexRequest returns a new BulkIndexRequest.
@@ -40,6 +60,16 @@ func NewBulkIndexRequest() *BulkIndexRequest {
}
}
+// UseEasyJSON is an experimental setting that enables serialization
+// with github.com/mailru/easyjson, which should result in faster serialization
+// and fewer allocations, but removes compatibility with encoding/json and
+// relies on usage of unsafe. See https://github.com/mailru/easyjson#issues-notes-and-limitations
+// for details. This setting is disabled by default.
+func (r *BulkIndexRequest) UseEasyJSON(enable bool) *BulkIndexRequest {
+ r.useEasyJSON = enable
+ return r
+}
+
// Index specifies the Elasticsearch index to use for this index request.
// If unspecified, the index set on the BulkService will be used.
func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
@@ -65,7 +95,7 @@ func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
// OpType specifies if this request should follow create-only or upsert
// behavior. This follows the OpType of the standard document index API.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#operation-type
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#operation-type
// for details.
func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
r.opType = opType
@@ -98,7 +128,7 @@ func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
// VersionType specifies how versions are created. It can be e.g. internal,
// external, external_gte, or force.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#index-versioning
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#index-versioning
// for details.
func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
r.versionType = versionType
@@ -120,13 +150,6 @@ func (r *BulkIndexRequest) RetryOnConflict(retryOnConflict int) *BulkIndexReques
return r
}
-// TTL is an expiration time for the document.
-func (r *BulkIndexRequest) TTL(ttl string) *BulkIndexRequest {
- r.ttl = ttl
- r.source = nil
- return r
-}
-
// Pipeline to use while processing the request.
func (r *BulkIndexRequest) Pipeline(pipeline string) *BulkIndexRequest {
r.pipeline = pipeline
@@ -146,7 +169,7 @@ func (r *BulkIndexRequest) String() string {
// Source returns the on-wire representation of the index request,
// split into an action-and-meta-data line and an (optional) source line.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
// for details.
func (r *BulkIndexRequest) Source() ([]string, error) {
// { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
@@ -159,44 +182,35 @@ func (r *BulkIndexRequest) Source() ([]string, error) {
lines := make([]string, 2)
// "index" ...
- command := make(map[string]interface{})
- indexCommand := make(map[string]interface{})
- if r.index != "" {
- indexCommand["_index"] = r.index
- }
- if r.typ != "" {
- indexCommand["_type"] = r.typ
- }
- if r.id != "" {
- indexCommand["_id"] = r.id
- }
- if r.routing != "" {
- indexCommand["_routing"] = r.routing
- }
- if r.parent != "" {
- indexCommand["_parent"] = r.parent
- }
- if r.version > 0 {
- indexCommand["_version"] = r.version
+ indexCommand := bulkIndexRequestCommandOp{
+ Index: r.index,
+ Type: r.typ,
+ Id: r.id,
+ Routing: r.routing,
+ Parent: r.parent,
+ Version: r.version,
+ VersionType: r.versionType,
+ RetryOnConflict: r.retryOnConflict,
+ Pipeline: r.pipeline,
}
- if r.versionType != "" {
- indexCommand["_version_type"] = r.versionType
+ command := bulkIndexRequestCommand{
+ r.opType: indexCommand,
}
- if r.retryOnConflict != nil {
- indexCommand["_retry_on_conflict"] = *r.retryOnConflict
- }
- if r.ttl != "" {
- indexCommand["_ttl"] = r.ttl
- }
- if r.pipeline != "" {
- indexCommand["pipeline"] = r.pipeline
+
+ var err error
+ var body []byte
+ if r.useEasyJSON {
+ // easyjson
+ body, err = command.MarshalJSON()
+ } else {
+ // encoding/json
+ body, err = json.Marshal(command)
}
- command[r.opType] = indexCommand
- line, err := json.Marshal(command)
if err != nil {
return nil, err
}
- lines[0] = string(line)
+
+ lines[0] = string(body)
// "field1" ...
if r.doc != nil {
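As with the delete request, the rewritten `Source()` emits 6.x-style metadata keys (`routing`, `retry_on_conflict`, and `pipeline` without the old underscore prefixes); an illustrative sketch, grounded in the serialization tests below:

```go
import (
	"fmt"
	"log"

	"github.com/olivere/elastic"
)

func printIndexSource() {
	req := elastic.NewBulkIndexRequest().
		Index("index1").Type("doc").Id("1").
		RetryOnConflict(42).
		Doc(map[string]interface{}{"user": "olivere"})
	lines, err := req.Source()
	if err != nil {
		log.Fatal(err)
	}
	// lines[0]: {"index":{"_index":"index1","_id":"1","_type":"doc","retry_on_conflict":42}}
	// lines[1]: {"user":"olivere"}
	for _, line := range lines {
		fmt.Println(line)
	}
}
```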
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_easyjson.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_easyjson.go
new file mode 100644
index 000000000..f8792978f
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_easyjson.go
@@ -0,0 +1,262 @@
+// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
+
+package elastic
+
+import (
+ json "encoding/json"
+ easyjson "github.com/mailru/easyjson"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+)
+
+// suppress unused package warning
+var (
+ _ *json.RawMessage
+ _ *jlexer.Lexer
+ _ *jwriter.Writer
+ _ easyjson.Marshaler
+)
+
+func easyjson9de0fcbfDecodeGithubComOlivereElastic(in *jlexer.Lexer, out *bulkIndexRequestCommandOp) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeString()
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "_index":
+ out.Index = string(in.String())
+ case "_id":
+ out.Id = string(in.String())
+ case "_type":
+ out.Type = string(in.String())
+ case "parent":
+ out.Parent = string(in.String())
+ case "retry_on_conflict":
+ if in.IsNull() {
+ in.Skip()
+ out.RetryOnConflict = nil
+ } else {
+ if out.RetryOnConflict == nil {
+ out.RetryOnConflict = new(int)
+ }
+ *out.RetryOnConflict = int(in.Int())
+ }
+ case "routing":
+ out.Routing = string(in.String())
+ case "version":
+ out.Version = int64(in.Int64())
+ case "version_type":
+ out.VersionType = string(in.String())
+ case "pipeline":
+ out.Pipeline = string(in.String())
+ default:
+ in.SkipRecursive()
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson9de0fcbfEncodeGithubComOlivereElastic(out *jwriter.Writer, in bulkIndexRequestCommandOp) {
+ out.RawByte('{')
+ first := true
+ _ = first
+ if in.Index != "" {
+ const prefix string = ",\"_index\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Index))
+ }
+ if in.Id != "" {
+ const prefix string = ",\"_id\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Id))
+ }
+ if in.Type != "" {
+ const prefix string = ",\"_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Type))
+ }
+ if in.Parent != "" {
+ const prefix string = ",\"parent\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Parent))
+ }
+ if in.RetryOnConflict != nil {
+ const prefix string = ",\"retry_on_conflict\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int(int(*in.RetryOnConflict))
+ }
+ if in.Routing != "" {
+ const prefix string = ",\"routing\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Routing))
+ }
+ if in.Version != 0 {
+ const prefix string = ",\"version\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int64(int64(in.Version))
+ }
+ if in.VersionType != "" {
+ const prefix string = ",\"version_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.VersionType))
+ }
+ if in.Pipeline != "" {
+ const prefix string = ",\"pipeline\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Pipeline))
+ }
+ out.RawByte('}')
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkIndexRequestCommandOp) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson9de0fcbfEncodeGithubComOlivereElastic(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkIndexRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson9de0fcbfEncodeGithubComOlivereElastic(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkIndexRequestCommandOp) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson9de0fcbfDecodeGithubComOlivereElastic(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkIndexRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson9de0fcbfDecodeGithubComOlivereElastic(l, v)
+}
+func easyjson9de0fcbfDecodeGithubComOlivereElastic1(in *jlexer.Lexer, out *bulkIndexRequestCommand) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ in.Skip()
+ } else {
+ in.Delim('{')
+ if !in.IsDelim('}') {
+ *out = make(bulkIndexRequestCommand)
+ } else {
+ *out = nil
+ }
+ for !in.IsDelim('}') {
+ key := string(in.String())
+ in.WantColon()
+ var v1 bulkIndexRequestCommandOp
+ (v1).UnmarshalEasyJSON(in)
+ (*out)[key] = v1
+ in.WantComma()
+ }
+ in.Delim('}')
+ }
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson9de0fcbfEncodeGithubComOlivereElastic1(out *jwriter.Writer, in bulkIndexRequestCommand) {
+ if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 {
+ out.RawString(`null`)
+ } else {
+ out.RawByte('{')
+ v2First := true
+ for v2Name, v2Value := range in {
+ if v2First {
+ v2First = false
+ } else {
+ out.RawByte(',')
+ }
+ out.String(string(v2Name))
+ out.RawByte(':')
+ (v2Value).MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+ }
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkIndexRequestCommand) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson9de0fcbfEncodeGithubComOlivereElastic1(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkIndexRequestCommand) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson9de0fcbfEncodeGithubComOlivereElastic1(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkIndexRequestCommand) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson9de0fcbfDecodeGithubComOlivereElastic1(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkIndexRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson9de0fcbfDecodeGithubComOlivereElastic1(l, v)
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_test.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_test.go
index fe95bd65c..79baf51fb 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_test.go
@@ -16,55 +16,56 @@ func TestBulkIndexRequestSerialization(t *testing.T) {
}{
// #0
{
- Request: NewBulkIndexRequest().Index("index1").Type("tweet").Id("1").
+ Request: NewBulkIndexRequest().Index("index1").Type("doc").Id("1").
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"index":{"_index":"index1","_id":"1","_type":"doc"}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
// #1
{
- Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("tweet").Id("1").
+ Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("doc").Id("1").
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"create":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"create":{"_index":"index1","_id":"1","_type":"doc"}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
// #2
{
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"index":{"_index":"index1","_id":"1","_type":"doc"}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
// #3
{
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").RetryOnConflict(42).
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").RetryOnConflict(42).
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"index":{"_id":"1","_index":"index1","_retry_on_conflict":42,"_type":"tweet"}}`,
+ `{"index":{"_index":"index1","_id":"1","_type":"doc","retry_on_conflict":42}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
// #4
{
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").Pipeline("my_pipeline").
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").Pipeline("my_pipeline").
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"index":{"_id":"1","_index":"index1","_type":"tweet","pipeline":"my_pipeline"}}`,
+ `{"index":{"_index":"index1","_id":"1","_type":"doc","pipeline":"my_pipeline"}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
// #5
{
- Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").TTL("1m").
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("doc").Id("1").
+ Routing("123").
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
Expected: []string{
- `{"index":{"_id":"1","_index":"index1","_ttl":"1m","_type":"tweet"}}`,
+ `{"index":{"_index":"index1","_id":"1","_type":"doc","routing":"123"}}`,
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
},
},
@@ -92,12 +93,24 @@ func TestBulkIndexRequestSerialization(t *testing.T) {
var bulkIndexRequestSerializationResult string
func BenchmarkBulkIndexRequestSerialization(b *testing.B) {
- r := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").
- Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)})
+ b.Run("stdlib", func(b *testing.B) {
+ r := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)})
+ benchmarkBulkIndexRequestSerialization(b, r.UseEasyJSON(false))
+ })
+ b.Run("easyjson", func(b *testing.B) {
+ r := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)})
+ benchmarkBulkIndexRequestSerialization(b, r.UseEasyJSON(true))
+ })
+}
+
+func benchmarkBulkIndexRequestSerialization(b *testing.B, r *BulkIndexRequest) {
var s string
for n := 0; n < b.N; n++ {
s = r.String()
r.source = nil // Don't let caching spoil the benchmark
}
bulkIndexRequestSerializationResult = s // ensure the compiler doesn't optimize
+ b.ReportAllocs()
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go
index 9566c9e9e..b2709a880 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go
@@ -29,28 +29,29 @@ import (
// Elasticsearch Java API as documented in
// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html.
type BulkProcessorService struct {
- c *Client
- beforeFn BulkBeforeFunc
- afterFn BulkAfterFunc
- name string // name of processor
- numWorkers int // # of workers (>= 1)
- bulkActions int // # of requests after which to commit
- bulkSize int // # of bytes after which to commit
- flushInterval time.Duration // periodic flush interval
- wantStats bool // indicates whether to gather statistics
- initialTimeout time.Duration // initial wait time before retry on errors
- maxTimeout time.Duration // max time to wait for retry on errors
+ c *Client
+ beforeFn BulkBeforeFunc
+ afterFn BulkAfterFunc
+ name string // name of processor
+ numWorkers int // # of workers (>= 1)
+ bulkActions int // # of requests after which to commit
+ bulkSize int // # of bytes after which to commit
+ flushInterval time.Duration // periodic flush interval
+ wantStats bool // indicates whether to gather statistics
+ backoff Backoff // a custom Backoff to use for errors
}
// NewBulkProcessorService creates a new BulkProcessorService.
func NewBulkProcessorService(client *Client) *BulkProcessorService {
return &BulkProcessorService{
- c: client,
- numWorkers: 1,
- bulkActions: 1000,
- bulkSize: 5 << 20, // 5 MB
- initialTimeout: time.Duration(200) * time.Millisecond,
- maxTimeout: time.Duration(10000) * time.Millisecond,
+ c: client,
+ numWorkers: 1,
+ bulkActions: 1000,
+ bulkSize: 5 << 20, // 5 MB
+ backoff: NewExponentialBackoff(
+ time.Duration(200)*time.Millisecond,
+ time.Duration(10000)*time.Millisecond,
+ ),
}
}
@@ -120,6 +121,12 @@ func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService {
return s
}
+// Backoff sets the backoff strategy to use for errors.
+func (s *BulkProcessorService) Backoff(backoff Backoff) *BulkProcessorService {
+ s.backoff = backoff
+ return s
+}
+
// Do creates a new BulkProcessor and starts it.
// Consider the BulkProcessor as a running instance that accepts bulk requests
// and commits them to Elasticsearch, spreading the work across one or more
@@ -146,8 +153,7 @@ func (s *BulkProcessorService) Do(ctx context.Context) (*BulkProcessor, error) {
s.bulkSize,
s.flushInterval,
s.wantStats,
- s.initialTimeout,
- s.maxTimeout)
+ s.backoff)
err := p.Start(ctx)
if err != nil {
@@ -221,22 +227,21 @@ func (st *BulkProcessorWorkerStats) dup() *BulkProcessorWorkerStats {
// BulkProcessor is returned by setting up a BulkProcessorService and
// calling the Do method.
type BulkProcessor struct {
- c *Client
- beforeFn BulkBeforeFunc
- afterFn BulkAfterFunc
- name string
- bulkActions int
- bulkSize int
- numWorkers int
- executionId int64
- requestsC chan BulkableRequest
- workerWg sync.WaitGroup
- workers []*bulkWorker
- flushInterval time.Duration
- flusherStopC chan struct{}
- wantStats bool
- initialTimeout time.Duration // initial wait time before retry on errors
- maxTimeout time.Duration // max time to wait for retry on errors
+ c *Client
+ beforeFn BulkBeforeFunc
+ afterFn BulkAfterFunc
+ name string
+ bulkActions int
+ bulkSize int
+ numWorkers int
+ executionId int64
+ requestsC chan BulkableRequest
+ workerWg sync.WaitGroup
+ workers []*bulkWorker
+ flushInterval time.Duration
+ flusherStopC chan struct{}
+ wantStats bool
+ backoff Backoff
startedMu sync.Mutex // guards the following block
started bool
@@ -255,20 +260,18 @@ func newBulkProcessor(
bulkSize int,
flushInterval time.Duration,
wantStats bool,
- initialTimeout time.Duration,
- maxTimeout time.Duration) *BulkProcessor {
+ backoff Backoff) *BulkProcessor {
return &BulkProcessor{
- c: client,
- beforeFn: beforeFn,
- afterFn: afterFn,
- name: name,
- numWorkers: numWorkers,
- bulkActions: bulkActions,
- bulkSize: bulkSize,
- flushInterval: flushInterval,
- wantStats: wantStats,
- initialTimeout: initialTimeout,
- maxTimeout: maxTimeout,
+ c: client,
+ beforeFn: beforeFn,
+ afterFn: afterFn,
+ name: name,
+ numWorkers: numWorkers,
+ bulkActions: bulkActions,
+ bulkSize: bulkSize,
+ flushInterval: flushInterval,
+ wantStats: wantStats,
+ backoff: backoff,
}
}
@@ -473,7 +476,7 @@ func (w *bulkWorker) commit(ctx context.Context) error {
}
// notifyFunc will be called if retry fails
notifyFunc := func(err error) {
- w.p.c.errorf("elastic: bulk processor %q failed but will retry: %v", w.p.name, err)
+ w.p.c.errorf("elastic: bulk processor %q failed but may retry: %v", w.p.name, err)
}
id := atomic.AddInt64(&w.p.executionId, 1)
@@ -494,8 +497,7 @@ func (w *bulkWorker) commit(ctx context.Context) error {
}
// Commit bulk requests
- policy := NewExponentialBackoff(w.p.initialTimeout, w.p.maxTimeout)
- err := RetryNotify(commitFunc, policy, notifyFunc)
+ err := RetryNotify(commitFunc, w.p.backoff, notifyFunc)
w.updateStats(res)
if err != nil {
w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err)
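With the backoff now pluggable, a processor can be configured with a custom policy at build time; a hedged sketch assuming the library's `NewConstantBackoff` constructor and an existing `client`:

```go
import (
	"context"
	"time"

	"github.com/olivere/elastic"
)

// startProcessor builds a BulkProcessor that retries failed commits at a
// fixed one-second interval instead of the default exponential policy.
func startProcessor(ctx context.Context, client *elastic.Client) (*elastic.BulkProcessor, error) {
	return client.BulkProcessor().
		Name("worker-1").
		Workers(2).
		BulkActions(1000).               // commit after 1000 queued requests
		FlushInterval(30 * time.Second). // or every 30s, whichever comes first
		Backoff(elastic.NewConstantBackoff(time.Second)).
		Do(ctx)
}
```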
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_processor_test.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor_test.go
index a47e99652..bb97ca217 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/bulk_processor_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor_test.go
@@ -38,6 +38,9 @@ func TestBulkProcessorDefaults(t *testing.T) {
if got, want := p.wantStats, false; got != want {
t.Errorf("expected %v; got: %v", want, got)
}
+ if p.backoff == nil {
+ t.Fatalf("expected non-nill backoff; got: %v", p.backoff)
+ }
}
func TestBulkProcessorCommitOnBulkActions(t *testing.T) {
@@ -126,7 +129,7 @@ func TestBulkProcessorBasedOnFlushInterval(t *testing.T) {
for i := 1; i <= numDocs; i++ {
tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
p.Add(request)
}
@@ -209,7 +212,7 @@ func TestBulkProcessorClose(t *testing.T) {
for i := 1; i <= numDocs; i++ {
tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
p.Add(request)
}
@@ -275,7 +278,7 @@ func TestBulkProcessorFlush(t *testing.T) {
for i := 1; i <= numDocs; i++ {
tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
p.Add(request)
}
@@ -356,7 +359,7 @@ func testBulkProcessor(t *testing.T, numDocs int, svc *BulkProcessorService) {
for i := 1; i <= numDocs; i++ {
tweet := tweet{User: "olivere", Message: fmt.Sprintf("%07d. %s", i, randomString(1+rand.Intn(63)))}
- request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
+ request := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id(fmt.Sprintf("%d", i)).Doc(tweet)
p.Add(request)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_test.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_test.go
index 394439630..f31ed6613 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/bulk_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_test.go
@@ -8,6 +8,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "math/rand"
"net/http"
"net/http/httptest"
"testing"
@@ -19,9 +20,9 @@ func TestBulk(t *testing.T) {
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
- index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
- index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
- delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
bulkRequest := client.Bulk()
bulkRequest = bulkRequest.Add(index1Req)
@@ -45,7 +46,7 @@ func TestBulk(t *testing.T) {
}
// Document with Id="1" should not exist
- exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
+ exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -54,7 +55,7 @@ func TestBulk(t *testing.T) {
}
// Document with Id="2" should exist
- exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO())
+ exists, err = client.Exists().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -68,7 +69,7 @@ func TestBulk(t *testing.T) {
}{
42,
}
- update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").Doc(&updateDoc)
+ update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").Doc(&updateDoc)
bulkRequest = client.Bulk()
bulkRequest = bulkRequest.Add(update1Req)
@@ -89,7 +90,7 @@ func TestBulk(t *testing.T) {
}
// Document with Id="1" should have a retweets count of 42
- doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO())
+ doc, err := client.Get().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -112,7 +113,7 @@ func TestBulk(t *testing.T) {
}
// Update with script
- update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
+ update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").
RetryOnConflict(3).
Script(NewScript("ctx._source.retweets += params.v").Param("v", 1))
bulkRequest = client.Bulk()
@@ -133,7 +134,7 @@ func TestBulk(t *testing.T) {
}
// Document with Id="1" should have a retweets count of 43
- doc, err = client.Get().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO())
+ doc, err = client.Get().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -161,11 +162,11 @@ func TestBulkWithIndexSetOnClient(t *testing.T) {
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
- index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
- index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
- delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1).Routing("1")
+ index2Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
- bulkRequest := client.Bulk().Index(testIndexName).Type("tweet")
+ bulkRequest := client.Bulk().Index(testIndexName).Type("doc")
bulkRequest = bulkRequest.Add(index1Req)
bulkRequest = bulkRequest.Add(index2Req)
bulkRequest = bulkRequest.Add(delete1Req)
@@ -183,7 +184,7 @@ func TestBulkWithIndexSetOnClient(t *testing.T) {
}
// Document with Id="1" should not exist
- exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
+ exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -192,7 +193,7 @@ func TestBulkWithIndexSetOnClient(t *testing.T) {
}
// Document with Id="2" should exist
- exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO())
+ exists, err = client.Exists().Index(testIndexName).Type("doc").Id("2").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -201,16 +202,18 @@ func TestBulkWithIndexSetOnClient(t *testing.T) {
}
}
-func TestBulkRequestsSerialization(t *testing.T) {
+func TestBulkIndexDeleteUpdate(t *testing.T) {
client := setupTestClientAndCreateIndex(t)
+ //client := setupTestClientAndCreateIndexAndLog(t)
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
- index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
- index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
- delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
- update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
+ update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").
+ ReturnSource(true).
Doc(struct {
Retweets int `json:"retweets"`
}{
@@ -227,13 +230,13 @@ func TestBulkRequestsSerialization(t *testing.T) {
t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions())
}
- expected := `{"index":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
+ expected := `{"index":{"_index":"` + testIndexName + `","_id":"1","_type":"doc"}}
{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"}
-{"create":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
+{"create":{"_index":"` + testIndexName + `","_id":"2","_type":"doc"}}
{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"}
-{"delete":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
-{"update":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
-{"doc":{"retweets":42}}
+{"delete":{"_index":"` + testIndexName + `","_type":"doc","_id":"1"}}
+{"update":{"_index":"` + testIndexName + `","_type":"doc","_id":"2"}}
+{"doc":{"retweets":42},"_source":true}
`
got, err := bulkRequest.bodyAsString()
if err != nil {
@@ -244,7 +247,7 @@ func TestBulkRequestsSerialization(t *testing.T) {
}
// Run the bulk request
- bulkResponse, err := bulkRequest.Do(context.TODO())
+ bulkResponse, err := bulkRequest.Pretty(true).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -290,6 +293,9 @@ func TestBulkRequestsSerialization(t *testing.T) {
if created[0].Status != 201 {
t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status)
}
+ if want, have := "created", created[0].Result; want != have {
+ t.Errorf("expected created[0].Result == %q; got %q", want, have)
+ }
// Deleted actions
deleted := bulkResponse.Deleted()
@@ -305,8 +311,8 @@ func TestBulkRequestsSerialization(t *testing.T) {
if deleted[0].Status != 200 {
t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status)
}
- if !deleted[0].Found {
- t.Errorf("expected deleted[0].Found == %v; got %v", true, deleted[0].Found)
+ if want, have := "deleted", deleted[0].Result; want != have {
+ t.Errorf("expected deleted[0].Result == %q; got %q", want, have)
}
// Updated actions
@@ -326,6 +332,25 @@ func TestBulkRequestsSerialization(t *testing.T) {
if updated[0].Version != 2 {
t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version)
}
+ if want, have := "updated", updated[0].Result; want != have {
+ t.Errorf("expected updated[0].Result == %q; got %q", want, have)
+ }
+ if updated[0].GetResult == nil {
+ t.Fatalf("expected updated[0].GetResult to be != nil; got nil")
+ }
+ if updated[0].GetResult.Source == nil {
+ t.Fatalf("expected updated[0].GetResult.Source to be != nil; got nil")
+ }
+ if want, have := true, updated[0].GetResult.Found; want != have {
+ t.Fatalf("expected updated[0].GetResult.Found to be != %v; got %v", want, have)
+ }
+ var doc tweet
+ if err := json.Unmarshal(*updated[0].GetResult.Source, &doc); err != nil {
+ t.Fatalf("expected to unmarshal updated[0].GetResult.Source; got %v", err)
+ }
+ if want, have := 42, doc.Retweets; want != have {
+ t.Fatalf("expected updated tweet to have Retweets = %v; got %v", want, have)
+ }
// Succeeded actions
succeeded := bulkResponse.Succeeded()
@@ -371,7 +396,7 @@ func TestFailedBulkRequests(t *testing.T) {
"items" : [ {
"index" : {
"_index" : "elastic-test",
- "_type" : "tweet",
+ "_type" : "doc",
"_id" : "1",
"_version" : 1,
"status" : 201
@@ -379,7 +404,7 @@ func TestFailedBulkRequests(t *testing.T) {
}, {
"create" : {
"_index" : "elastic-test",
- "_type" : "tweet",
+ "_type" : "doc",
"_id" : "2",
"_version" : 1,
"status" : 423,
@@ -391,7 +416,7 @@ func TestFailedBulkRequests(t *testing.T) {
}, {
"delete" : {
"_index" : "elastic-test",
- "_type" : "tweet",
+ "_type" : "doc",
"_id" : "1",
"_version" : 2,
"status" : 404,
@@ -400,7 +425,7 @@ func TestFailedBulkRequests(t *testing.T) {
}, {
"update" : {
"_index" : "elastic-test",
- "_type" : "tweet",
+ "_type" : "doc",
"_id" : "2",
"_version" : 2,
"status" : 200
@@ -425,10 +450,10 @@ func TestBulkEstimatedSizeInBytes(t *testing.T) {
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
- index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
- index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
- delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
- update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("doc").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
+ update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("2").
Doc(struct {
Retweets int `json:"retweets"`
}{
@@ -468,13 +493,39 @@ func TestBulkEstimatedSizeInBytes(t *testing.T) {
func TestBulkEstimateSizeInBytesLength(t *testing.T) {
client := setupTestClientAndCreateIndex(t)
s := client.Bulk()
- r := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+ r := NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1")
s = s.Add(r)
if got, want := s.estimateSizeInBytes(r), int64(1+len(r.String())); got != want {
t.Fatalf("expected %d; got: %d", want, got)
}
}
+func TestBulkContentType(t *testing.T) {
+ var header http.Header
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ header = r.Header
+ fmt.Fprintln(w, `{}`)
+ }))
+ defer ts.Close()
+
+ client, err := NewSimpleClient(SetURL(ts.URL))
+ if err != nil {
+ t.Fatal(err)
+ }
+ indexReq := NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."})
+ if _, err := client.Bulk().Add(indexReq).Do(context.Background()); err != nil {
+ t.Fatal(err)
+ }
+ if header == nil {
+ t.Fatalf("expected header, got %v", header)
+ }
+ if want, have := "application/x-ndjson", header.Get("Content-Type"); want != have {
+ t.Fatalf("Content-Type: want %q, have %q", want, have)
+ }
+}
+
+// -- Benchmarks --
+
var benchmarkBulkEstimatedSizeInBytes int64
func BenchmarkBulkEstimatedSizeInBytesWith1Request(b *testing.B) {
@@ -482,9 +533,9 @@ func BenchmarkBulkEstimatedSizeInBytesWith1Request(b *testing.B) {
s := client.Bulk()
var result int64
for n := 0; n < b.N; n++ {
- s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"1"}))
- s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"2"}))
- s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1"))
+ s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"1"}))
+ s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"2"}))
+ s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1"))
result = s.EstimatedSizeInBytes()
s.reset()
}
@@ -498,9 +549,9 @@ func BenchmarkBulkEstimatedSizeInBytesWith100Requests(b *testing.B) {
var result int64
for n := 0; n < b.N; n++ {
for i := 0; i < 100; i++ {
- s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"1"}))
- s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"2"}))
- s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1"))
+ s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"1"}))
+ s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("doc").Id("1").Doc(struct{ A string }{"2"}))
+ s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("doc").Id("1"))
}
result = s.EstimatedSizeInBytes()
s.reset()
@@ -509,26 +560,41 @@ func BenchmarkBulkEstimatedSizeInBytesWith100Requests(b *testing.B) {
benchmarkBulkEstimatedSizeInBytes = result // ensure the compiler doesn't optimize
}
-func TestBulkContentType(t *testing.T) {
- var header http.Header
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- header = r.Header
- fmt.Fprintln(w, `{}`)
- }))
- defer ts.Close()
+func BenchmarkBulkAllocs(b *testing.B) {
+ b.Run("1000 docs with 64 byte", func(b *testing.B) { benchmarkBulkAllocs(b, 64, 1000) })
+ b.Run("1000 docs with 1 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 1024, 1000) })
+ b.Run("1000 docs with 4 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 4096, 1000) })
+ b.Run("1000 docs with 16 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 16*1024, 1000) })
+ b.Run("1000 docs with 64 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 64*1024, 1000) })
+ b.Run("1000 docs with 256 KiB", func(b *testing.B) { benchmarkBulkAllocs(b, 256*1024, 1000) })
+ b.Run("1000 docs with 1 MiB", func(b *testing.B) { benchmarkBulkAllocs(b, 1024*1024, 1000) })
+}
- client, err := NewSimpleClient(SetURL(ts.URL))
- if err != nil {
- t.Fatal(err)
- }
- indexReq := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."})
- if _, err := client.Bulk().Add(indexReq).Do(context.Background()); err != nil {
- t.Fatal(err)
+const (
+ charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-"
+)
+
+func benchmarkBulkAllocs(b *testing.B, size, num int) {
+ buf := make([]byte, size)
+ for i := range buf {
+ buf[i] = charset[rand.Intn(len(charset))]
}
- if header == nil {
- t.Fatalf("expected header, got %v", header)
+
+ s := &BulkService{}
+ n := 0
+ for {
+ n++
+ s = s.Add(NewBulkIndexRequest().Index("test").Type("doc").Id("1").Doc(struct {
+ S string `json:"s"`
+ }{
+ S: string(buf),
+ }))
+ if n >= num {
+ break
+ }
}
- if want, have := "application/x-ndjson", header.Get("Content-Type"); want != have {
- t.Fatalf("Content-Type: want %q, have %q", want, have)
+ for i := 0; i < b.N; i++ {
+ s.bodyAsString()
}
+ b.ReportAllocs()
}
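
The rewritten test above exercises the whole bulk round trip: queue index/create/delete/update actions, assert the NDJSON body, then inspect the typed Created/Deleted/Updated slices of the response. Note that Elasticsearch 6 reports per-item outcomes via the `result` field ("created", "deleted", "updated") rather than the old `found` flag, which is what the assertions switched to. A minimal sketch of that flow outside the test harness, assuming a reachable local Elasticsearch node and a hypothetical index named "test":

```go
package main

import (
	"context"
	"fmt"
	"log"

	elastic "github.com/olivere/elastic"
)

type tweet struct {
	User    string `json:"user"`
	Message string `json:"message"`
}

func main() {
	// NewClient sniffs http://127.0.0.1:9200 by default.
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	bulk := client.Bulk().
		Add(elastic.NewBulkIndexRequest().Index("test").Type("doc").Id("1").
			Doc(tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."})).
		Add(elastic.NewBulkDeleteRequest().Index("test").Type("doc").Id("2"))
	fmt.Println("queued actions:", bulk.NumberOfActions()) // 2

	res, err := bulk.Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("took:", res.Took, "errors:", res.Errors)
	for _, item := range res.Deleted() {
		fmt.Printf("delete %s: status=%d result=%s\n", item.Id, item.Status, item.Result)
	}
}
```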
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go
index b0dbf0917..50e5adb8f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go
@@ -4,6 +4,8 @@
package elastic
+//go:generate easyjson bulk_update_request.go
+
import (
"encoding/json"
"fmt"
@@ -12,7 +14,7 @@ import (
// BulkUpdateRequest is a request to update a document in Elasticsearch.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
// for details.
type BulkUpdateRequest struct {
BulkableRequest
@@ -31,8 +33,38 @@ type BulkUpdateRequest struct {
docAsUpsert *bool
detectNoop *bool
doc interface{}
+ returnSource *bool
source []string
+
+ useEasyJSON bool
+}
+
+//easyjson:json
+type bulkUpdateRequestCommand map[string]bulkUpdateRequestCommandOp
+
+//easyjson:json
+type bulkUpdateRequestCommandOp struct {
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Parent string `json:"parent,omitempty"`
+ // RetryOnConflict is "_retry_on_conflict" for 6.0 and "retry_on_conflict" for 6.1+.
+ RetryOnConflict *int `json:"retry_on_conflict,omitempty"`
+ Routing string `json:"routing,omitempty"`
+ Version int64 `json:"version,omitempty"`
+ VersionType string `json:"version_type,omitempty"`
+}
+
+//easyjson:json
+type bulkUpdateRequestCommandData struct {
+ DetectNoop *bool `json:"detect_noop,omitempty"`
+ Doc interface{} `json:"doc,omitempty"`
+ DocAsUpsert *bool `json:"doc_as_upsert,omitempty"`
+ Script interface{} `json:"script,omitempty"`
+ ScriptedUpsert *bool `json:"scripted_upsert,omitempty"`
+ Upsert interface{} `json:"upsert,omitempty"`
+ Source *bool `json:"_source,omitempty"`
}
// NewBulkUpdateRequest returns a new BulkUpdateRequest.
@@ -40,6 +72,16 @@ func NewBulkUpdateRequest() *BulkUpdateRequest {
return &BulkUpdateRequest{}
}
+// UseEasyJSON is an experimental setting that enables serialization
+// with github.com/mailru/easyjson, which should result in faster serialization
+// and fewer allocations, but removes compatibility with encoding/json and
+// relies on package unsafe. See https://github.com/mailru/easyjson#issues-notes-and-limitations
+// for details. This setting is disabled by default.
+func (r *BulkUpdateRequest) UseEasyJSON(enable bool) *BulkUpdateRequest {
+ r.useEasyJSON = enable
+ return r
+}
+
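A short sketch of opting a single update request into the easyjson path; for plain documents the output should match the encoding/json path, since both now serialize the same generated structs (index name and doc below are illustrative):

```go
package main

import (
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	req := elastic.NewBulkUpdateRequest().
		Index("test").Type("doc").Id("1").
		Doc(map[string]interface{}{"retweets": 42}).
		UseEasyJSON(true) // experimental; encoding/json remains the default
	// String() joins the command line and the data line with a newline:
	// {"update":{"_index":"test","_type":"doc","_id":"1"}}
	// {"doc":{"retweets":42}}
	fmt.Println(req.String())
}
```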
// Index specifies the Elasticsearch index to use for this update request.
// If unspecified, the index set on the BulkService will be used.
func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
@@ -78,8 +120,8 @@ func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
}
// Script specifies an update script.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html#bulk-update
-// and https://www.elastic.co/guide/en/elasticsearch/reference/5.2/modules-scripting.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html#bulk-update
+// and https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html
// for details.
func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest {
r.script = script
@@ -90,7 +132,7 @@ func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest {
// ScriptedUpsert specifies whether your script will run regardless of
// whether the document exists or not.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-update.html#_literal_scripted_upsert_literal
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-update.html#_literal_scripted_upsert_literal
func (r *BulkUpdateRequest) ScriptedUpsert(upsert bool) *BulkUpdateRequest {
r.scriptedUpsert = &upsert
r.source = nil
@@ -113,7 +155,7 @@ func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
}
// VersionType can be "internal" (default), "external", "external_gte",
-// "external_gt", or "force".
+// or "external_gt".
func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
r.versionType = versionType
r.source = nil
@@ -130,7 +172,7 @@ func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
// DocAsUpsert indicates whether the contents of Doc should be used as
// the Upsert value.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-update.html#_literal_doc_as_upsert_literal
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-update.html#_literal_doc_as_upsert_literal
// for details.
func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
r.docAsUpsert = &docAsUpsert
@@ -155,6 +197,15 @@ func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
return r
}
+// ReturnSource specifies whether Elasticsearch should return the source
+// after the update. In the request, this corresponds to the `_source` field.
+// It is false by default.
+func (r *BulkUpdateRequest) ReturnSource(source bool) *BulkUpdateRequest {
+ r.returnSource = &source
+ r.source = nil
+ return r
+}
+
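A sketch of what ReturnSource changes on the wire, mirroring test case #5 in the updated serialization tests; the names are illustrative, and the second line gains `"_source":true` so the bulk response's GetResult carries the post-update document:

```go
package main

import (
	"fmt"
	"log"

	elastic "github.com/olivere/elastic"
)

func main() {
	req := elastic.NewBulkUpdateRequest().
		Index("test").Type("doc").Id("2").
		ReturnSource(true).
		Doc(struct {
			Retweets int `json:"retweets"`
		}{Retweets: 42})

	lines, err := req.Source()
	if err != nil {
		log.Fatal(err)
	}
	// lines[0]: {"update":{"_index":"test","_type":"doc","_id":"2"}}
	// lines[1]: {"doc":{"retweets":42},"_source":true}
	for _, l := range lines {
		fmt.Println(l)
	}
}
```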
// String returns the on-wire representation of the update request,
// concatenated as a single string.
func (r *BulkUpdateRequest) String() string {
@@ -165,28 +216,9 @@ func (r *BulkUpdateRequest) String() string {
return strings.Join(lines, "\n")
}
-func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) {
- switch t := data.(type) {
- default:
- body, err := json.Marshal(data)
- if err != nil {
- return "", err
- }
- return string(body), nil
- case json.RawMessage:
- return string(t), nil
- case *json.RawMessage:
- return string(*t), nil
- case string:
- return t, nil
- case *string:
- return *t, nil
- }
-}
-
// Source returns the on-wire representation of the update request,
// split into an action-and-meta-data line and an (optional) source line.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
// for details.
func (r *BulkUpdateRequest) Source() ([]string, error) {
// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
@@ -202,69 +234,65 @@ func (r *BulkUpdateRequest) Source() ([]string, error) {
lines := make([]string, 2)
// "update" ...
- command := make(map[string]interface{})
- updateCommand := make(map[string]interface{})
- if r.index != "" {
- updateCommand["_index"] = r.index
- }
- if r.typ != "" {
- updateCommand["_type"] = r.typ
- }
- if r.id != "" {
- updateCommand["_id"] = r.id
- }
- if r.routing != "" {
- updateCommand["_routing"] = r.routing
- }
- if r.parent != "" {
- updateCommand["_parent"] = r.parent
- }
- if r.version > 0 {
- updateCommand["_version"] = r.version
+ updateCommand := bulkUpdateRequestCommandOp{
+ Index: r.index,
+ Type: r.typ,
+ Id: r.id,
+ Routing: r.routing,
+ Parent: r.parent,
+ Version: r.version,
+ VersionType: r.versionType,
+ RetryOnConflict: r.retryOnConflict,
}
- if r.versionType != "" {
- updateCommand["_version_type"] = r.versionType
+ command := bulkUpdateRequestCommand{
+ "update": updateCommand,
}
- if r.retryOnConflict != nil {
- updateCommand["_retry_on_conflict"] = *r.retryOnConflict
+
+ var err error
+ var body []byte
+ if r.useEasyJSON {
+ // easyjson
+ body, err = command.MarshalJSON()
+ } else {
+ // encoding/json
+ body, err = json.Marshal(command)
}
- command["update"] = updateCommand
- line, err := json.Marshal(command)
if err != nil {
return nil, err
}
- lines[0] = string(line)
+
+ lines[0] = string(body)
// 2nd line: {"doc" : { ... }} or {"script": {...}}
- source := make(map[string]interface{})
- if r.docAsUpsert != nil {
- source["doc_as_upsert"] = *r.docAsUpsert
- }
- if r.detectNoop != nil {
- source["detect_noop"] = *r.detectNoop
- }
- if r.upsert != nil {
- source["upsert"] = r.upsert
+ data := bulkUpdateRequestCommandData{
+ DocAsUpsert: r.docAsUpsert,
+ DetectNoop: r.detectNoop,
+ Upsert: r.upsert,
+ ScriptedUpsert: r.scriptedUpsert,
+ Doc: r.doc,
+ Source: r.returnSource,
}
- if r.scriptedUpsert != nil {
- source["scripted_upsert"] = *r.scriptedUpsert
- }
- if r.doc != nil {
- // {"doc":{...}}
- source["doc"] = r.doc
- } else if r.script != nil {
- // {"script":...}
- src, err := r.script.Source()
+ if r.script != nil {
+ script, err := r.script.Source()
if err != nil {
return nil, err
}
- source["script"] = src
+ data.Script = script
+ }
+
+ if r.useEasyJSON {
+ // easyjson
+ body, err = data.MarshalJSON()
+ } else {
+ // encoding/json
+ body, err = json.Marshal(data)
}
- lines[1], err = r.getSourceAsString(source)
if err != nil {
return nil, err
}
+ lines[1] = string(body)
+
r.source = lines
return lines, nil
}
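
The rewrite above swaps the ad-hoc maps for the easyjson-compatible structs, so both serializers emit the same two NDJSON lines, and field order on the command line is now fixed by struct order (`_index`, `_type`, `_id`) rather than the alphabetical order encoding/json gives maps — which is why the expected strings in the tests changed. When a script is set instead of a doc, the second line carries the script; a sketch (output shown as produced with the default serializer, `lang` omitted when unset):

```go
package main

import (
	"fmt"
	"log"

	elastic "github.com/olivere/elastic"
)

func main() {
	req := elastic.NewBulkUpdateRequest().
		Index("test").Type("doc").Id("1").
		Script(elastic.NewScript("ctx._source.retweets += params.n").Param("n", 1))

	lines, err := req.Source()
	if err != nil {
		log.Fatal(err)
	}
	// lines[0]: {"update":{"_index":"test","_type":"doc","_id":"1"}}
	// lines[1]: {"script":{"params":{"n":1},"source":"ctx._source.retweets += params.n"}}
	for _, l := range lines {
		fmt.Println(l)
	}
}
```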
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_easyjson.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_easyjson.go
new file mode 100644
index 000000000..d2c2cbfc7
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_easyjson.go
@@ -0,0 +1,461 @@
+// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
+
+package elastic
+
+import (
+ json "encoding/json"
+ easyjson "github.com/mailru/easyjson"
+ jlexer "github.com/mailru/easyjson/jlexer"
+ jwriter "github.com/mailru/easyjson/jwriter"
+)
+
+// suppress unused package warning
+var (
+ _ *json.RawMessage
+ _ *jlexer.Lexer
+ _ *jwriter.Writer
+ _ easyjson.Marshaler
+)
+
+func easyjson1ed00e60DecodeGithubComOlivereElastic(in *jlexer.Lexer, out *bulkUpdateRequestCommandOp) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeString()
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "_index":
+ out.Index = string(in.String())
+ case "_type":
+ out.Type = string(in.String())
+ case "_id":
+ out.Id = string(in.String())
+ case "parent":
+ out.Parent = string(in.String())
+ case "retry_on_conflict":
+ if in.IsNull() {
+ in.Skip()
+ out.RetryOnConflict = nil
+ } else {
+ if out.RetryOnConflict == nil {
+ out.RetryOnConflict = new(int)
+ }
+ *out.RetryOnConflict = int(in.Int())
+ }
+ case "routing":
+ out.Routing = string(in.String())
+ case "version":
+ out.Version = int64(in.Int64())
+ case "version_type":
+ out.VersionType = string(in.String())
+ default:
+ in.SkipRecursive()
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson1ed00e60EncodeGithubComOlivereElastic(out *jwriter.Writer, in bulkUpdateRequestCommandOp) {
+ out.RawByte('{')
+ first := true
+ _ = first
+ if in.Index != "" {
+ const prefix string = ",\"_index\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Index))
+ }
+ if in.Type != "" {
+ const prefix string = ",\"_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Type))
+ }
+ if in.Id != "" {
+ const prefix string = ",\"_id\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Id))
+ }
+ if in.Parent != "" {
+ const prefix string = ",\"parent\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Parent))
+ }
+ if in.RetryOnConflict != nil {
+ const prefix string = ",\"retry_on_conflict\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int(int(*in.RetryOnConflict))
+ }
+ if in.Routing != "" {
+ const prefix string = ",\"routing\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.Routing))
+ }
+ if in.Version != 0 {
+ const prefix string = ",\"version\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Int64(int64(in.Version))
+ }
+ if in.VersionType != "" {
+ const prefix string = ",\"version_type\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.String(string(in.VersionType))
+ }
+ out.RawByte('}')
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkUpdateRequestCommandOp) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson1ed00e60EncodeGithubComOlivereElastic(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkUpdateRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson1ed00e60EncodeGithubComOlivereElastic(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkUpdateRequestCommandOp) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson1ed00e60DecodeGithubComOlivereElastic(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkUpdateRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson1ed00e60DecodeGithubComOlivereElastic(l, v)
+}
+func easyjson1ed00e60DecodeGithubComOlivereElastic1(in *jlexer.Lexer, out *bulkUpdateRequestCommandData) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeString()
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "detect_noop":
+ if in.IsNull() {
+ in.Skip()
+ out.DetectNoop = nil
+ } else {
+ if out.DetectNoop == nil {
+ out.DetectNoop = new(bool)
+ }
+ *out.DetectNoop = bool(in.Bool())
+ }
+ case "doc":
+ if m, ok := out.Doc.(easyjson.Unmarshaler); ok {
+ m.UnmarshalEasyJSON(in)
+ } else if m, ok := out.Doc.(json.Unmarshaler); ok {
+ _ = m.UnmarshalJSON(in.Raw())
+ } else {
+ out.Doc = in.Interface()
+ }
+ case "doc_as_upsert":
+ if in.IsNull() {
+ in.Skip()
+ out.DocAsUpsert = nil
+ } else {
+ if out.DocAsUpsert == nil {
+ out.DocAsUpsert = new(bool)
+ }
+ *out.DocAsUpsert = bool(in.Bool())
+ }
+ case "script":
+ if m, ok := out.Script.(easyjson.Unmarshaler); ok {
+ m.UnmarshalEasyJSON(in)
+ } else if m, ok := out.Script.(json.Unmarshaler); ok {
+ _ = m.UnmarshalJSON(in.Raw())
+ } else {
+ out.Script = in.Interface()
+ }
+ case "scripted_upsert":
+ if in.IsNull() {
+ in.Skip()
+ out.ScriptedUpsert = nil
+ } else {
+ if out.ScriptedUpsert == nil {
+ out.ScriptedUpsert = new(bool)
+ }
+ *out.ScriptedUpsert = bool(in.Bool())
+ }
+ case "upsert":
+ if m, ok := out.Upsert.(easyjson.Unmarshaler); ok {
+ m.UnmarshalEasyJSON(in)
+ } else if m, ok := out.Upsert.(json.Unmarshaler); ok {
+ _ = m.UnmarshalJSON(in.Raw())
+ } else {
+ out.Upsert = in.Interface()
+ }
+ case "_source":
+ if in.IsNull() {
+ in.Skip()
+ out.Source = nil
+ } else {
+ if out.Source == nil {
+ out.Source = new(bool)
+ }
+ *out.Source = bool(in.Bool())
+ }
+ default:
+ in.SkipRecursive()
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson1ed00e60EncodeGithubComOlivereElastic1(out *jwriter.Writer, in bulkUpdateRequestCommandData) {
+ out.RawByte('{')
+ first := true
+ _ = first
+ if in.DetectNoop != nil {
+ const prefix string = ",\"detect_noop\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Bool(bool(*in.DetectNoop))
+ }
+ if in.Doc != nil {
+ const prefix string = ",\"doc\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ if m, ok := in.Doc.(easyjson.Marshaler); ok {
+ m.MarshalEasyJSON(out)
+ } else if m, ok := in.Doc.(json.Marshaler); ok {
+ out.Raw(m.MarshalJSON())
+ } else {
+ out.Raw(json.Marshal(in.Doc))
+ }
+ }
+ if in.DocAsUpsert != nil {
+ const prefix string = ",\"doc_as_upsert\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Bool(bool(*in.DocAsUpsert))
+ }
+ if in.Script != nil {
+ const prefix string = ",\"script\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ if m, ok := in.Script.(easyjson.Marshaler); ok {
+ m.MarshalEasyJSON(out)
+ } else if m, ok := in.Script.(json.Marshaler); ok {
+ out.Raw(m.MarshalJSON())
+ } else {
+ out.Raw(json.Marshal(in.Script))
+ }
+ }
+ if in.ScriptedUpsert != nil {
+ const prefix string = ",\"scripted_upsert\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Bool(bool(*in.ScriptedUpsert))
+ }
+ if in.Upsert != nil {
+ const prefix string = ",\"upsert\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ if m, ok := in.Upsert.(easyjson.Marshaler); ok {
+ m.MarshalEasyJSON(out)
+ } else if m, ok := in.Upsert.(json.Marshaler); ok {
+ out.Raw(m.MarshalJSON())
+ } else {
+ out.Raw(json.Marshal(in.Upsert))
+ }
+ }
+ if in.Source != nil {
+ const prefix string = ",\"_source\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ out.Bool(bool(*in.Source))
+ }
+ out.RawByte('}')
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkUpdateRequestCommandData) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson1ed00e60EncodeGithubComOlivereElastic1(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkUpdateRequestCommandData) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson1ed00e60EncodeGithubComOlivereElastic1(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkUpdateRequestCommandData) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson1ed00e60DecodeGithubComOlivereElastic1(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkUpdateRequestCommandData) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson1ed00e60DecodeGithubComOlivereElastic1(l, v)
+}
+func easyjson1ed00e60DecodeGithubComOlivereElastic2(in *jlexer.Lexer, out *bulkUpdateRequestCommand) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ in.Skip()
+ } else {
+ in.Delim('{')
+ if !in.IsDelim('}') {
+ *out = make(bulkUpdateRequestCommand)
+ } else {
+ *out = nil
+ }
+ for !in.IsDelim('}') {
+ key := string(in.String())
+ in.WantColon()
+ var v1 bulkUpdateRequestCommandOp
+ (v1).UnmarshalEasyJSON(in)
+ (*out)[key] = v1
+ in.WantComma()
+ }
+ in.Delim('}')
+ }
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+func easyjson1ed00e60EncodeGithubComOlivereElastic2(out *jwriter.Writer, in bulkUpdateRequestCommand) {
+ if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 {
+ out.RawString(`null`)
+ } else {
+ out.RawByte('{')
+ v2First := true
+ for v2Name, v2Value := range in {
+ if v2First {
+ v2First = false
+ } else {
+ out.RawByte(',')
+ }
+ out.String(string(v2Name))
+ out.RawByte(':')
+ (v2Value).MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+ }
+}
+
+// MarshalJSON supports json.Marshaler interface
+func (v bulkUpdateRequestCommand) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ easyjson1ed00e60EncodeGithubComOlivereElastic2(&w, v)
+ return w.Buffer.BuildBytes(), w.Error
+}
+
+// MarshalEasyJSON supports easyjson.Marshaler interface
+func (v bulkUpdateRequestCommand) MarshalEasyJSON(w *jwriter.Writer) {
+ easyjson1ed00e60EncodeGithubComOlivereElastic2(w, v)
+}
+
+// UnmarshalJSON supports json.Unmarshaler interface
+func (v *bulkUpdateRequestCommand) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ easyjson1ed00e60DecodeGithubComOlivereElastic2(&r, v)
+ return r.Error()
+}
+
+// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
+func (v *bulkUpdateRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) {
+ easyjson1ed00e60DecodeGithubComOlivereElastic2(l, v)
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_test.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_test.go
index afe873890..53e73bd40 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_test.go
@@ -15,19 +15,20 @@ func TestBulkUpdateRequestSerialization(t *testing.T) {
}{
// #0
{
- Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct {
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").Doc(struct {
Counter int64 `json:"counter"`
}{
Counter: 42,
}),
Expected: []string{
- `{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"update":{"_index":"index1","_type":"doc","_id":"1"}}`,
`{"doc":{"counter":42}}`,
},
},
// #1
{
- Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").
+ Routing("123").
RetryOnConflict(3).
DocAsUpsert(true).
Doc(struct {
@@ -36,13 +37,13 @@ func TestBulkUpdateRequestSerialization(t *testing.T) {
Counter: 42,
}),
Expected: []string{
- `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`,
+ `{"update":{"_index":"index1","_type":"doc","_id":"1","retry_on_conflict":3,"routing":"123"}}`,
`{"doc":{"counter":42},"doc_as_upsert":true}`,
},
},
// #2
{
- Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").
RetryOnConflict(3).
Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)).
Upsert(struct {
@@ -51,25 +52,25 @@ func TestBulkUpdateRequestSerialization(t *testing.T) {
Counter: 42,
}),
Expected: []string{
- `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`,
- `{"script":{"inline":"ctx._source.retweets += param1","lang":"javascript","params":{"param1":42}},"upsert":{"counter":42}}`,
+ `{"update":{"_index":"index1","_type":"doc","_id":"1","retry_on_conflict":3}}`,
+ `{"script":{"lang":"javascript","params":{"param1":42},"source":"ctx._source.retweets += param1"},"upsert":{"counter":42}}`,
},
},
// #3
{
- Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").DetectNoop(true).Doc(struct {
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").DetectNoop(true).Doc(struct {
Counter int64 `json:"counter"`
}{
Counter: 42,
}),
Expected: []string{
- `{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"update":{"_index":"index1","_type":"doc","_id":"1"}}`,
`{"detect_noop":true,"doc":{"counter":42}}`,
},
},
// #4
{
- Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").
RetryOnConflict(3).
ScriptedUpsert(true).
Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)).
@@ -79,8 +80,20 @@ func TestBulkUpdateRequestSerialization(t *testing.T) {
Counter: 42,
}),
Expected: []string{
- `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`,
- `{"script":{"inline":"ctx._source.retweets += param1","lang":"javascript","params":{"param1":42}},"scripted_upsert":true,"upsert":{"counter":42}}`,
+ `{"update":{"_index":"index1","_type":"doc","_id":"1","retry_on_conflict":3}}`,
+ `{"script":{"lang":"javascript","params":{"param1":42},"source":"ctx._source.retweets += param1"},"scripted_upsert":true,"upsert":{"counter":42}}`,
+ },
+ },
+ // #5
+ {
+ Request: NewBulkUpdateRequest().Index("index1").Type("doc").Id("4").ReturnSource(true).Doc(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ }),
+ Expected: []string{
+ `{"update":{"_index":"index1","_type":"doc","_id":"4"}}`,
+ `{"doc":{"counter":42},"_source":true}`,
},
},
}
@@ -107,15 +120,30 @@ func TestBulkUpdateRequestSerialization(t *testing.T) {
var bulkUpdateRequestSerializationResult string
func BenchmarkBulkUpdateRequestSerialization(b *testing.B) {
- r := NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct {
- Counter int64 `json:"counter"`
- }{
- Counter: 42,
+ b.Run("stdlib", func(b *testing.B) {
+ r := NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").Doc(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ })
+ benchmarkBulkUpdateRequestSerialization(b, r.UseEasyJSON(false))
})
+ b.Run("easyjson", func(b *testing.B) {
+ r := NewBulkUpdateRequest().Index("index1").Type("doc").Id("1").Doc(struct {
+ Counter int64 `json:"counter"`
+ }{
+ Counter: 42,
+ })
+ benchmarkBulkUpdateRequestSerialization(b, r.UseEasyJSON(true))
+ })
+}
+
+func benchmarkBulkUpdateRequestSerialization(b *testing.B, r *BulkUpdateRequest) {
var s string
for n := 0; n < b.N; n++ {
s = r.String()
r.source = nil // Don't let caching spoil the benchmark
}
bulkUpdateRequestSerializationResult = s // ensure the compiler doesn't optimize
+ b.ReportAllocs()
}
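
The split into stdlib and easyjson sub-benchmarks makes the comparison one command away; the invocation below is a standard `go test` call, not part of this change:

```go
// From the package directory:
//
//	go test -run=NONE -bench=BenchmarkBulkUpdateRequestSerialization -benchmem
//
// b.ReportAllocs() above makes both sub-benchmarks print allocs/op, which is
// the number the easyjson path is meant to reduce.
```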
diff --git a/vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go b/vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go
index 83e592875..4f449504c 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go
@@ -12,7 +12,7 @@ import (
// ClearScrollService clears one or more scroll contexts by their ids.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html#_clear_scroll_api
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#_clear_scroll_api
// for details.
type ClearScrollService struct {
client *Client
@@ -49,7 +49,7 @@ func (s *ClearScrollService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
return path, params, nil
}
@@ -85,7 +85,12 @@ func (s *ClearScrollService) Do(ctx context.Context) (*ClearScrollResponse, erro
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "DELETE", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
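
ClearScrollService now routes through PerformRequestOptions like every other service; usage is unchanged. A minimal sketch, assuming a scroll ID obtained from a prior scroll search:

```go
package main

import (
	"context"
	"log"

	elastic "github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	scrollID := "DXF1ZXJ5QW5k..." // hypothetical ID from a previous Scroll().Do call

	// Release the server-side scroll context once iteration is done.
	if _, err := client.ClearScroll(scrollID).Do(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```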
diff --git a/vendor/gopkg.in/olivere/elastic.v5/clear_scroll_test.go b/vendor/gopkg.in/olivere/elastic.v5/clear_scroll_test.go
index 56a9d936c..4037d3cd6 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/clear_scroll_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/clear_scroll_test.go
@@ -19,17 +19,17 @@ func TestClearScroll(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/client.go b/vendor/gopkg.in/olivere/elastic.v5/client.go
index 9a48d9ac7..1eb0ec54f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/client.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/client.go
@@ -21,12 +21,12 @@ import (
"github.com/pkg/errors"
- "gopkg.in/olivere/elastic.v5/config"
+ "github.com/olivere/elastic/config"
)
const (
// Version is the current version of Elastic.
- Version = "5.0.53"
+ Version = "6.1.4"
// DefaultURL is the default endpoint of Elasticsearch on the local machine.
// It is used e.g. when initializing a new Client without a specific URL.
@@ -76,9 +76,6 @@ const (
// a GET request with a body.
DefaultSendGetBodyAs = "GET"
- // DefaultGzipEnabled specifies if gzip compression is enabled by default.
- DefaultGzipEnabled = false
-
// off is used to disable timeouts.
off = -1 * time.Second
)
@@ -135,7 +132,6 @@ type Client struct {
basicAuthPassword string // password for HTTP Basic Auth
sendGetBodyAs string // override for when sending a GET with a body
requiredPlugins []string // list of required plugins
- gzipEnabled bool // gzip compression enabled or disabled (default)
retrier Retrier // strategy for retries
}
@@ -158,7 +154,7 @@ type Client struct {
//
// If the sniffer is enabled (the default), the new client then sniffs
// the cluster via the Nodes Info API
-// (see https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-nodes-info.html#cluster-nodes-info).
+// (see https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-nodes-info.html#cluster-nodes-info).
// It uses the URLs specified by the caller. The caller is responsible
// for passing only a list of URLs of nodes that belong to the same cluster.
// This sniffing process is run on startup and periodically.
@@ -209,7 +205,6 @@ func NewClient(options ...ClientOptionFunc) (*Client, error) {
snifferCallback: nopSnifferCallback,
snifferStop: make(chan bool),
sendGetBodyAs: DefaultSendGetBodyAs,
- gzipEnabled: DefaultGzipEnabled,
retrier: noRetries, // no retries by default
}
@@ -367,7 +362,6 @@ func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) {
snifferCallback: nopSnifferCallback,
snifferStop: make(chan bool),
sendGetBodyAs: DefaultSendGetBodyAs,
- gzipEnabled: DefaultGzipEnabled,
retrier: noRetries, // no retries by default
}
@@ -596,14 +590,6 @@ func SetMaxRetries(maxRetries int) ClientOptionFunc {
}
}
-// SetGzip enables or disables gzip compression (disabled by default).
-func SetGzip(enabled bool) ClientOptionFunc {
- return func(c *Client) error {
- c.gzipEnabled = enabled
- return nil
- }
-}
-
// SetDecoder sets the Decoder to use when decoding data from Elasticsearch.
// DefaultDecoder is used by default.
func SetDecoder(decoder Decoder) ClientOptionFunc {
@@ -1086,6 +1072,7 @@ func (c *Client) startupHealthcheck(timeout time.Duration) error {
c.mu.Unlock()
// If we don't get a connection after "timeout", we bail.
+ var lastErr error
start := time.Now()
for {
// Make a copy of the HTTP client provided via options to respect
@@ -1104,6 +1091,8 @@ func (c *Client) startupHealthcheck(timeout time.Duration) error {
res, err := cl.Do(req)
if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 {
return nil
+ } else if err != nil {
+ lastErr = err
}
}
time.Sleep(1 * time.Second)
@@ -1111,6 +1100,9 @@ func (c *Client) startupHealthcheck(timeout time.Duration) error {
break
}
}
+ if lastErr != nil {
+ return errors.Wrapf(ErrNoClient, "health check timeout: %v", lastErr)
+ }
return errors.Wrap(ErrNoClient, "health check timeout")
}
@@ -1167,19 +1159,26 @@ func (c *Client) mustActiveConn() error {
return errors.Wrap(ErrNoClient, "no active connection found")
}
-// PerformRequest does a HTTP request to Elasticsearch.
-// See PerformRequestWithContentType for details.
-func (c *Client) PerformRequest(ctx context.Context, method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) {
- return c.PerformRequestWithContentType(ctx, method, path, params, body, "application/json", ignoreErrors...)
+// -- PerformRequest --
+
+// PerformRequestOptions must be passed into PerformRequest.
+type PerformRequestOptions struct {
+ Method string
+ Path string
+ Params url.Values
+ Body interface{}
+ ContentType string
+ IgnoreErrors []int
+ Retrier Retrier
}
-// PerformRequestWithContentType executes a HTTP request with a specific content type.
+// PerformRequest does an HTTP request to Elasticsearch.
// It returns a response (which might be nil) and an error on failure.
//
// Optionally, a list of HTTP error codes to ignore can be passed.
// This is necessary for services that expect e.g. HTTP status 404 as a
// valid outcome (Exists, IndicesExists, IndicesTypeExists).
-func (c *Client) PerformRequestWithContentType(ctx context.Context, method, path string, params url.Values, body interface{}, contentType string, ignoreErrors ...int) (*Response, error) {
+func (c *Client) PerformRequest(ctx context.Context, opt PerformRequestOptions) (*Response, error) {
start := time.Now().UTC()
c.mu.RLock()
@@ -1188,7 +1187,10 @@ func (c *Client) PerformRequestWithContentType(ctx context.Context, method, path
basicAuthUsername := c.basicAuthUsername
basicAuthPassword := c.basicAuthPassword
sendGetBodyAs := c.sendGetBodyAs
- gzipEnabled := c.gzipEnabled
+ retrier := c.retrier
+ if opt.Retrier != nil {
+ retrier = opt.Retrier
+ }
c.mu.RUnlock()
var err error
@@ -1199,14 +1201,14 @@ func (c *Client) PerformRequestWithContentType(ctx context.Context, method, path
var n int
// Change method if sendGetBodyAs is specified.
- if method == "GET" && body != nil && sendGetBodyAs != "GET" {
- method = sendGetBodyAs
+ if opt.Method == "GET" && opt.Body != nil && sendGetBodyAs != "GET" {
+ opt.Method = sendGetBodyAs
}
for {
- pathWithParams := path
- if len(params) > 0 {
- pathWithParams += "?" + params.Encode()
+ pathWithParams := opt.Path
+ if len(opt.Params) > 0 {
+ pathWithParams += "?" + opt.Params.Encode()
}
// Get a connection
@@ -1217,7 +1219,7 @@ func (c *Client) PerformRequestWithContentType(ctx context.Context, method, path
// Force a healthcheck as all connections seem to be dead.
c.healthcheck(timeout, false)
}
- wait, ok, rerr := c.retrier.Retry(ctx, n, nil, nil, err)
+ wait, ok, rerr := retrier.Retry(ctx, n, nil, nil, err)
if rerr != nil {
return nil, rerr
}
@@ -1233,24 +1235,24 @@ func (c *Client) PerformRequestWithContentType(ctx context.Context, method, path
return nil, err
}
- req, err = NewRequest(method, conn.URL()+pathWithParams)
+ req, err = NewRequest(opt.Method, conn.URL()+pathWithParams)
if err != nil {
- c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err)
+ c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(opt.Method), conn.URL()+pathWithParams, err)
return nil, err
}
if basicAuth {
req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
}
- if contentType != "" {
- req.Header.Set("Content-Type", contentType)
+ if opt.ContentType != "" {
+ req.Header.Set("Content-Type", opt.ContentType)
}
// Set body
- if body != nil {
- err = req.SetBody(body, gzipEnabled)
+ if opt.Body != nil {
+ err = req.SetBody(opt.Body)
if err != nil {
- c.errorf("elastic: couldn't set body %+v for request: %v", body, err)
+ c.errorf("elastic: couldn't set body %+v for request: %v", opt.Body, err)
return nil, err
}
}
@@ -1273,7 +1275,7 @@ func (c *Client) PerformRequestWithContentType(ctx context.Context, method, path
}
if err != nil {
n++
- wait, ok, rerr := c.retrier.Retry(ctx, n, (*http.Request)(req), res, err)
+ wait, ok, rerr := retrier.Retry(ctx, n, (*http.Request)(req), res, err)
if rerr != nil {
c.errorf("elastic: %s is dead", conn.URL())
conn.MarkAsDead()
@@ -1295,8 +1297,13 @@ func (c *Client) PerformRequestWithContentType(ctx context.Context, method, path
// Tracing
c.dumpResponse(res)
+ // Log deprecation warnings as errors
+ if s := res.Header.Get("Warning"); s != "" {
+ c.errorf(s)
+ }
+
// Check for errors
- if err := checkResponse((*http.Request)(req), res, ignoreErrors...); err != nil {
+ if err := checkResponse((*http.Request)(req), res, opt.IgnoreErrors...); err != nil {
// No retry if request succeeded
// We still try to return a response.
resp, _ = c.newResponse(res)
@@ -1316,7 +1323,7 @@ func (c *Client) PerformRequestWithContentType(ctx context.Context, method, path
duration := time.Now().UTC().Sub(start)
c.infof("%s %s [status:%d, request:%.3fs]",
- strings.ToUpper(method),
+ strings.ToUpper(opt.Method),
req.URL,
resp.StatusCode,
float64(int64(duration/time.Millisecond))/1000)
@@ -1378,7 +1385,7 @@ func (c *Client) BulkProcessor() *BulkProcessorService {
// Reindex copies data from a source index into a destination index.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-reindex.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-reindex.html
// for details on the Reindex API.
func (c *Client) Reindex() *ReindexService {
return NewReindexService(c)
@@ -1405,11 +1412,6 @@ func (c *Client) Search(indices ...string) *SearchService {
return NewSearchService(c).Index(indices...)
}
-// Suggest returns a service to return suggestions.
-func (c *Client) Suggest(indices ...string) *SuggestService {
- return NewSuggestService(c).Index(indices...)
-}
-
// MultiSearch is the entry point for multi searches.
func (c *Client) MultiSearch() *MultiSearchService {
return NewMultiSearchService(c)
@@ -1430,9 +1432,9 @@ func (c *Client) Explain(index, typ, id string) *ExplainService {
// TODO Search Exists API
// TODO Validate API
-// FieldStats returns statistical information about fields in indices.
-func (c *Client) FieldStats(indices ...string) *FieldStatsService {
- return NewFieldStatsService(c).Index(indices...)
+// FieldCaps returns the capabilities of fields in indices.
+func (c *Client) FieldCaps(indices ...string) *FieldCapsService {
+ return NewFieldCapsService(c).Index(indices...)
}
// Exists checks if a document exists.
@@ -1516,6 +1518,11 @@ func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService
return NewIndicesPutSettingsService(c).Index(indices...)
}
+// IndexSegments retrieves low-level segment information for one, several, or all indices.
+func (c *Client) IndexSegments(indices ...string) *IndicesSegmentsService {
+ return NewIndicesSegmentsService(c).Index(indices...)
+}
+
// IndexAnalyze performs the analysis process on a text and returns the
// token breakdown of the text.
func (c *Client) IndexAnalyze() *IndicesAnalyzeService {
@@ -1549,24 +1556,6 @@ func (c *Client) Aliases() *AliasesService {
return NewAliasesService(c)
}
-// GetTemplate gets a search template.
-// Use IndexXXXTemplate funcs to manage index templates.
-func (c *Client) GetTemplate() *GetTemplateService {
- return NewGetTemplateService(c)
-}
-
-// PutTemplate creates or updates a search template.
-// Use IndexXXXTemplate funcs to manage index templates.
-func (c *Client) PutTemplate() *PutTemplateService {
- return NewPutTemplateService(c)
-}
-
-// DeleteTemplate deletes a search template.
-// Use IndexXXXTemplate funcs to manage index templates.
-func (c *Client) DeleteTemplate() *DeleteTemplateService {
- return NewDeleteTemplateService(c)
-}
-
// IndexGetTemplate gets an index template.
// Use XXXTemplate funcs to manage search templates.
func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
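
The most visible change in client.go is that PerformRequest now takes a single options struct instead of a growing positional signature: the old PerformRequestWithContentType is folded in via the ContentType field, IgnoreErrors replaces the variadic tail, and a per-call Retrier can override the client-wide one. A sketch of the new call style against a hypothetical local cluster:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	elastic "github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	res, err := client.PerformRequest(context.Background(), elastic.PerformRequestOptions{
		Method: "GET",
		Path:   "/_cluster/health",
		Params: url.Values{"pretty": []string{"true"}},
		// ContentType, IgnoreErrors, and Retrier are optional overrides.
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.StatusCode, string(res.Body))
}
```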
diff --git a/vendor/gopkg.in/olivere/elastic.v5/client_test.go b/vendor/gopkg.in/olivere/elastic.v5/client_test.go
index 6caf7b797..4d0440ee0 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/client_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/client_test.go
@@ -279,6 +279,9 @@ func TestClientHealthcheckStartupTimeout(t *testing.T) {
if !IsConnErr(err) {
t.Fatal(err)
}
+ if !strings.Contains(err.Error(), "connection refused") {
+ t.Fatalf("expected error to contain %q, have %q", "connection refused", err.Error())
+ }
if duration < 5*time.Second {
t.Fatalf("expected a timeout in more than 5 seconds; got: %v", duration)
}
@@ -873,7 +876,10 @@ func TestPerformRequest(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil)
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
if err != nil {
t.Fatal(err)
}
@@ -895,7 +901,10 @@ func TestPerformRequestWithSimpleClient(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil)
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
if err != nil {
t.Fatal(err)
}
@@ -921,7 +930,10 @@ func TestPerformRequestWithLogger(t *testing.T) {
t.Fatal(err)
}
- res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil)
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
if err != nil {
t.Fatal(err)
}
@@ -960,7 +972,10 @@ func TestPerformRequestWithLoggerAndTracer(t *testing.T) {
t.Fatal(err)
}
- res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil)
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
if err != nil {
t.Fatal(err)
}
@@ -995,7 +1010,10 @@ func TestPerformRequestWithTracerOnError(t *testing.T) {
t.Fatal(err)
}
- client.PerformRequest(context.TODO(), "GET", "/no-such-index", nil, nil)
+ client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/no-such-index",
+ })
tgot := tw.String()
if tgot == "" {
@@ -1019,7 +1037,10 @@ func TestPerformRequestWithCustomLogger(t *testing.T) {
t.Fatal(err)
}
- res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil)
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
if err != nil {
t.Fatal(err)
}
@@ -1082,7 +1103,10 @@ func TestPerformRequestRetryOnHttpError(t *testing.T) {
t.Fatal(err)
}
- res, err := client.PerformRequest(context.TODO(), "GET", "/fail", nil, nil)
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/fail",
+ })
if err == nil {
t.Fatal("expected error")
}
@@ -1112,7 +1136,10 @@ func TestPerformRequestNoRetryOnValidButUnsuccessfulHttpStatus(t *testing.T) {
t.Fatal(err)
}
- res, err := client.PerformRequest(context.TODO(), "GET", "/fail", nil, nil)
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/fail",
+ })
if err == nil {
t.Fatal("expected error")
}
@@ -1141,7 +1168,11 @@ func TestPerformRequestWithSetBodyError(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, failingBody{})
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ Body: failingBody{},
+ })
if err == nil {
t.Fatal("expected error")
}
@@ -1178,7 +1209,10 @@ func TestPerformRequestWithCancel(t *testing.T) {
resc := make(chan result, 1)
go func() {
- res, err := client.PerformRequest(ctx, "GET", "/", nil, nil)
+ res, err := client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
resc <- result{res: res, err: err}
}()
select {
@@ -1213,7 +1247,10 @@ func TestPerformRequestWithTimeout(t *testing.T) {
resc := make(chan result, 1)
go func() {
- res, err := client.PerformRequest(ctx, "GET", "/", nil, nil)
+ res, err := client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
resc <- result{res: res, err: err}
}()
select {
@@ -1261,7 +1298,10 @@ func testPerformRequestWithCompression(t *testing.T, hc *http.Client) {
if err != nil {
t.Fatal(err)
}
- res, err := client.PerformRequest(context.TODO(), "GET", "/", nil, nil)
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/",
+ })
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster-test/cluster-test.go b/vendor/gopkg.in/olivere/elastic.v5/cluster-test/cluster-test.go
index 249b35c04..96b0c5d9b 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/cluster-test/cluster-test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/cluster-test/cluster-test.go
@@ -18,7 +18,7 @@ import (
"sync/atomic"
"time"
- elastic "gopkg.in/olivere/elastic.v5"
+ elastic "github.com/olivere/elastic"
)
type Tweet struct {
diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_health.go b/vendor/gopkg.in/olivere/elastic.v5/cluster_health.go
index a3ee72cd1..f960cfe8e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/cluster_health.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_health.go
@@ -10,7 +10,7 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// ClusterHealthService retrieves a very simple status of the health of the cluster.
@@ -131,7 +131,7 @@ func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.level != "" {
params.Set("level", s.level)
@@ -179,7 +179,11 @@ func (s *ClusterHealthService) Do(ctx context.Context) (*ClusterHealthResponse,
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
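
Usage is unchanged by the transport refactor; a minimal health check, assuming a local cluster (WaitForYellowStatus and Timeout are existing options on this service):

```go
package main

import (
	"context"
	"fmt"
	"log"

	elastic "github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	health, err := client.ClusterHealth().
		WaitForYellowStatus(). // block until the cluster is at least yellow
		Timeout("10s").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cluster status:", health.Status)
}
```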
diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_state.go b/vendor/gopkg.in/olivere/elastic.v5/cluster_state.go
index d6e608211..54e9aa428 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/cluster_state.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_state.go
@@ -10,12 +10,12 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// ClusterStateService retrieves comprehensive state information about the whole cluster.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-state.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-state.html
// for details.
type ClusterStateService struct {
client *Client
@@ -123,7 +123,7 @@ func (s *ClusterStateService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
@@ -165,7 +165,11 @@ func (s *ClusterStateService) Do(ctx context.Context) (*ClusterStateResponse, er
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
@@ -235,10 +239,10 @@ type clusterStateRoutingNode struct {
}
type indexTemplateMetaData struct {
- Template string `json:"template"` // e.g. "store-*"
- Order int `json:"order"`
- Settings map[string]interface{} `json:"settings"` // index settings
- Mappings map[string]interface{} `json:"mappings"` // type name -> mapping
+ IndexPatterns []string `json:"index_patterns"` // e.g. ["store-*"]
+ Order int `json:"order"`
+ Settings map[string]interface{} `json:"settings"` // index settings
+ Mappings map[string]interface{} `json:"mappings"` // type name -> mapping
}
type indexMetaData struct {
diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go b/vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go
index 8c5374d0f..4d05c2e97 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go
@@ -10,11 +10,11 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// ClusterStatsService is documented at
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-stats.html.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-stats.html.
type ClusterStatsService struct {
client *Client
pretty bool
@@ -78,7 +78,7 @@ func (s *ClusterStatsService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.flatSettings != nil {
params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
@@ -108,7 +108,11 @@ func (s *ClusterStatsService) Do(ctx context.Context) (*ClusterStatsResponse, er
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
@@ -175,10 +179,8 @@ type ClusterStatsIndicesDocs struct {
}
type ClusterStatsIndicesStore struct {
- Size string `json:"size"` // e.g. "5.3gb"
- SizeInBytes int64 `json:"size_in_bytes"`
- ThrottleTime string `json:"throttle_time"` // e.g. "0s"
- ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"`
+ Size string `json:"size"` // e.g. "5.3gb"
+ SizeInBytes int64 `json:"size_in_bytes"`
}
type ClusterStatsIndicesFieldData struct {
diff --git a/vendor/gopkg.in/olivere/elastic.v5/count.go b/vendor/gopkg.in/olivere/elastic.v5/count.go
index 89b69ce03..44416fab0 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/count.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/count.go
@@ -10,7 +10,7 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// CountService is a convenient service for determining the
@@ -207,7 +207,7 @@ func (s *CountService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
@@ -286,7 +286,12 @@ func (s *CountService) Do(ctx context.Context) (int64, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return 0, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/count_test.go b/vendor/gopkg.in/olivere/elastic.v5/count_test.go
index dd2b7556f..a0ee52112 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/count_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/count_test.go
@@ -58,17 +58,17 @@ func TestCount(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -88,7 +88,7 @@ func TestCount(t *testing.T) {
}
// Count documents
- count, err = client.Count(testIndexName).Type("tweet").Do(context.TODO())
+ count, err = client.Count(testIndexName).Type("doc").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -117,7 +117,7 @@ func TestCount(t *testing.T) {
// Count with query and type
query = NewTermQuery("user", "olivere")
- count, err = client.Count(testIndexName).Type("tweet").Query(query).Do(context.TODO())
+ count, err = client.Count(testIndexName).Type("doc").Query(query).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
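
The wholesale rename of Type("tweet") to Type("doc") in the tests tracks Elasticsearch 6.0's rule of a single mapping type per index, for which "doc" became the conventional name. A minimal indexing sketch under that rule, assuming an initialized client; the index name and id are illustrative:

// Sketch: ES 6.x permits one mapping type per index, conventionally "doc".
put, err := client.Index().
	Index("twitter").
	Type("doc"). // single type per index in 6.x
	Id("1").
	BodyJson(map[string]interface{}{"user": "olivere", "message": "hello"}).
	Do(context.Background())
if err != nil {
	// handle error
}
_ = put.Id
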
diff --git a/vendor/gopkg.in/olivere/elastic.v5/decoder_test.go b/vendor/gopkg.in/olivere/elastic.v5/decoder_test.go
index 15263fb8d..2c3dde8ca 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/decoder_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/decoder_test.go
@@ -34,7 +34,7 @@ func TestDecoder(t *testing.T) {
// Add a document
indexResult, err := client.Index().
Index(testIndexName).
- Type("tweet").
+ Type("doc").
Id("1").
BodyJson(&tweet).
Do(context.TODO())
diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete.go b/vendor/gopkg.in/olivere/elastic.v5/delete.go
index b1b06c2c3..1e20de11f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/delete.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/delete.go
@@ -7,17 +7,16 @@ package elastic
import (
"context"
"fmt"
- "net/url"
-
"net/http"
+ "net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// DeleteService deletes a typed JSON document from a specified
// index based on its id.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-delete.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-delete.html
// for details.
type DeleteService struct {
client *Client
@@ -126,7 +125,7 @@ func (s *DeleteService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.refresh != "" {
params.Set("refresh", s.refresh)
@@ -186,7 +185,12 @@ func (s *DeleteService) Do(ctx context.Context) (*DeleteResponse, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil, http.StatusNotFound)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ IgnoreErrors: []int{http.StatusNotFound},
+ })
if err != nil {
return nil, err
}
@@ -209,12 +213,14 @@ func (s *DeleteService) Do(ctx context.Context) (*DeleteResponse, error) {
// DeleteResponse is the outcome of running DeleteService.Do.
type DeleteResponse struct {
- Index string `json:"_index"`
- Type string `json:"_type"`
- Id string `json:"_id"`
- Version int64 `json:"_version"`
- Shards *shardsInfo `json:"_shards"`
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Version int64 `json:"_version,omitempty"`
Result string `json:"result,omitempty"`
+ Shards *shardsInfo `json:"_shards,omitempty"`
+ SeqNo int64 `json:"_seq_no,omitempty"`
+ PrimaryTerm int64 `json:"_primary_term,omitempty"`
+ Status int `json:"status,omitempty"`
ForcedRefresh bool `json:"forced_refresh,omitempty"`
- Found bool `json:"found"`
}
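
With Found gone from DeleteResponse, callers branch on the Result string ("deleted" vs. "not_found") instead, exactly as the updated delete_test.go below does. A sketch of the new pattern, assuming an initialized client; note that in v6 deleting a missing document also surfaces as a 404 error:

// Sketch: inspect DeleteResponse.Result instead of the removed Found field.
res, err := client.Delete().Index("twitter").Type("doc").Id("1").Do(ctx)
if err != nil {
	if elastic.IsNotFound(err) {
		// document did not exist; res.Result is "not_found" when decoded
	}
	// handle other errors
} else if res.Result == "deleted" {
	// document removed
}
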
diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go b/vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go
index a890bd84f..694d81c2a 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go
@@ -10,11 +10,11 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// DeleteByQueryService deletes documents that match a query.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-delete-by-query.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-delete-by-query.html.
type DeleteByQueryService struct {
client *Client
index []string
@@ -598,7 +598,12 @@ func (s *DeleteByQueryService) Do(ctx context.Context) (*BulkIndexByScrollRespon
}
// Get response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_by_query_test.go b/vendor/gopkg.in/olivere/elastic.v5/delete_by_query_test.go
index 9208ebce3..40e45b871 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/delete_by_query_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/delete_by_query_test.go
@@ -87,17 +87,17 @@ func TestDeleteByQuery(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -120,7 +120,7 @@ func TestDeleteByQuery(t *testing.T) {
q := NewTermQuery("user", "sandrae")
res, err := client.DeleteByQuery().
Index(testIndexName).
- Type("tweet").
+ Type("doc").
Query(q).
Pretty(true).
Do(context.TODO())
diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_template.go b/vendor/gopkg.in/olivere/elastic.v5/delete_template.go
deleted file mode 100644
index 4775baf84..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/delete_template.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-
- "gopkg.in/olivere/elastic.v5/uritemplates"
-)
-
-// DeleteTemplateService deletes a search template. More information can
-// be found at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-template.html.
-type DeleteTemplateService struct {
- client *Client
- pretty bool
- id string
- version *int
- versionType string
-}
-
-// NewDeleteTemplateService creates a new DeleteTemplateService.
-func NewDeleteTemplateService(client *Client) *DeleteTemplateService {
- return &DeleteTemplateService{
- client: client,
- }
-}
-
-// Id is the template ID.
-func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService {
- s.id = id
- return s
-}
-
-// Version an explicit version number for concurrency control.
-func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService {
- s.version = &version
- return s
-}
-
-// VersionType specifies a version type.
-func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService {
- s.versionType = versionType
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *DeleteTemplateService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
- "id": s.id,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.version != nil {
- params.Set("version", fmt.Sprintf("%d", *s.version))
- }
- if s.versionType != "" {
- params.Set("version_type", s.versionType)
- }
-
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *DeleteTemplateService) Validate() error {
- var invalid []string
- if s.id == "" {
- invalid = append(invalid, "Id")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *DeleteTemplateService) Do(ctx context.Context) (*AcknowledgedResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(AcknowledgedResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_test.go b/vendor/gopkg.in/olivere/elastic.v5/delete_test.go
index 1daf5499d..571fcf589 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/delete_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/delete_test.go
@@ -17,17 +17,17 @@ func TestDelete(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -47,12 +47,12 @@ func TestDelete(t *testing.T) {
}
// Delete document 1
- res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
+ res, err := client.Delete().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- if res.Found != true {
- t.Errorf("expected Found = true; got %v", res.Found)
+ if want, have := "deleted", res.Result; want != have {
+ t.Errorf("expected Result = %q; got %q", want, have)
}
_, err = client.Flush().Index(testIndexName).Do(context.TODO())
if err != nil {
@@ -67,7 +67,7 @@ func TestDelete(t *testing.T) {
}
// Delete non existent document 99
- res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh("true").Do(context.TODO())
+ res, err = client.Delete().Index(testIndexName).Type("doc").Id("99").Refresh("true").Do(context.TODO())
if err == nil {
t.Fatal("expected error")
}
@@ -80,20 +80,17 @@ func TestDelete(t *testing.T) {
if res == nil {
t.Fatal("expected response")
}
- if res.Found {
- t.Errorf("expected Found = false; got %v", res.Found)
- }
if have, want := res.Id, "99"; have != want {
t.Errorf("expected _id = %q, got %q", have, want)
}
if have, want := res.Index, testIndexName; have != want {
t.Errorf("expected _index = %q, got %q", have, want)
}
- if have, want := res.Type, "tweet"; have != want {
+ if have, want := res.Type, "doc"; have != want {
t.Errorf("expected _type = %q, got %q", have, want)
}
if have, want := res.Result, "not_found"; have != want {
- t.Errorf("expected result = %q, got %q", have, want)
+ t.Errorf("expected Result = %q, got %q", have, want)
}
count, err = client.Count(testIndexName).Do(context.TODO())
@@ -109,7 +106,7 @@ func TestDeleteValidate(t *testing.T) {
client := setupTestClientAndCreateIndexAndAddDocs(t)
// No index name -> fail with error
- res, err := NewDeleteService(client).Type("tweet").Id("1").Do(context.TODO())
+ res, err := NewDeleteService(client).Type("doc").Id("1").Do(context.TODO())
if err == nil {
t.Fatalf("expected Delete to fail without index name")
}
@@ -127,7 +124,7 @@ func TestDeleteValidate(t *testing.T) {
}
// No id -> fail with error
- res, err = NewDeleteService(client).Index(testIndexName).Type("tweet").Do(context.TODO())
+ res, err = NewDeleteService(client).Index(testIndexName).Type("doc").Do(context.TODO())
if err == nil {
t.Fatalf("expected Delete to fail without id")
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/docker-compose.yml b/vendor/gopkg.in/olivere/elastic.v5/docker-compose.yml
deleted file mode 100644
index 002eac7fa..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/docker-compose.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-version: '3'
-
-services:
- elasticsearch:
- image: docker.elastic.co/elasticsearch/elasticsearch:5.6.3
- # container_name: elasticsearch
- hostname: elasticsearch
- environment:
- - bootstrap.memory_lock=true
- - xpack.security.enabled=false
- - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
- ulimits:
- nproc: 65536
- nofile:
- soft: 65536
- hard: 65536
- memlock:
- soft: -1
- hard: -1
- volumes:
- - ./etc:/usr/share/elasticsearch/config
- ports:
- - 9200:9200
diff --git a/vendor/gopkg.in/olivere/elastic.v5/example_test.go b/vendor/gopkg.in/olivere/elastic.v5/example_test.go
index f8441d7bf..62dc15d89 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/example_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/example_test.go
@@ -13,7 +13,7 @@ import (
"reflect"
"time"
- elastic "gopkg.in/olivere/elastic.v5"
+ elastic "github.com/olivere/elastic"
)
type Tweet struct {
@@ -71,12 +71,7 @@ func Example() {
"number_of_replicas":0
},
"mappings":{
- "_default_": {
- "_all": {
- "enabled": true
- }
- },
- "tweet":{
+ "doc":{
"properties":{
"user":{
"type":"keyword"
@@ -117,7 +112,7 @@ func Example() {
tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
put1, err := client.Index().
Index("twitter").
- Type("tweet").
+ Type("doc").
Id("1").
BodyJson(tweet1).
Do(context.Background())
@@ -131,7 +126,7 @@ func Example() {
tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
put2, err := client.Index().
Index("twitter").
- Type("tweet").
+ Type("doc").
Id("2").
BodyString(tweet2).
Do(context.Background())
@@ -144,7 +139,7 @@ func Example() {
// Get tweet with specified ID
get1, err := client.Get().
Index("twitter").
- Type("tweet").
+ Type("doc").
Id("1").
Do(context.Background())
if err != nil {
@@ -217,7 +212,7 @@ func Example() {
// Update a tweet by the update API of Elasticsearch.
// We just increment the number of retweets.
script := elastic.NewScript("ctx._source.retweets += params.num").Param("num", 1)
- update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+ update, err := client.Update().Index("twitter").Type("doc").Id("1").
Script(script).
Upsert(map[string]interface{}{"retweets": 0}).
Do(context.Background())
@@ -485,58 +480,6 @@ func ExampleSearchResult() {
}
}
-func ExamplePutTemplateService() {
- client, err := elastic.NewClient()
- if err != nil {
- panic(err)
- }
-
- // Create search template
- tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
-
- // Create template
- resp, err := client.PutTemplate().
- Id("my-search-template"). // Name of the template
- BodyString(tmpl). // Search template itself
- Do(context.Background()) // Execute
- if err != nil {
- panic(err)
- }
- if resp.Acknowledged {
- fmt.Println("search template creation acknowledged")
- }
-}
-
-func ExampleGetTemplateService() {
- client, err := elastic.NewClient()
- if err != nil {
- panic(err)
- }
-
- // Get template stored under "my-search-template"
- resp, err := client.GetTemplate().Id("my-search-template").Do(context.Background())
- if err != nil {
- panic(err)
- }
- fmt.Printf("search template is: %q\n", resp.Template)
-}
-
-func ExampleDeleteTemplateService() {
- client, err := elastic.NewClient()
- if err != nil {
- panic(err)
- }
-
- // Delete template
- resp, err := client.DeleteTemplate().Id("my-search-template").Do(context.Background())
- if err != nil {
- panic(err)
- }
- if resp != nil && resp.Acknowledged {
- fmt.Println("template deleted")
- }
-}
-
func ExampleClusterHealthService() {
client, err := elastic.NewClient()
if err != nil {
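
The Put/Get/DeleteTemplate examples go away because Elasticsearch 6.0 removed the /_search/template/{id} CRUD endpoints; search templates are now kept as stored scripts under /_scripts/{id}. A hedged sketch of storing one through the generic request API — the endpoint and body shape follow the Elasticsearch 6.x docs, not a dedicated helper in this vendored snapshot:

// Sketch, assuming the ES 6.x /_scripts/{id} stored-script endpoint;
// this client version may or may not ship a dedicated service for it.
body := map[string]interface{}{
	"script": map[string]interface{}{
		"lang":   "mustache",
		"source": `{"query":{"match":{"title":"{{query_string}}"}}}`,
	},
}
_, err := client.PerformRequest(ctx, elastic.PerformRequestOptions{
	Method: "POST",
	Path:   "/_scripts/my-search-template", // illustrative id
	Body:   body,
})
if err != nil {
	// handle error
}
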
diff --git a/vendor/gopkg.in/olivere/elastic.v5/exists.go b/vendor/gopkg.in/olivere/elastic.v5/exists.go
index 7cae2bde8..ae5a88fa7 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/exists.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/exists.go
@@ -10,12 +10,12 @@ import (
"net/http"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// ExistsService checks for the existence of a document using HEAD.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-get.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-get.html
// for details.
type ExistsService struct {
client *Client
@@ -107,7 +107,7 @@ func (s *ExistsService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.realtime != nil {
params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
@@ -159,7 +159,12 @@ func (s *ExistsService) Do(ctx context.Context) (bool, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "HEAD",
+ Path: path,
+ Params: params,
+ IgnoreErrors: []int{404},
+ })
if err != nil {
return false, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/exists_test.go b/vendor/gopkg.in/olivere/elastic.v5/exists_test.go
index af6a04e80..9b834223d 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/exists_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/exists_test.go
@@ -12,7 +12,7 @@ import (
func TestExists(t *testing.T) {
client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
- exists, err := client.Exists().Index(testIndexName).Type("comment").Id("1").Parent("tweet").Do(context.TODO())
+ exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -25,7 +25,7 @@ func TestExistsValidate(t *testing.T) {
client := setupTestClient(t)
// No index -> fail with error
- res, err := NewExistsService(client).Type("tweet").Id("1").Do(context.TODO())
+ res, err := NewExistsService(client).Type("doc").Id("1").Do(context.TODO())
if err == nil {
t.Fatalf("expected Delete to fail without index name")
}
@@ -43,7 +43,7 @@ func TestExistsValidate(t *testing.T) {
}
// No id -> fail with error
- res, err = NewExistsService(client).Index(testIndexName).Type("tweet").Do(context.TODO())
+ res, err = NewExistsService(client).Index(testIndexName).Type("doc").Do(context.TODO())
if err == nil {
t.Fatalf("expected Delete to fail without index name")
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/explain.go b/vendor/gopkg.in/olivere/elastic.v5/explain.go
index bdfa00f1d..2b975ad5d 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/explain.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/explain.go
@@ -10,12 +10,12 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// ExplainService computes a score explanation for a query and
// a specific document.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-explain.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-explain.html.
type ExplainService struct {
client *Client
pretty bool
@@ -208,7 +208,7 @@ func (s *ExplainService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if len(s.xSource) > 0 {
params.Set("_source", strings.Join(s.xSource, ","))
@@ -298,7 +298,12 @@ func (s *ExplainService) Do(ctx context.Context) (*ExplainResponse, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/explain_test.go b/vendor/gopkg.in/olivere/elastic.v5/explain_test.go
index e71bf6675..22cb9668a 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/explain_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/explain_test.go
@@ -17,7 +17,7 @@ func TestExplain(t *testing.T) {
// Add a document
indexResult, err := client.Index().
Index(testIndexName).
- Type("tweet").
+ Type("doc").
Id("1").
BodyJson(&tweet1).
Refresh("true").
@@ -31,7 +31,7 @@ func TestExplain(t *testing.T) {
// Explain
query := NewTermQuery("user", "olivere")
- expl, err := client.Explain(testIndexName, "tweet", "1").Query(query).Do(context.TODO())
+ expl, err := client.Explain(testIndexName, "doc", "1").Query(query).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go b/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go
index 53e7fcd90..874c4c1da 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go
@@ -14,7 +14,7 @@ import (
// with various endpoints, e.g. when searching for documents, retrieving
// individual documents, or even updating documents.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.5/search-request-source-filtering.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-source-filtering.html
// for details.
type FetchSourceContext struct {
fetchSource bool
diff --git a/vendor/gopkg.in/olivere/elastic.v5/field_caps.go b/vendor/gopkg.in/olivere/elastic.v5/field_caps.go
new file mode 100644
index 000000000..393cd3ce8
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/field_caps.go
@@ -0,0 +1,202 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// FieldCapsService allows retrieving the capabilities of fields among multiple indices.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.1/search-field-caps.html
+// for details.
+type FieldCapsService struct {
+ client *Client
+ pretty bool
+ index []string
+ allowNoIndices *bool
+ expandWildcards string
+ fields []string
+ ignoreUnavailable *bool
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewFieldCapsService creates a new FieldCapsService.
+func NewFieldCapsService(client *Client) *FieldCapsService {
+ return &FieldCapsService{
+ client: client,
+ }
+}
+
+// Index is a list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *FieldCapsService) Index(index ...string) *FieldCapsService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices expression
+// resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *FieldCapsService) AllowNoIndices(allowNoIndices bool) *FieldCapsService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *FieldCapsService) ExpandWildcards(expandWildcards string) *FieldCapsService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Fields is a list of fields to get capabilities for.
+func (s *FieldCapsService) Fields(fields ...string) *FieldCapsService {
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be ignored when unavailable (missing or closed).
+func (s *FieldCapsService) IgnoreUnavailable(ignoreUnavailable bool) *FieldCapsService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *FieldCapsService) Pretty(pretty bool) *FieldCapsService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson sets the request body as a JSON-serializable value, e.g. a FieldCapsRequest naming the fields of interest.
+func (s *FieldCapsService) BodyJson(body interface{}) *FieldCapsService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString sets the request body as a raw JSON string.
+func (s *FieldCapsService) BodyString(body string) *FieldCapsService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *FieldCapsService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_field_caps", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else {
+ path = "/_field_caps"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *FieldCapsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *FieldCapsService) Do(ctx context.Context) (*FieldCapsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ IgnoreErrors: []int{http.StatusNotFound},
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(oe): Is 404 really a valid response here?
+ if res.StatusCode == http.StatusNotFound {
+ return &FieldCapsResponse{}, nil
+ }
+
+ // Return operation response
+ ret := new(FieldCapsResponse)
+ if err := s.client.decoder.Decode(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Request --
+
+// FieldCapsRequest can be used to set up the body to be used in the
+// Field Capabilities API.
+type FieldCapsRequest struct {
+ Fields []string `json:"fields"`
+}
+
+// -- Response --
+
+// FieldCapsResponse contains field capabilities.
+type FieldCapsResponse struct {
+ Fields map[string]FieldCaps `json:"fields,omitempty"`
+}
+
+// FieldCaps contains capabilities of an individual field.
+type FieldCaps struct {
+ Type string `json:"type"`
+ Searchable bool `json:"searchable"`
+ Aggregatable bool `json:"aggregatable"`
+ Indices []string `json:"indices,omitempty"`
+ NonSearchableIndices []string `json:"non_searchable_indices,omitempty"`
+ NonAggregatableIndices []string `json:"non_aggregatable_indices,omitempty"`
+}
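
The new service follows the same builder pattern as the rest of the client; TestFieldCaps123 further down exercises it end to end. A usage sketch, assuming an initialized client; the index and field names are illustrative:

// Sketch: query field capabilities across all indices.
res, err := client.FieldCaps("_all").
	Fields("user", "message", "retweets", "created").
	Pretty(true).
	Do(context.Background())
if err != nil {
	// handle error
}
if caps, ok := res.Fields["user"]; ok && caps.Aggregatable {
	// "user" is aggregatable on every index that defines it
}
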
diff --git a/vendor/gopkg.in/olivere/elastic.v5/field_caps_test.go b/vendor/gopkg.in/olivere/elastic.v5/field_caps_test.go
new file mode 100644
index 000000000..e299fd516
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/field_caps_test.go
@@ -0,0 +1,146 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "net/url"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestFieldCapsURLs(t *testing.T) {
+ tests := []struct {
+ Service *FieldCapsService
+ ExpectedPath string
+ ExpectedParams url.Values
+ }{
+ {
+ Service: &FieldCapsService{},
+ ExpectedPath: "/_field_caps",
+ ExpectedParams: url.Values{},
+ },
+ {
+ Service: &FieldCapsService{
+ index: []string{"index1", "index2"},
+ },
+ ExpectedPath: "/index1%2Cindex2/_field_caps",
+ ExpectedParams: url.Values{},
+ },
+ {
+ Service: &FieldCapsService{
+ index: []string{"index_*"},
+ pretty: true,
+ },
+ ExpectedPath: "/index_%2A/_field_caps",
+ ExpectedParams: url.Values{"pretty": []string{"true"}},
+ },
+ }
+
+ for _, test := range tests {
+ gotPath, gotParams, err := test.Service.buildURL()
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if gotPath != test.ExpectedPath {
+ t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
+ }
+ if gotParams.Encode() != test.ExpectedParams.Encode() {
+ t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
+ }
+ }
+}
+
+func TestFieldCapsRequestSerialize(t *testing.T) {
+ req := &FieldCapsRequest{
+ Fields: []string{"creation_date", "answer_count"},
+ }
+ data, err := json.Marshal(req)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"fields":["creation_date","answer_count"]}`
+ if got != expected {
+ t.Fatalf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldCapsRequestDeserialize(t *testing.T) {
+ body := `{
+ "fields" : ["creation_date", "answer_count"]
+ }`
+
+ var request FieldCapsRequest
+ if err := json.Unmarshal([]byte(body), &request); err != nil {
+ t.Fatalf("unexpected error during unmarshalling: %v", err)
+ }
+
+ sort.Sort(lexicographically{request.Fields})
+
+ expectedFields := []string{"answer_count", "creation_date"}
+ if !reflect.DeepEqual(request.Fields, expectedFields) {
+ t.Fatalf("expected fields to be %v, got %v", expectedFields, request.Fields)
+ }
+}
+
+func TestFieldCapsResponseUnmarshalling(t *testing.T) {
+ clusterStats := `{
+ "_shards": {
+ "total": 1,
+ "successful": 1,
+ "failed": 0
+ },
+ "fields": {
+ "creation_date": {
+ "type": "date",
+ "searchable": true,
+ "aggregatable": true,
+ "indices": ["index1", "index2"],
+ "non_searchable_indices": null,
+ "non_aggregatable_indices": null
+ },
+ "answer": {
+ "type": "keyword",
+ "searchable": true,
+ "aggregatable": true
+ }
+ }
+ }`
+
+ var resp FieldCapsResponse
+ if err := json.Unmarshal([]byte(clusterStats), &resp); err != nil {
+ t.Errorf("unexpected error during unmarshalling: %v", err)
+ }
+
+ caps, ok := resp.Fields["creation_date"]
+ if !ok {
+ t.Errorf("expected creation_date to be in the fields map, didn't find it")
+ }
+ if want, have := true, caps.Searchable; want != have {
+ t.Errorf("expected creation_date searchable to be %v, got %v", want, have)
+ }
+ if want, have := true, caps.Aggregatable; want != have {
+ t.Errorf("expected creation_date aggregatable to be %v, got %v", want, have)
+ }
+ if want, have := []string{"index1", "index2"}, caps.Indices; !reflect.DeepEqual(want, have) {
+ t.Errorf("expected creation_date indices to be %v, got %v", want, have)
+ }
+}
+
+func TestFieldCaps123(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+ // client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ res, err := client.FieldCaps("_all").Fields("user", "message", "retweets", "created").Pretty(true).Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if res == nil {
+ t.Fatalf("expected response; got: %v", res)
+ }
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/field_stats.go b/vendor/gopkg.in/olivere/elastic.v5/field_stats.go
deleted file mode 100644
index c2104c5cb..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/field_stats.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
- "strings"
-
- "gopkg.in/olivere/elastic.v5/uritemplates"
-)
-
-const (
- FieldStatsClusterLevel = "cluster"
- FieldStatsIndicesLevel = "indices"
-)
-
-// FieldStatsService allows finding statistical properties of a field without executing a search,
-// but looking up measurements that are natively available in the Lucene index.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-field-stats.html
-// for details
-type FieldStatsService struct {
- client *Client
- pretty bool
- level string
- index []string
- allowNoIndices *bool
- expandWildcards string
- fields []string
- ignoreUnavailable *bool
- bodyJson interface{}
- bodyString string
-}
-
-// NewFieldStatsService creates a new FieldStatsService
-func NewFieldStatsService(client *Client) *FieldStatsService {
- return &FieldStatsService{
- client: client,
- index: make([]string, 0),
- fields: make([]string, 0),
- }
-}
-
-// Index is a list of index names; use `_all` or empty string to perform
-// the operation on all indices.
-func (s *FieldStatsService) Index(index ...string) *FieldStatsService {
- s.index = append(s.index, index...)
- return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices expression
-// resolves into no concrete indices.
-// (This includes `_all` string or when no indices have been specified).
-func (s *FieldStatsService) AllowNoIndices(allowNoIndices bool) *FieldStatsService {
- s.allowNoIndices = &allowNoIndices
- return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *FieldStatsService) ExpandWildcards(expandWildcards string) *FieldStatsService {
- s.expandWildcards = expandWildcards
- return s
-}
-
-// Fields is a list of fields for to get field statistics
-// for (min value, max value, and more).
-func (s *FieldStatsService) Fields(fields ...string) *FieldStatsService {
- s.fields = append(s.fields, fields...)
- return s
-}
-
-// IgnoreUnavailable is documented as: Whether specified concrete indices should be ignored when unavailable (missing or closed).
-func (s *FieldStatsService) IgnoreUnavailable(ignoreUnavailable bool) *FieldStatsService {
- s.ignoreUnavailable = &ignoreUnavailable
- return s
-}
-
-// Level sets if stats should be returned on a per index level or on a cluster wide level;
-// should be one of 'cluster' or 'indices'; defaults to former
-func (s *FieldStatsService) Level(level string) *FieldStatsService {
- s.level = level
- return s
-}
-
-// ClusterLevel is a helper that sets Level to "cluster".
-func (s *FieldStatsService) ClusterLevel() *FieldStatsService {
- s.level = FieldStatsClusterLevel
- return s
-}
-
-// IndicesLevel is a helper that sets Level to "indices".
-func (s *FieldStatsService) IndicesLevel() *FieldStatsService {
- s.level = FieldStatsIndicesLevel
- return s
-}
-
-// Pretty indicates that the JSON response be indented and human readable.
-func (s *FieldStatsService) Pretty(pretty bool) *FieldStatsService {
- s.pretty = pretty
- return s
-}
-
-// BodyJson is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds.
-func (s *FieldStatsService) BodyJson(body interface{}) *FieldStatsService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds.
-func (s *FieldStatsService) BodyString(body string) *FieldStatsService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *FieldStatsService) buildURL() (string, url.Values, error) {
- // Build URL
- var err error
- var path string
- if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_field_stats", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else {
- path = "/_field_stats"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.allowNoIndices != nil {
- params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
- }
- if s.expandWildcards != "" {
- params.Set("expand_wildcards", s.expandWildcards)
- }
- if len(s.fields) > 0 {
- params.Set("fields", strings.Join(s.fields, ","))
- }
- if s.ignoreUnavailable != nil {
- params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
- }
- if s.level != "" {
- params.Set("level", s.level)
- }
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *FieldStatsService) Validate() error {
- var invalid []string
- if s.level != "" && (s.level != FieldStatsIndicesLevel && s.level != FieldStatsClusterLevel) {
- invalid = append(invalid, "Level")
- }
- if len(invalid) != 0 {
- return fmt.Errorf("missing or invalid required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *FieldStatsService) Do(ctx context.Context) (*FieldStatsResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body, http.StatusNotFound)
- if err != nil {
- return nil, err
- }
-
- // TODO(oe): Is 404 really a valid response here?
- if res.StatusCode == http.StatusNotFound {
- return &FieldStatsResponse{make(map[string]IndexFieldStats)}, nil
- }
-
- // Return operation response
- ret := new(FieldStatsResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// -- Request --
-
-// FieldStatsRequest can be used to set up the body to be used in the
-// Field Stats API.
-type FieldStatsRequest struct {
- Fields []string `json:"fields"`
- IndexConstraints map[string]*FieldStatsConstraints `json:"index_constraints,omitempty"`
-}
-
-// FieldStatsConstraints is a constraint on a field.
-type FieldStatsConstraints struct {
- Min *FieldStatsComparison `json:"min_value,omitempty"`
- Max *FieldStatsComparison `json:"max_value,omitempty"`
-}
-
-// FieldStatsComparison contain all comparison operations that can be used
-// in FieldStatsConstraints.
-type FieldStatsComparison struct {
- Lte interface{} `json:"lte,omitempty"`
- Lt interface{} `json:"lt,omitempty"`
- Gte interface{} `json:"gte,omitempty"`
- Gt interface{} `json:"gt,omitempty"`
-}
-
-// -- Response --
-
-// FieldStatsResponse is the response body content
-type FieldStatsResponse struct {
- Indices map[string]IndexFieldStats `json:"indices,omitempty"`
-}
-
-// IndexFieldStats contains field stats for an index
-type IndexFieldStats struct {
- Fields map[string]FieldStats `json:"fields,omitempty"`
-}
-
-// FieldStats contains stats of an individual field
-type FieldStats struct {
- Type string `json:"type"`
- MaxDoc int64 `json:"max_doc"`
- DocCount int64 `json:"doc_count"`
- Density int64 `json:"density"`
- SumDocFrequeny int64 `json:"sum_doc_freq"`
- SumTotalTermFrequency int64 `json:"sum_total_term_freq"`
- Searchable bool `json:"searchable"`
- Aggregatable bool `json:"aggregatable"`
- MinValue interface{} `json:"min_value"`
- MinValueAsString string `json:"min_value_as_string"`
- MaxValue interface{} `json:"max_value"`
- MaxValueAsString string `json:"max_value_as_string"`
-}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/field_stats_test.go b/vendor/gopkg.in/olivere/elastic.v5/field_stats_test.go
deleted file mode 100644
index 48e973840..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/field_stats_test.go
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "encoding/json"
- "net/url"
- "reflect"
- "sort"
- "testing"
-)
-
-func TestFieldStatsURLs(t *testing.T) {
- tests := []struct {
- Service *FieldStatsService
- ExpectedPath string
- ExpectedParams url.Values
- }{
- {
- Service: &FieldStatsService{},
- ExpectedPath: "/_field_stats",
- ExpectedParams: url.Values{},
- },
- {
- Service: &FieldStatsService{
- level: FieldStatsClusterLevel,
- },
- ExpectedPath: "/_field_stats",
- ExpectedParams: url.Values{"level": []string{FieldStatsClusterLevel}},
- },
- {
- Service: &FieldStatsService{
- level: FieldStatsIndicesLevel,
- },
- ExpectedPath: "/_field_stats",
- ExpectedParams: url.Values{"level": []string{FieldStatsIndicesLevel}},
- },
- {
- Service: &FieldStatsService{
- level: FieldStatsClusterLevel,
- index: []string{"index1"},
- },
- ExpectedPath: "/index1/_field_stats",
- ExpectedParams: url.Values{"level": []string{FieldStatsClusterLevel}},
- },
- {
- Service: &FieldStatsService{
- level: FieldStatsIndicesLevel,
- index: []string{"index1", "index2"},
- },
- ExpectedPath: "/index1%2Cindex2/_field_stats",
- ExpectedParams: url.Values{"level": []string{FieldStatsIndicesLevel}},
- },
- {
- Service: &FieldStatsService{
- level: FieldStatsIndicesLevel,
- index: []string{"index_*"},
- },
- ExpectedPath: "/index_%2A/_field_stats",
- ExpectedParams: url.Values{"level": []string{FieldStatsIndicesLevel}},
- },
- }
-
- for _, test := range tests {
- gotPath, gotParams, err := test.Service.buildURL()
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if gotPath != test.ExpectedPath {
- t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
- }
- if gotParams.Encode() != test.ExpectedParams.Encode() {
- t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
- }
- }
-}
-
-func TestFieldStatsValidate(t *testing.T) {
- tests := []struct {
- Service *FieldStatsService
- Valid bool
- }{
- {
- Service: &FieldStatsService{},
- Valid: true,
- },
- {
- Service: &FieldStatsService{
- fields: []string{"field"},
- },
- Valid: true,
- },
- {
- Service: &FieldStatsService{
- bodyJson: &FieldStatsRequest{
- Fields: []string{"field"},
- },
- },
- Valid: true,
- },
- {
- Service: &FieldStatsService{
- level: FieldStatsClusterLevel,
- bodyJson: &FieldStatsRequest{
- Fields: []string{"field"},
- },
- },
- Valid: true,
- },
- {
- Service: &FieldStatsService{
- level: FieldStatsIndicesLevel,
- bodyJson: &FieldStatsRequest{
- Fields: []string{"field"},
- },
- },
- Valid: true,
- },
- {
- Service: &FieldStatsService{
- level: "random",
- },
- Valid: false,
- },
- }
-
- for _, test := range tests {
- err := test.Service.Validate()
- isValid := err == nil
- if isValid != test.Valid {
- t.Errorf("expected validity to be %v, got %v", test.Valid, isValid)
- }
- }
-}
-
-func TestFieldStatsRequestSerialize(t *testing.T) {
- req := &FieldStatsRequest{
- Fields: []string{"creation_date", "answer_count"},
- IndexConstraints: map[string]*FieldStatsConstraints{
- "creation_date": &FieldStatsConstraints{
- Min: &FieldStatsComparison{Gte: "2014-01-01T00:00:00.000Z"},
- Max: &FieldStatsComparison{Lt: "2015-01-01T10:00:00.000Z"},
- },
- },
- }
- data, err := json.Marshal(req)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"fields":["creation_date","answer_count"],"index_constraints":{"creation_date":{"min_value":{"gte":"2014-01-01T00:00:00.000Z"},"max_value":{"lt":"2015-01-01T10:00:00.000Z"}}}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
-
-func TestFieldStatsRequestDeserialize(t *testing.T) {
- body := `{
- "fields" : ["creation_date", "answer_count"],
- "index_constraints" : {
- "creation_date" : {
- "min_value" : {
- "gte" : "2014-01-01T00:00:00.000Z"
- },
- "max_value" : {
- "lt" : "2015-01-01T10:00:00.000Z"
- }
- }
- }
- }`
-
- var request FieldStatsRequest
- if err := json.Unmarshal([]byte(body), &request); err != nil {
- t.Errorf("unexpected error during unmarshalling: %v", err)
- }
-
- sort.Sort(lexicographically{request.Fields})
-
- expectedFields := []string{"answer_count", "creation_date"}
- if !reflect.DeepEqual(request.Fields, expectedFields) {
- t.Errorf("expected fields to be %v, got %v", expectedFields, request.Fields)
- }
-
- constraints, ok := request.IndexConstraints["creation_date"]
- if !ok {
- t.Errorf("expected field creation_date, didn't find it!")
- }
- if constraints.Min.Lt != nil {
- t.Errorf("expected min value less than constraint to be empty, got %v", constraints.Min.Lt)
- }
- if constraints.Min.Gte != "2014-01-01T00:00:00.000Z" {
- t.Errorf("expected min value >= %v, found %v", "2014-01-01T00:00:00.000Z", constraints.Min.Gte)
- }
- if constraints.Max.Lt != "2015-01-01T10:00:00.000Z" {
- t.Errorf("expected max value < %v, found %v", "2015-01-01T10:00:00.000Z", constraints.Max.Lt)
- }
-}
-
-func TestFieldStatsResponseUnmarshalling(t *testing.T) {
- clusterStats := `{
- "_shards": {
- "total": 1,
- "successful": 1,
- "failed": 0
- },
- "indices": {
- "_all": {
- "fields": {
- "creation_date": {
- "type": "date",
- "max_doc": 1326564,
- "doc_count": 564633,
- "density": 42,
- "sum_doc_freq": 2258532,
- "sum_total_term_freq": -1,
- "searchable": true,
- "aggregatable": true,
- "min_value":1483016404000,
- "min_value_as_string": "2016-12-29T13:00:04.000Z",
- "max_value":1484152326000,
- "max_value_as_string": "2017-01-11T16:32:06.000Z"
- },
- "answer_count": {
- "max_doc": 1326564,
- "doc_count": 139885,
- "density": 10,
- "sum_doc_freq": 559540,
- "sum_total_term_freq": -1,
- "searchable": true,
- "aggregatable": true,
- "min_value":1483016404000,
- "min_value_as_string": "2016-12-29T13:00:04.000Z",
- "max_value":1484152326000,
- "max_value_as_string": "2017-01-11T16:32:06.000Z"
- }
- }
- }
- }
- }`
-
- var response FieldStatsResponse
- if err := json.Unmarshal([]byte(clusterStats), &response); err != nil {
- t.Errorf("unexpected error during unmarshalling: %v", err)
- }
-
- stats, ok := response.Indices["_all"]
- if !ok {
- t.Errorf("expected _all to be in the indices map, didn't find it")
- }
-
- fieldStats, ok := stats.Fields["creation_date"]
- if !ok {
- t.Errorf("expected creation_date to be in the fields map, didn't find it")
- }
- if want, have := true, fieldStats.Searchable; want != have {
- t.Errorf("expected creation_date searchable to be %v, got %v", want, have)
- }
- if want, have := true, fieldStats.Aggregatable; want != have {
- t.Errorf("expected creation_date aggregatable to be %v, got %v", want, have)
- }
- if want, have := "2016-12-29T13:00:04.000Z", fieldStats.MinValueAsString; want != have {
- t.Errorf("expected creation_date min value string to be %q, got %q", want, have)
- }
-}
-
-type lexicographically struct {
- strings []string
-}
-
-func (l lexicographically) Len() int {
- return len(l.strings)
-}
-
-func (l lexicographically) Less(i, j int) bool {
- return l.strings[i] < l.strings[j]
-}
-
-func (l lexicographically) Swap(i, j int) {
- l.strings[i], l.strings[j] = l.strings[j], l.strings[i]
-}
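
Field stats has no one-for-one replacement: field caps answers the searchable/aggregatable questions, while the removed min/max statistics are recovered with ordinary min and max aggregations. A hedged migration sketch, assuming an initialized client; the index and field names are illustrative:

// Sketch: replace FieldStats min/max lookups with aggregations.
sr, err := client.Search("posts").
	Size(0). // stats only, no hits
	Aggregation("min_created", elastic.NewMinAggregation().Field("created")).
	Aggregation("max_created", elastic.NewMaxAggregation().Field("created")).
	Do(context.Background())
if err != nil {
	// handle error
}
if min, found := sr.Aggregations.Min("min_created"); found && min.Value != nil {
	// *min.Value is the smallest "created" value across the index
}
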
diff --git a/vendor/gopkg.in/olivere/elastic.v5/get.go b/vendor/gopkg.in/olivere/elastic.v5/get.go
index ef569e174..efcc748bb 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/get.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/get.go
@@ -11,13 +11,13 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// GetService retrieves a typed JSON document from the index based
// on its id.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-get.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-get.html
// for details.
type GetService struct {
client *Client
@@ -172,7 +172,7 @@ func (s *GetService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.routing != "" {
params.Set("routing", s.routing)
@@ -223,7 +223,11 @@ func (s *GetService) Do(ctx context.Context) (*GetResult, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/get_template.go b/vendor/gopkg.in/olivere/elastic.v5/get_template.go
deleted file mode 100644
index 93279a3a7..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/get_template.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-
- "gopkg.in/olivere/elastic.v5/uritemplates"
-)
-
-// GetTemplateService reads a search template.
-// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-template.html.
-type GetTemplateService struct {
- client *Client
- pretty bool
- id string
- version interface{}
- versionType string
-}
-
-// NewGetTemplateService creates a new GetTemplateService.
-func NewGetTemplateService(client *Client) *GetTemplateService {
- return &GetTemplateService{
- client: client,
- }
-}
-
-// Id is the template ID.
-func (s *GetTemplateService) Id(id string) *GetTemplateService {
- s.id = id
- return s
-}
-
-// Version is an explicit version number for concurrency control.
-func (s *GetTemplateService) Version(version interface{}) *GetTemplateService {
- s.version = version
- return s
-}
-
-// VersionType is a specific version type.
-func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService {
- s.versionType = versionType
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *GetTemplateService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
- "id": s.id,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.version != nil {
- params.Set("version", fmt.Sprintf("%v", s.version))
- }
- if s.versionType != "" {
- params.Set("version_type", s.versionType)
- }
-
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *GetTemplateService) Validate() error {
- var invalid []string
- if s.id == "" {
- invalid = append(invalid, "Id")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation and returns the template.
-func (s *GetTemplateService) Do(ctx context.Context) (*GetTemplateResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
- if err != nil {
- return nil, err
- }
-
- // Return result
- ret := new(GetTemplateResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type GetTemplateResponse struct {
- Template string `json:"template"`
-}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/get_template_test.go b/vendor/gopkg.in/olivere/elastic.v5/get_template_test.go
deleted file mode 100644
index 16d063fcc..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/get_template_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestGetPutDeleteTemplate(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- // This is a search template, not an index template!
- tmpl := `{
- "template": {
- "query" : { "term" : { "{{my_field}}" : "{{my_value}}" } },
- "size" : "{{my_size}}"
- },
- "params":{
- "my_field" : "user",
- "my_value" : "olivere",
- "my_size" : 5
- }
-}`
- putres, err := client.PutTemplate().Id("elastic-template").BodyString(tmpl).Do(context.TODO())
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if putres == nil {
- t.Fatalf("expected response; got: %v", putres)
- }
- if !putres.Acknowledged {
- t.Fatalf("expected template creation to be acknowledged; got: %v", putres.Acknowledged)
- }
-
- // Always delete template
- defer client.DeleteTemplate().Id("elastic-template").Do(context.TODO())
-
- // Get template
- getres, err := client.GetTemplate().Id("elastic-template").Do(context.TODO())
- if err != nil {
- t.Fatalf("expected no error; got: %v", err)
- }
- if getres == nil {
- t.Fatalf("expected response; got: %v", getres)
- }
- if getres.Template == "" {
- t.Errorf("expected template %q; got: %q", tmpl, getres.Template)
- }
-}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/get_test.go b/vendor/gopkg.in/olivere/elastic.v5/get_test.go
index 8ad5a43d8..f9504bdbf 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/get_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/get_test.go
@@ -14,13 +14,13 @@ func TestGet(t *testing.T) {
client := setupTestClientAndCreateIndex(t)
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
// Get document 1
- res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
+ res, err := client.Get().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -32,7 +32,7 @@ func TestGet(t *testing.T) {
}
// Get non existent document 99
- res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do(context.TODO())
+ res, err = client.Get().Index(testIndexName).Type("doc").Id("99").Do(context.TODO())
if err == nil {
t.Fatalf("expected error; got: %v", err)
}
@@ -48,13 +48,13 @@ func TestGetWithSourceFiltering(t *testing.T) {
client := setupTestClientAndCreateIndex(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
// Get document 1, without source
- res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do(context.TODO())
+ res, err := client.Get().Index(testIndexName).Type("doc").Id("1").FetchSource(false).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -67,7 +67,7 @@ func TestGetWithSourceFiltering(t *testing.T) {
// Get document 1, exclude Message field
fsc := NewFetchSourceContext(true).Exclude("message")
- res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do(context.TODO())
+ res, err = client.Get().Index(testIndexName).Type("doc").Id("1").FetchSourceContext(fsc).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -94,13 +94,13 @@ func TestGetWithFields(t *testing.T) {
client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
// Get document 1, specifying fields
- res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").StoredFields("message").Do(context.TODO())
+ res, err := client.Get().Index(testIndexName).Type("doc").Id("1").StoredFields("message").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -151,16 +151,16 @@ func TestGetValidate(t *testing.T) {
if _, err := client.Get().Index(testIndexName).Do(context.TODO()); err == nil {
t.Fatal("expected Get to fail")
}
- if _, err := client.Get().Type("tweet").Do(context.TODO()); err == nil {
+ if _, err := client.Get().Type("doc").Do(context.TODO()); err == nil {
t.Fatal("expected Get to fail")
}
if _, err := client.Get().Id("1").Do(context.TODO()); err == nil {
t.Fatal("expected Get to fail")
}
- if _, err := client.Get().Index(testIndexName).Type("tweet").Do(context.TODO()); err == nil {
+ if _, err := client.Get().Index(testIndexName).Type("doc").Do(context.TODO()); err == nil {
t.Fatal("expected Get to fail")
}
- if _, err := client.Get().Type("tweet").Id("1").Do(context.TODO()); err == nil {
+ if _, err := client.Get().Type("doc").Id("1").Do(context.TODO()); err == nil {
t.Fatal("expected Get to fail")
}
}
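
Elasticsearch 6.0 permits only a single mapping type per index, which is why the tests above switch every occurrence of the old `tweet` type to `doc`. A minimal sketch of the index-then-get round-trip under that convention (index name and document body are illustrative):

import (
	"context"

	"github.com/olivere/elastic"
)

func indexAndGet(client *elastic.Client) error {
	ctx := context.Background()
	// Index a document under the single "doc" mapping type.
	_, err := client.Index().Index("elastic-test").Type("doc").Id("1").
		BodyJson(map[string]interface{}{"user": "olivere"}).Do(ctx)
	if err != nil {
		return err
	}
	// Retrieve it again; Type must name the same single mapping type.
	res, err := client.Get().Index("elastic-test").Type("doc").Id("1").Do(ctx)
	if err != nil {
		return err
	}
	_ = res.Source // raw JSON of the stored document
	return nil
}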
diff --git a/vendor/gopkg.in/olivere/elastic.v5/highlight.go b/vendor/gopkg.in/olivere/elastic.v5/highlight.go
index d28f03c3a..6d8d2ba63 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/highlight.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/highlight.go
@@ -6,7 +6,7 @@ package elastic
// Highlight allows highlighting search results on one or more fields.
// For details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-highlighting.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-highlighting.html
type Highlight struct {
fields []*HighlighterField
tagsSchema *string
@@ -20,6 +20,8 @@ type Highlight struct {
requireFieldMatch *bool
boundaryMaxScan *int
boundaryChars *string
+ boundaryScannerType *string
+ boundaryScannerLocale *string
highlighterType *string
fragmenter *string
highlightQuery Query
@@ -103,6 +105,16 @@ func (hl *Highlight) BoundaryChars(boundaryChars string) *Highlight {
return hl
}
+func (hl *Highlight) BoundaryScannerType(boundaryScannerType string) *Highlight {
+ hl.boundaryScannerType = &boundaryScannerType
+ return hl
+}
+
+func (hl *Highlight) BoundaryScannerLocale(boundaryScannerLocale string) *Highlight {
+ hl.boundaryScannerLocale = &boundaryScannerLocale
+ return hl
+}
+
func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
hl.highlighterType = &highlighterType
return hl
@@ -178,6 +190,12 @@ func (hl *Highlight) Source() (interface{}, error) {
if hl.boundaryChars != nil {
source["boundary_chars"] = *hl.boundaryChars
}
+ if hl.boundaryScannerType != nil {
+ source["boundary_scanner"] = *hl.boundaryScannerType
+ }
+ if hl.boundaryScannerLocale != nil {
+ source["boundary_scanner_locale"] = *hl.boundaryScannerLocale
+ }
if hl.highlighterType != nil {
source["type"] = *hl.highlighterType
}
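
The two new setters correspond one-to-one to the `boundary_scanner` and `boundary_scanner_locale` keys emitted by Source() above. A short sketch of combining them (field name and values are illustrative; client setup as in the earlier sketch):

hl := elastic.NewHighlight().
	Field("message").
	BoundaryChars(" \t\r").
	BoundaryScannerType("word").   // serialized as "boundary_scanner"
	BoundaryScannerLocale("en-US") // serialized as "boundary_scanner_locale"
src, err := hl.Source()
if err != nil {
	return err
}
// src now carries boundary_chars, boundary_scanner and
// boundary_scanner_locale next to the highlighted fields.
_ = src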
diff --git a/vendor/gopkg.in/olivere/elastic.v5/highlight_test.go b/vendor/gopkg.in/olivere/elastic.v5/highlight_test.go
index 9687cfb79..c7b972c44 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/highlight_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/highlight_test.go
@@ -117,8 +117,10 @@ func TestHighlighterWithExplicitFieldOrder(t *testing.T) {
}
}
-func TestHighlightWithBoundaryChars(t *testing.T) {
- builder := NewHighlight().BoundaryChars(" \t\r")
+func TestHighlightWithBoundarySettings(t *testing.T) {
+ builder := NewHighlight().
+ BoundaryChars(" \t\r").
+ BoundaryScannerType("word")
src, err := builder.Source()
if err != nil {
t.Fatal(err)
@@ -128,7 +130,7 @@ func TestHighlightWithBoundaryChars(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"boundary_chars":" \t\r"}`
+ expected := `{"boundary_chars":" \t\r","boundary_scanner":"word"}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
@@ -142,17 +144,17 @@ func TestHighlightWithTermQuery(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/index.go b/vendor/gopkg.in/olivere/elastic.v5/index.go
index a4e4ae0d1..4a4c3278e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/index.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/index.go
@@ -9,13 +9,13 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndexService adds or updates a typed JSON document in a specified index,
// making it searchable.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html
// for details.
type IndexService struct {
client *Client
@@ -172,7 +172,7 @@ func (s *IndexService) buildURL() (string, string, url.Values, error) {
})
} else {
// Automatic ID generation
- // See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#index-creation
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#index-creation
method = "POST"
path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{
"index": s.index,
@@ -186,7 +186,7 @@ func (s *IndexService) buildURL() (string, string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.waitForActiveShards != "" {
params.Set("wait_for_active_shards", s.waitForActiveShards)
@@ -264,7 +264,12 @@ func (s *IndexService) Do(ctx context.Context) (*IndexResponse, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, method, path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: method,
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -279,10 +284,14 @@ func (s *IndexService) Do(ctx context.Context) (*IndexResponse, error) {
// IndexResponse is the result of indexing a document in Elasticsearch.
type IndexResponse struct {
- // TODO _shards { total, failed, successful }
- Index string `json:"_index"`
- Type string `json:"_type"`
- Id string `json:"_id"`
- Version int `json:"_version"`
- Created bool `json:"created"`
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Version int64 `json:"_version,omitempty"`
+ Result string `json:"result,omitempty"`
+ Shards *shardsInfo `json:"_shards,omitempty"`
+ SeqNo int64 `json:"_seq_no,omitempty"`
+ PrimaryTerm int64 `json:"_primary_term,omitempty"`
+ Status int `json:"status,omitempty"`
+ ForcedRefresh bool `json:"forced_refresh,omitempty"`
}
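
Two changes stand out in this hunk: requests now travel through a PerformRequestOptions struct instead of positional arguments, and the response drops the old `created` boolean in favor of the richer 6.x write metadata. The outcome is reported in `result`, which Elasticsearch sets to values such as "created" or "updated"; a sketch of inspecting it (index and body are illustrative):

res, err := client.Index().
	Index("elastic-test").Type("doc").Id("1").
	BodyJson(map[string]interface{}{"user": "olivere"}).
	Do(context.Background())
if err != nil {
	return err
}
switch res.Result {
case "created":
	// first write of this id
case "updated":
	// an existing document was overwritten
}
_ = res.SeqNo // sequence number assigned by the primary shard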
diff --git a/vendor/gopkg.in/olivere/elastic.v5/index_test.go b/vendor/gopkg.in/olivere/elastic.v5/index_test.go
index 5e997f3b8..1a0c38576 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/index_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/index_test.go
@@ -18,7 +18,7 @@ func TestIndexLifecycle(t *testing.T) {
// Add a document
indexResult, err := client.Index().
Index(testIndexName).
- Type("tweet").
+ Type("doc").
Id("1").
BodyJson(&tweet1).
Do(context.TODO())
@@ -30,7 +30,7 @@ func TestIndexLifecycle(t *testing.T) {
}
// Exists
- exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
+ exists, err := client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -41,7 +41,7 @@ func TestIndexLifecycle(t *testing.T) {
// Get document
getResult, err := client.Get().
Index(testIndexName).
- Type("tweet").
+ Type("doc").
Id("1").
Do(context.TODO())
if err != nil {
@@ -50,8 +50,8 @@ func TestIndexLifecycle(t *testing.T) {
if getResult.Index != testIndexName {
t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
}
- if getResult.Type != "tweet" {
- t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
+ if getResult.Type != "doc" {
+ t.Errorf("expected GetResult.Type %q; got %q", "doc", getResult.Type)
}
if getResult.Id != "1" {
t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id)
@@ -74,7 +74,7 @@ func TestIndexLifecycle(t *testing.T) {
}
// Delete document again
- deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
+ deleteResult, err := client.Delete().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -83,7 +83,7 @@ func TestIndexLifecycle(t *testing.T) {
}
// Exists
- exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
+ exists, err = client.Exists().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -100,7 +100,7 @@ func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) {
// Add a document
indexResult, err := client.Index().
Index(testIndexName).
- Type("tweet").
+ Type("doc").
BodyJson(&tweet1).
Do(context.TODO())
if err != nil {
@@ -115,7 +115,7 @@ func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) {
id := indexResult.Id
// Exists
- exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO())
+ exists, err := client.Exists().Index(testIndexName).Type("doc").Id(id).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -126,7 +126,7 @@ func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) {
// Get document
getResult, err := client.Get().
Index(testIndexName).
- Type("tweet").
+ Type("doc").
Id(id).
Do(context.TODO())
if err != nil {
@@ -135,8 +135,8 @@ func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) {
if getResult.Index != testIndexName {
t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
}
- if getResult.Type != "tweet" {
- t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
+ if getResult.Type != "doc" {
+ t.Errorf("expected GetResult.Type %q; got %q", "doc", getResult.Type)
}
if getResult.Id != id {
t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id)
@@ -159,7 +159,7 @@ func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) {
}
// Delete document again
- deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO())
+ deleteResult, err := client.Delete().Index(testIndexName).Type("doc").Id(id).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -168,7 +168,7 @@ func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) {
}
// Exists
- exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO())
+ exists, err = client.Exists().Index(testIndexName).Type("doc").Id(id).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -183,7 +183,7 @@ func TestIndexValidate(t *testing.T) {
tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
// No index name -> fail with error
- res, err := NewIndexService(client).Type("tweet").Id("1").BodyJson(&tweet).Do(context.TODO())
+ res, err := NewIndexService(client).Type("doc").Id("1").BodyJson(&tweet).Do(context.TODO())
if err == nil {
t.Fatalf("expected Index to fail without index name")
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_analyze.go b/vendor/gopkg.in/olivere/elastic.v5/indices_analyze.go
index b9000f128..fb3a91234 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_analyze.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_analyze.go
@@ -9,13 +9,13 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesAnalyzeService performs the analysis process on a text and returns
// the tokens breakdown of the text.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-analyze.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-analyze.html
// for detail.
type IndicesAnalyzeService struct {
client *Client
@@ -152,7 +152,7 @@ func (s *IndicesAnalyzeService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.format != "" {
params.Set("format", s.format)
@@ -185,11 +185,16 @@ func (s *IndicesAnalyzeService) Do(ctx context.Context) (*IndicesAnalyzeResponse
} else {
// Request parameters are deprecated in 5.1.1, and we must use a JSON
// structure in the body to pass the parameters.
- // See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-analyze.html
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-analyze.html
body = s.request
}
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
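
Because request parameters were deprecated in 5.1.1, the analyze parameters are sent as a JSON body, as the hunk above shows. A sketch of a call, assuming the client exposes the service as IndexAnalyze() and that the response lists the produced tokens in a Tokens slice:

res, err := client.IndexAnalyze().
	Analyzer("standard").
	Text("Welcome to Golang and Elasticsearch.").
	Do(context.Background())
if err != nil {
	return err
}
for _, tok := range res.Tokens {
	fmt.Println(tok.Token) // welcome, to, golang, ...
}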
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_close.go b/vendor/gopkg.in/olivere/elastic.v5/indices_close.go
index 9388bc973..00ecdf966 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_close.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_close.go
@@ -9,12 +9,12 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesCloseService closes an index.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-open-close.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-open-close.html
// for details.
type IndicesCloseService struct {
client *Client
@@ -134,7 +134,11 @@ func (s *IndicesCloseService) Do(ctx context.Context) (*IndicesCloseResponse, er
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
@@ -149,5 +153,7 @@ func (s *IndicesCloseService) Do(ctx context.Context) (*IndicesCloseResponse, er
// IndicesCloseResponse is the response of IndicesCloseService.Do.
type IndicesCloseResponse struct {
- Acknowledged bool `json:"acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_create.go b/vendor/gopkg.in/olivere/elastic.v5/indices_create.go
index dfd17ac09..8d8e0c25e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_create.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_create.go
@@ -9,12 +9,12 @@ import (
"errors"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesCreateService creates a new index.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-create-index.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-create-index.html
// for details.
type IndicesCreateService struct {
client *Client
@@ -91,7 +91,7 @@ func (b *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, er
params := make(url.Values)
if b.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if b.masterTimeout != "" {
params.Set("master_timeout", b.masterTimeout)
@@ -109,7 +109,12 @@ func (b *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, er
}
// Get response
- res, err := b.client.PerformRequest(ctx, "PUT", path, params, body)
+ res, err := b.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -125,6 +130,7 @@ func (b *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, er
// IndicesCreateResult is the outcome of creating a new index.
type IndicesCreateResult struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
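
The create result gains the 6.x `shards_acknowledged` flag, which distinguishes the master accepting the request from the new index's shards actually having started, plus the name of the created index. A sketch (index name and settings are illustrative):

res, err := client.CreateIndex("elastic-test").
	BodyString(`{"settings":{"number_of_shards":1,"number_of_replicas":0}}`).
	Do(context.Background())
if err != nil {
	return err
}
if !res.Acknowledged {
	// the master did not acknowledge the request in time
}
if !res.ShardsAcknowledged {
	// index created, but shards had not started before the timeout
}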
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_delete.go b/vendor/gopkg.in/olivere/elastic.v5/indices_delete.go
index 997185237..2afeca978 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_delete.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_delete.go
@@ -10,12 +10,12 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesDeleteService allows to delete existing indices.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-delete-index.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-delete-index.html
// for details.
type IndicesDeleteService struct {
client *Client
@@ -71,7 +71,7 @@ func (s *IndicesDeleteService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
@@ -108,7 +108,11 @@ func (s *IndicesDeleteService) Do(ctx context.Context) (*IndicesDeleteResponse,
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go
index d1b88d4bb..0ea34cf89 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go
@@ -9,11 +9,11 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesDeleteTemplateService deletes index templates.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-templates.html.
type IndicesDeleteTemplateService struct {
client *Client
pretty bool
@@ -66,7 +66,7 @@ func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
@@ -103,7 +103,11 @@ func (s *IndicesDeleteTemplateService) Do(ctx context.Context) (*IndicesDeleteTe
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
@@ -118,5 +122,7 @@ func (s *IndicesDeleteTemplateService) Do(ctx context.Context) (*IndicesDeleteTe
// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do.
type IndicesDeleteTemplateResponse struct {
- Acknowledged bool `json:"acknowledged,omitempty"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists.go
index 928006e09..aa9391039 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_exists.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists.go
@@ -11,12 +11,12 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesExistsService checks if an index or indices exist or not.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-exists.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-exists.html
// for details.
type IndicesExistsService struct {
client *Client
@@ -90,7 +90,7 @@ func (s *IndicesExistsService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.local != nil {
params.Set("local", fmt.Sprintf("%v", *s.local))
@@ -133,7 +133,12 @@ func (s *IndicesExistsService) Do(ctx context.Context) (bool, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "HEAD",
+ Path: path,
+ Params: params,
+ IgnoreErrors: []int{404},
+ })
if err != nil {
return false, err
}
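
Listing 404 in IgnoreErrors keeps a missing index from surfacing as an error, so the service can turn the status code into a plain boolean. From the caller's side nothing changes (index name illustrative):

exists, err := client.IndexExists("elastic-test").Do(context.Background())
if err != nil {
	return err // transport-level failure, not "index missing"
}
if !exists {
	// safe to create the index here
}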
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go
index 873d8c00c..40b06e895 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go
@@ -10,7 +10,7 @@ import (
"net/http"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesExistsTemplateService checks if a given template exists.
@@ -62,7 +62,7 @@ func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.local != nil {
params.Set("local", fmt.Sprintf("%v", *s.local))
@@ -96,7 +96,12 @@ func (s *IndicesExistsTemplateService) Do(ctx context.Context) (bool, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "HEAD",
+ Path: path,
+ Params: params,
+ IgnoreErrors: []int{404},
+ })
if err != nil {
return false, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template_test.go
index cddf69f21..a97442971 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template_test.go
@@ -13,13 +13,13 @@ func TestIndexExistsTemplate(t *testing.T) {
client := setupTestClientAndCreateIndex(t)
tmpl := `{
- "template":"elastic-test*",
+ "index_patterns":["elastic-test*"],
"settings":{
"number_of_shards":1,
"number_of_replicas":0
},
"mappings":{
- "tweet":{
+ "doc":{
"properties":{
"tags":{
"type":"keyword"
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go
index 95eb8f1d3..a4d1ff610 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go
@@ -11,12 +11,12 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesExistsTypeService checks if one or more types exist in one or more indices.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-types-exists.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-types-exists.html
// for details.
type IndicesExistsTypeService struct {
client *Client
@@ -97,7 +97,7 @@ func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
@@ -143,7 +143,12 @@ func (s *IndicesExistsTypeService) Do(ctx context.Context) (bool, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "HEAD",
+ Path: path,
+ Params: params,
+ IgnoreErrors: []int{404},
+ })
if err != nil {
return false, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type_test.go
index 2af3c2c5d..3795bd042 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type_test.go
@@ -94,12 +94,12 @@ func TestIndicesExistsType(t *testing.T) {
}
// Check if type exists
- exists, err := client.TypeExists().Index(testIndexName).Type("tweet").Do(context.TODO())
+ exists, err := client.TypeExists().Index(testIndexName).Type("doc").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
if !exists {
- t.Fatalf("type %s should exist in index %s, but doesn't\n", "tweet", testIndexName)
+ t.Fatalf("type %s should exist in index %s, but doesn't\n", "doc", testIndexName)
}
// Delete index
@@ -112,12 +112,12 @@ func TestIndicesExistsType(t *testing.T) {
}
// Check if type exists
- exists, err = client.TypeExists().Index(testIndexName).Type("tweet").Do(context.TODO())
+ exists, err = client.TypeExists().Index(testIndexName).Type("doc").Do(context.TODO())
if err != nil {
t.Fatal(err)
}
if exists {
- t.Fatalf("type %s should not exist in index %s, but it does\n", "tweet", testIndexName)
+ t.Fatalf("type %s should not exist in index %s, but it does\n", "doc", testIndexName)
}
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_flush.go b/vendor/gopkg.in/olivere/elastic.v5/indices_flush.go
index 602700af6..113e53803 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_flush.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_flush.go
@@ -10,14 +10,14 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// Flush allows to flush one or more indices. The flush process of an index
// basically frees memory from the index by flushing data to the index
// storage and clearing the internal transaction log.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-flush.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-flush.html
// for details.
type IndicesFlushService struct {
client *Client
@@ -110,7 +110,7 @@ func (s *IndicesFlushService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.force != nil {
params.Set("force", fmt.Sprintf("%v", *s.force))
@@ -149,7 +149,11 @@ func (s *IndicesFlushService) Do(ctx context.Context) (*IndicesFlushResponse, er
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go b/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go
index 709be1bc4..0e999cf19 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go
@@ -10,7 +10,7 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesForcemergeService allows to force merging of one or more indices.
@@ -125,7 +125,7 @@ func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
@@ -170,7 +170,11 @@ func (s *IndicesForcemergeService) Do(ctx context.Context) (*IndicesForcemergeRe
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get.go
index 3b80c246d..cb4e449d5 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_get.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get.go
@@ -10,12 +10,12 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesGetService retrieves information about one or more indices.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-index.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-index.html
// for more details.
type IndicesGetService struct {
client *Client
@@ -131,7 +131,7 @@ func (s *IndicesGetService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.expandWildcards != "" {
params.Set("expand_wildcards", s.expandWildcards)
@@ -180,7 +180,11 @@ func (s *IndicesGetService) Do(ctx context.Context) (map[string]*IndicesGetRespo
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go
index 530d7bdcd..68b186358 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go
@@ -10,7 +10,7 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// AliasesService returns the aliases associated with one or more indices.
@@ -47,11 +47,11 @@ func (s *AliasesService) buildURL() (string, url.Values, error) {
var path string
if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_aliases", map[string]string{
+ path, err = uritemplates.Expand("/{index}/_alias", map[string]string{
"index": strings.Join(s.index, ","),
})
} else {
- path = "/_aliases"
+ path = "/_alias"
}
if err != nil {
return "", url.Values{}, err
@@ -72,7 +72,11 @@ func (s *AliasesService) Do(ctx context.Context) (*AliasesResult, error) {
}
// Get response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
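
Alias lookups now hit /_alias rather than /_aliases, but the Go-side builder is unchanged. A sketch that resolves which indices back a given alias (names illustrative; the IndicesByAlias helper on the result type is assumed):

res, err := client.Aliases().
	Index("elastic-test", "elastic-test2").
	Pretty(true).
	Do(context.Background())
if err != nil {
	return err
}
for _, index := range res.IndicesByAlias("my-alias") {
	fmt.Println(index)
}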
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go
index 75abac835..2c8da9b7f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go
@@ -18,15 +18,15 @@ func TestAliasesBuildURL(t *testing.T) {
}{
{
[]string{},
- "/_aliases",
+ "/_alias",
},
{
[]string{"index1"},
- "/index1/_aliases",
+ "/index1/_alias",
},
{
[]string{"index1", "index2"},
- "/index1%2Cindex2/_aliases",
+ "/index1%2Cindex2/_alias",
},
}
@@ -45,6 +45,7 @@ func TestAliasesBuildURL(t *testing.T) {
func TestAliases(t *testing.T) {
var err error
+ //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
client := setupTestClientAndCreateIndex(t)
// Some tweets
@@ -53,16 +54,16 @@ func TestAliases(t *testing.T) {
tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
// Add tweets to first index
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
// Add tweets to second index
- _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName2).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -80,7 +81,7 @@ func TestAliases(t *testing.T) {
// Alias should not yet exist
aliasesResult1, err := client.Aliases().
Index(testIndexName, testIndexName2).
- //Pretty(true).
+ Pretty(true).
Do(context.TODO())
if err != nil {
t.Fatal(err)
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_field_mapping.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_field_mapping.go
index 3cb43d54b..e3b7eac07 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_get_field_mapping.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_field_mapping.go
@@ -10,13 +10,13 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesGetFieldMappingService retrieves the mapping definitions for the fields in an index
// or index/type.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-field-mapping.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-field-mapping.html
// for details.
type IndicesGetFieldMappingService struct {
client *Client
@@ -131,7 +131,7 @@ func (s *IndicesGetFieldMappingService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
@@ -170,7 +170,11 @@ func (s *IndicesGetFieldMappingService) Do(ctx context.Context) (map[string]inte
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go
index 3b2258915..7f9c9cb22 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go
@@ -10,13 +10,13 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesGetMappingService retrieves the mapping definitions for an index or
// index/type.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-mapping.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-mapping.html
// for details.
type IndicesGetMappingService struct {
client *Client
@@ -119,7 +119,7 @@ func (s *IndicesGetMappingService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
@@ -156,7 +156,11 @@ func (s *IndicesGetMappingService) Do(ctx context.Context) (map[string]interface
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go
index 0e8f28d70..06fce0dfa 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go
@@ -10,13 +10,13 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesGetSettingsService allows to retrieve settings of one
// or more indices.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-settings.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-get-settings.html
// for more details.
type IndicesGetSettingsService struct {
client *Client
@@ -125,7 +125,7 @@ func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
@@ -164,7 +164,11 @@ func (s *IndicesGetSettingsService) Do(ctx context.Context) (map[string]*Indices
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go
index 15590dc06..ad3a091a0 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go
@@ -10,11 +10,11 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesGetTemplateService returns an index template.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-templates.html.
type IndicesGetTemplateService struct {
client *Client
pretty bool
@@ -75,7 +75,7 @@ func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.flatSettings != nil {
params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
@@ -105,7 +105,11 @@ func (s *IndicesGetTemplateService) Do(ctx context.Context) (map[string]*Indices
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_open.go b/vendor/gopkg.in/olivere/elastic.v5/indices_open.go
index 38277c385..1b58c5721 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_open.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_open.go
@@ -9,12 +9,12 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesOpenService opens an index.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-open-close.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-open-close.html
// for details.
type IndicesOpenService struct {
client *Client
@@ -91,7 +91,7 @@ func (s *IndicesOpenService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.timeout != "" {
params.Set("timeout", s.timeout)
@@ -138,7 +138,11 @@ func (s *IndicesOpenService) Do(ctx context.Context) (*IndicesOpenResponse, erro
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
@@ -153,5 +157,7 @@ func (s *IndicesOpenService) Do(ctx context.Context) (*IndicesOpenResponse, erro
// IndicesOpenResponse is the response of IndicesOpenService.Do.
type IndicesOpenResponse struct {
- Acknowledged bool `json:"acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
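
Like the create and close responses, the open response now reports `shards_acknowledged` and the index name alongside the plain acknowledgement. A sketch of a close/open cycle (index name illustrative):

ctx := context.Background()
if _, err := client.CloseIndex("elastic-test").Do(ctx); err != nil {
	return err
}
openres, err := client.OpenIndex("elastic-test").Do(ctx)
if err != nil {
	return err
}
if openres.Acknowledged && !openres.ShardsAcknowledged {
	// reopened, but shards had not all started before the timeout
}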
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go
index 5d965bd66..12f8e1bd5 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go
@@ -192,7 +192,7 @@ func (a *AliasRemoveAction) Source() (interface{}, error) {
// -- Service --
// AliasService enables users to add or remove an alias.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-aliases.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-aliases.html
// for details.
type AliasService struct {
client *Client
@@ -274,7 +274,12 @@ func (s *AliasService) Do(ctx context.Context) (*AliasResult, error) {
body["actions"] = actions
// Get response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -291,5 +296,7 @@ func (s *AliasService) Do(ctx context.Context) (*AliasResult, error) {
// AliasResult is the outcome of calling Do on AliasService.
type AliasResult struct {
- Acknowledged bool `json:"acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
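
The alias actions themselves keep their shape; only the result type grows the extra fields. A sketch of adding and later removing an alias (names illustrative):

ctx := context.Background()
if _, err := client.Alias().Add("elastic-test", "test-alias").Do(ctx); err != nil {
	return err
}
if _, err := client.Alias().Remove("elastic-test", "test-alias").Do(ctx); err != nil {
	return err
}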
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias_test.go
index 82ab6a07b..ada1dfdef 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias_test.go
@@ -25,18 +25,18 @@ func TestAliasLifecycle(t *testing.T) {
tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
// Add tweets to first index
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
// Add tweets to second index
- _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName2).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping.go
index 03f26ca91..2f8a35e4c 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping.go
@@ -10,13 +10,13 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesPutMappingService allows to register specific mapping definition
// for a specific type.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-put-mapping.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-put-mapping.html
// for details.
type IndicesPutMappingService struct {
client *Client
@@ -142,7 +142,7 @@ func (s *IndicesPutMappingService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.ignoreUnavailable != nil {
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
@@ -202,7 +202,12 @@ func (s *IndicesPutMappingService) Do(ctx context.Context) (*PutMappingResponse,
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -217,5 +222,7 @@ func (s *IndicesPutMappingService) Do(ctx context.Context) (*PutMappingResponse,
// PutMappingResponse is the response of IndicesPutMappingService.Do.
type PutMappingResponse struct {
- Acknowledged bool `json:"acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
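
With single-type indices the mapping body carries exactly one type key, matching the Type passed to the service; the test that follows does the same against a freshly created index. A sketch (index name illustrative):

mapping := `{"doc":{"properties":{"field":{"type":"keyword"}}}}`
res, err := client.PutMapping().
	Index("elastic-test").
	Type("doc").
	BodyString(mapping).
	Do(context.Background())
if err != nil {
	return err
}
_ = res.Acknowledged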
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping_test.go
index ffac0d0f2..644e1187a 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping_test.go
@@ -19,18 +19,18 @@ func TestPutMappingURL(t *testing.T) {
}{
{
[]string{},
- "tweet",
- "/_mapping/tweet",
+ "doc",
+ "/_mapping/doc",
},
{
[]string{"*"},
- "tweet",
- "/%2A/_mapping/tweet",
+ "doc",
+ "/%2A/_mapping/doc",
},
{
[]string{"store-1", "store-2"},
- "tweet",
- "/store-1%2Cstore-2/_mapping/tweet",
+ "doc",
+ "/store-1%2Cstore-2/_mapping/doc",
},
}
@@ -47,9 +47,19 @@ func TestPutMappingURL(t *testing.T) {
func TestMappingLifecycle(t *testing.T) {
client := setupTestClientAndCreateIndex(t)
+ //client := setupTestClientAndCreateIndexAndLog(t)
+
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName3).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex)
+ }
mapping := `{
- "tweetdoc":{
+ "doc":{
"properties":{
"field":{
"type":"keyword"
@@ -58,7 +68,7 @@ func TestMappingLifecycle(t *testing.T) {
}
}`
- putresp, err := client.PutMapping().Index(testIndexName2).Type("tweetdoc").BodyString(mapping).Do(context.TODO())
+ putresp, err := client.PutMapping().Index(testIndexName3).Type("doc").BodyString(mapping).Do(context.TODO())
if err != nil {
t.Fatalf("expected put mapping to succeed; got: %v", err)
}
@@ -69,14 +79,14 @@ func TestMappingLifecycle(t *testing.T) {
t.Fatalf("expected put mapping ack; got: %v", putresp.Acknowledged)
}
- getresp, err := client.GetMapping().Index(testIndexName2).Type("tweetdoc").Do(context.TODO())
+ getresp, err := client.GetMapping().Index(testIndexName3).Type("doc").Do(context.TODO())
if err != nil {
t.Fatalf("expected get mapping to succeed; got: %v", err)
}
if getresp == nil {
t.Fatalf("expected get mapping response; got: %v", getresp)
}
- props, ok := getresp[testIndexName2]
+ props, ok := getresp[testIndexName3]
if !ok {
t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go
index 20ae95207..1283eb669 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go
@@ -10,14 +10,14 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesPutSettingsService changes specific index level settings in
// real time.
//
// See the documentation at
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-update-settings.html.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-update-settings.html.
type IndicesPutSettingsService struct {
client *Client
pretty bool
@@ -118,7 +118,7 @@ func (s *IndicesPutSettingsService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.allowNoIndices != nil {
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
@@ -165,7 +165,12 @@ func (s *IndicesPutSettingsService) Do(ctx context.Context) (*IndicesPutSettings
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -180,5 +185,7 @@ func (s *IndicesPutSettingsService) Do(ctx context.Context) (*IndicesPutSettings
// IndicesPutSettingsResponse is the response of IndicesPutSettingsService.Do.
type IndicesPutSettingsResponse struct {
- Acknowledged bool `json:"acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
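
A sketch of changing a dynamic setting through the service (index name and setting are illustrative):

res, err := client.IndexPutSettings("elastic-test").
	BodyString(`{"index":{"number_of_replicas":0}}`).
	Do(context.Background())
if err != nil {
	return err
}
_ = res.Acknowledged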
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go
index 49df95f54..c0b959647 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go
@@ -9,11 +9,11 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesPutTemplateService creates or updates index mappings.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-templates.html.
type IndicesPutTemplateService struct {
client *Client
pretty bool
@@ -118,7 +118,7 @@ func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.order != nil {
params.Set("order", fmt.Sprintf("%v", s.order))
@@ -181,7 +181,12 @@ func (s *IndicesPutTemplateService) Do(ctx context.Context) (*IndicesPutTemplate
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -196,5 +201,7 @@ func (s *IndicesPutTemplateService) Do(ctx context.Context) (*IndicesPutTemplate
// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do.
type IndicesPutTemplateResponse struct {
- Acknowledged bool `json:"acknowledged,omitempty"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go b/vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go
index dbc83ac2c..f6c7f165e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go
@@ -10,11 +10,11 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// RefreshService explicitly refreshes one or more indices.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/indices-refresh.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-refresh.html.
type RefreshService struct {
client *Client
index []string
@@ -73,7 +73,11 @@ func (s *RefreshService) Do(ctx context.Context) (*RefreshResult, error) {
}
// Get response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
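
A sketch of an explicit refresh, assuming RefreshResult exposes the usual _shards summary (index name illustrative):

res, err := client.Refresh("elastic-test").Do(context.Background())
if err != nil {
	return err
}
fmt.Printf("refreshed %d of %d shards\n", res.Shards.Successful, res.Shards.Total)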
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_refresh_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_refresh_test.go
index a6aac44ea..8640fb602 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_refresh_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_refresh_test.go
@@ -50,17 +50,17 @@ func TestRefresh(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add some documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go b/vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go
index ef1cad033..841b3836f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go
@@ -10,14 +10,14 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesRolloverService rolls an alias over to a new index when the
// existing index is considered to be too large or too old.
//
// It is documented at
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-rollover-index.html.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-rollover-index.html.
type IndicesRolloverService struct {
client *Client
pretty bool
@@ -189,10 +189,10 @@ func (s *IndicesRolloverService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.dryRun {
- params.Set("dry_run", "1")
+ params.Set("dry_run", "true")
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
@@ -242,7 +242,12 @@ func (s *IndicesRolloverService) Do(ctx context.Context) (*IndicesRolloverRespon
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
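
The test below assembles a rollover body from conditions, settings, and a single-type mapping. A sketch of issuing such a request, assuming the client exposes the service as RolloverIndex on the alias name and that the response reports the old and new index:

res, err := client.RolloverIndex("logs-write").
	AddMaxIndexAgeCondition("2d").
	AddMaxIndexDocsCondition(1000000).
	Do(context.Background())
if err != nil {
	return err
}
if res.RolledOver {
	fmt.Printf("rolled %s over to %s\n", res.OldIndex, res.NewIndex)
}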
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_rollover_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_rollover_test.go
index 77ac1e851..81d7099e0 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_rollover_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_rollover_test.go
@@ -97,7 +97,7 @@ func TestIndicesRolloverBodyComplex(t *testing.T) {
AddMaxIndexAgeCondition("2d").
AddMaxIndexDocsCondition(1000000).
AddSetting("index.number_of_shards", 2).
- AddMapping("tweet", map[string]interface{}{
+ AddMapping("doc", map[string]interface{}{
"properties": map[string]interface{}{
"user": map[string]interface{}{
"type": "keyword",
@@ -109,7 +109,7 @@ func TestIndicesRolloverBodyComplex(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"conditions":{"max_age":"2d","max_docs":1000000},"mappings":{"tweet":{"properties":{"user":{"type":"keyword"}}}},"settings":{"index.number_of_shards":2}}`
+ expected := `{"conditions":{"max_age":"2d","max_docs":1000000},"mappings":{"doc":{"properties":{"user":{"type":"keyword"}}}},"settings":{"index.number_of_shards":2}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_segments.go b/vendor/gopkg.in/olivere/elastic.v5/indices_segments.go
new file mode 100644
index 000000000..133d1101e
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_segments.go
@@ -0,0 +1,237 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesSegmentsService provides low-level segment information that a
+// Lucene index (shard level) is built with. It can be used to provide
+// more information on the state of a shard and an index, possibly
+// optimization information, data "wasted" on deletes, and so on.
+//
+// Find further documentation at
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.1/indices-segments.html.
+type IndicesSegmentsService struct {
+ client *Client
+ pretty bool
+ index []string
+ allowNoIndices *bool
+ expandWildcards string
+ ignoreUnavailable *bool
+ human *bool
+ operationThreading interface{}
+ verbose *bool
+}
+
+// NewIndicesSegmentsService creates a new IndicesSegmentsService.
+func NewIndicesSegmentsService(client *Client) *IndicesSegmentsService {
+ return &IndicesSegmentsService{
+ client: client,
+ }
+}
+
+// Index is a comma-separated list of index names; use `_all` or empty string
+// to perform the operation on all indices.
+func (s *IndicesSegmentsService) Index(indices ...string) *IndicesSegmentsService {
+ s.index = append(s.index, indices...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore a wildcard indices expression
+// that resolves into no concrete indices. (This includes the `_all` string
+// or when no indices have been specified).
+func (s *IndicesSegmentsService) AllowNoIndices(allowNoIndices bool) *IndicesSegmentsService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to concrete indices
+// that are open, closed or both.
+func (s *IndicesSegmentsService) ExpandWildcards(expandWildcards string) *IndicesSegmentsService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesSegmentsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesSegmentsService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Human, when set to true, returns time and byte-values in human-readable format.
+func (s *IndicesSegmentsService) Human(human bool) *IndicesSegmentsService {
+ s.human = &human
+ return s
+}
+
+// OperationThreading is undocumented in Elasticsearch as of now.
+func (s *IndicesSegmentsService) OperationThreading(operationThreading interface{}) *IndicesSegmentsService {
+ s.operationThreading = operationThreading
+ return s
+}
+
+// Verbose, when set to true, includes detailed memory usage by Lucene.
+func (s *IndicesSegmentsService) Verbose(verbose bool) *IndicesSegmentsService {
+ s.verbose = &verbose
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human readable.
+func (s *IndicesSegmentsService) Pretty(pretty bool) *IndicesSegmentsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesSegmentsService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_segments", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else {
+ path = "/_segments"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "true")
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ if s.operationThreading != nil {
+ params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading))
+ }
+ if s.verbose != nil {
+ params.Set("verbose", fmt.Sprintf("%v", *s.verbose))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesSegmentsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesSegmentsService) Do(ctx context.Context) (*IndicesSegmentsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesSegmentsResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesSegmentsResponse is the response of IndicesSegmentsService.Do.
+type IndicesSegmentsResponse struct {
+ // Shards provides information returned from shards.
+ Shards shardsInfo `json:"_shards"`
+
+ // Indices provides a map into the segments of an index.
+ // The key of the map is the index name.
+ Indices map[string]*IndexSegments `json:"indices,omitempty"`
+}
+
+type IndexSegments struct {
+ // Shards provides a map into the shard related information of an index.
+ // The key of the map is the number of a specific shard.
+ Shards map[string][]*IndexSegmentsShards `json:"shards,omitempty"`
+}
+
+type IndexSegmentsShards struct {
+ Routing *IndexSegmentsRouting `json:"routing,omitempty"`
+ NumCommittedSegments int64 `json:"num_committed_segments,omitempty"`
+ NumSearchSegments int64 `json:"num_search_segments"`
+
+ // Segments provides a map into the segment related information of a shard.
+ // The key of the map is the specific Lucene segment id.
+ Segments map[string]*IndexSegmentsDetails `json:"segments,omitempty"`
+}
+
+type IndexSegmentsRouting struct {
+ State string `json:"state,omitempty"`
+ Primary bool `json:"primary,omitempty"`
+ Node string `json:"node,omitempty"`
+ RelocatingNode string `json:"relocating_node,omitempty"`
+}
+
+type IndexSegmentsDetails struct {
+ Generation int64 `json:"generation,omitempty"`
+ NumDocs int64 `json:"num_docs,omitempty"`
+ DeletedDocs int64 `json:"deleted_docs,omitempty"`
+ Size string `json:"size,omitempty"`
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+ Memory string `json:"memory,omitempty"`
+ MemoryInBytes int64 `json:"memory_in_bytes,omitempty"`
+ Committed bool `json:"committed,omitempty"`
+ Search bool `json:"search,omitempty"`
+ Version string `json:"version,omitempty"`
+ Compound bool `json:"compound,omitempty"`
+ MergeId string `json:"merge_id,omitempty"`
+ Sort []*IndexSegmentsSort `json:"sort,omitempty"`
+ RAMTree []*IndexSegmentsRamTree `json:"ram_tree,omitempty"`
+ Attributes map[string]string `json:"attributes,omitempty"`
+}
+
+type IndexSegmentsSort struct {
+ Field string `json:"field,omitempty"`
+ Mode string `json:"mode,omitempty"`
+ Missing interface{} `json:"missing,omitempty"`
+ Reverse bool `json:"reverse,omitempty"`
+}
+
+type IndexSegmentsRamTree struct {
+ Description string `json:"description,omitempty"`
+ Size string `json:"size,omitempty"`
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+ Children []*IndexSegmentsRamTree `json:"children,omitempty"`
+}
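
The new service is reachable through the client as IndexSegments, as the test file below shows. A usage sketch, assuming a connected *elastic.Client and an existing index ("twitter" is illustrative):

    import (
        "context"
        "fmt"
        "log"

        "github.com/olivere/elastic"
    )

    func printSegments(client *elastic.Client) {
        res, err := client.IndexSegments("twitter").Human(true).Do(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        // Walk index -> shard -> segment list, as modeled by the types above.
        for index, details := range res.Indices {
            for shard, segs := range details.Shards {
                for _, s := range segs {
                    fmt.Printf("%s shard %s: %d search segments\n", index, shard, s.NumSearchSegments)
                }
            }
        }
    }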
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_segments_test.go b/vendor/gopkg.in/olivere/elastic.v5/indices_segments_test.go
new file mode 100644
index 000000000..2ec181cc1
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_segments_test.go
@@ -0,0 +1,86 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "testing"
+)
+
+func TestIndicesSegments(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t)
+
+ tests := []struct {
+ Indices []string
+ Expected string
+ }{
+ {
+ []string{},
+ "/_segments",
+ },
+ {
+ []string{"index1"},
+ "/index1/_segments",
+ },
+ {
+ []string{"index1", "index2"},
+ "/index1%2Cindex2/_segments",
+ },
+ }
+
+ for i, test := range tests {
+ path, _, err := client.IndexSegments().Index(test.Indices...).buildURL()
+ if err != nil {
+ t.Errorf("case #%d: %v", i+1, err)
+ }
+ if path != test.Expected {
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+ }
+ }
+}
+
+func TestIndexSegments(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+ //client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ segments, err := client.IndexSegments(testIndexName).Pretty(true).Human(true).Do(context.TODO())
+ if err != nil {
+ t.Fatalf("expected no error; got: %v", err)
+ }
+ if segments == nil {
+ t.Fatalf("expected response; got: %v", segments)
+ }
+ indices, found := segments.Indices[testIndexName]
+ if !found {
+ t.Fatalf("expected index information about index %v; got: %v", testIndexName, found)
+ }
+ shards, found := indices.Shards["0"]
+ if !found {
+ t.Fatalf("expected shard information about index %v", testIndexName)
+ }
+ if shards == nil {
+ t.Fatalf("expected shard information to be != nil for index %v", testIndexName)
+ }
+ shard := shards[0]
+ if shard == nil {
+ t.Fatalf("expected shard information to be != nil for shard 0 in index %v", testIndexName)
+ }
+ if shard.Routing == nil {
+ t.Fatalf("expected shard routing information to be != nil for index %v", testIndexName)
+ }
+ segmentDetail, found := shard.Segments["_0"]
+ if !found {
+ t.Fatalf("expected segment detail to be != nil for index %v", testIndexName)
+ }
+ if segmentDetail == nil {
+ t.Fatalf("expected segment detail to be != nil for index %v", testIndexName)
+ }
+ if segmentDetail.NumDocs == 0 {
+ t.Fatal("expected segment to contain >= 1 docs")
+ }
+ if len(segmentDetail.Attributes) == 0 {
+ t.Fatalf("expected segment attributes map to contain at least one key, value pair for index %v", testIndexName)
+ }
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go b/vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go
index ea20804a6..6ea72b281 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go
@@ -10,14 +10,14 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesShrinkService allows you to shrink an existing index into a
// new index with fewer primary shards.
//
// For further details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-shrink-index.html.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-shrink-index.html.
type IndicesShrinkService struct {
client *Client
pretty bool
@@ -102,7 +102,7 @@ func (s *IndicesShrinkService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
@@ -153,7 +153,12 @@ func (s *IndicesShrinkService) Do(ctx context.Context) (*IndicesShrinkResponse,
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -168,6 +173,7 @@ func (s *IndicesShrinkService) Do(ctx context.Context) (*IndicesShrinkResponse,
// IndicesShrinkResponse is the response of IndicesShrinkService.Do.
type IndicesShrinkResponse struct {
- Acknowledged bool `json:"acknowledged"`
- ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
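
IndicesShrinkResponse now also reports the target index name. A sketch of reading it, assuming a connected *elastic.Client and a source index already prepared for shrinking (read-only, shards relocated to one node); both index names are illustrative:

    import (
        "context"
        "fmt"
        "log"

        "github.com/olivere/elastic"
    )

    func shrink(client *elastic.Client) {
        resp, err := client.ShrinkIndex("logs", "logs-small").Do(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        if resp.Acknowledged {
            fmt.Println("shrunk into:", resp.Index) // the newly added field
        }
    }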
diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_stats.go b/vendor/gopkg.in/olivere/elastic.v5/indices_stats.go
index 90ae6837a..20d35a6d4 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/indices_stats.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/indices_stats.go
@@ -10,11 +10,11 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IndicesStatsService provides stats on various metrics of one or more
-// indices. See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-stats.html.
+// indices. See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/indices-stats.html.
type IndicesStatsService struct {
client *Client
pretty bool
@@ -135,7 +135,7 @@ func (s *IndicesStatsService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if len(s.groups) > 0 {
params.Set("groups", strings.Join(s.groups, ","))
@@ -180,7 +180,11 @@ func (s *IndicesStatsService) Do(ctx context.Context) (*IndicesStatsResponse, er
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
@@ -239,25 +243,20 @@ type IndexStatsDocs struct {
}
type IndexStatsStore struct {
- Size string `json:"size,omitempty"` // human size, e.g. 119.3mb
- SizeInBytes int64 `json:"size_in_bytes,omitempty"`
- ThrottleTime string `json:"throttle_time,omitempty"` // human time, e.g. 0s
- ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"`
+ Size string `json:"size,omitempty"` // human size, e.g. 119.3mb
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
}
type IndexStatsIndexing struct {
- IndexTotal int64 `json:"index_total,omitempty"`
- IndexTime string `json:"index_time,omitempty"`
- IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"`
- IndexCurrent int64 `json:"index_current,omitempty"`
- DeleteTotal int64 `json:"delete_total,omitempty"`
- DeleteTime string `json:"delete_time,omitempty"`
- DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"`
- DeleteCurrent int64 `json:"delete_current,omitempty"`
- NoopUpdateTotal int64 `json:"noop_update_total,omitempty"`
- IsThrottled bool `json:"is_throttled,omitempty"`
- ThrottleTime string `json:"throttle_time,omitempty"`
- ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"`
+ IndexTotal int64 `json:"index_total,omitempty"`
+ IndexTime string `json:"index_time,omitempty"`
+ IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"`
+ IndexCurrent int64 `json:"index_current,omitempty"`
+ DeleteTotal int64 `json:"delete_total,omitempty"`
+ DeleteTime string `json:"delete_time,omitempty"`
+ DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"`
+ DeleteCurrent int64 `json:"delete_current,omitempty"`
+ NoopUpdateTotal int64 `json:"noop_update_total,omitempty"`
}
type IndexStatsGet struct {
diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go
index 1e33d9707..78b6d04f2 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go
@@ -10,11 +10,11 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IngestDeletePipelineService deletes pipelines by ID.
-// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/delete-pipeline-api.html.
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/delete-pipeline-api.html.
type IngestDeletePipelineService struct {
client *Client
pretty bool
@@ -67,7 +67,7 @@ func (s *IngestDeletePipelineService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
@@ -104,7 +104,11 @@ func (s *IngestDeletePipelineService) Do(ctx context.Context) (*IngestDeletePipe
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
@@ -119,5 +123,7 @@ func (s *IngestDeletePipelineService) Do(ctx context.Context) (*IngestDeletePipe
// IngestDeletePipelineResponse is the response of IngestDeletePipelineService.Do.
type IngestDeletePipelineResponse struct {
- Acknowledged bool `json:"acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go
index 9a3357d7f..16a683261 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go
@@ -10,11 +10,11 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IngestGetPipelineService returns pipelines based on ID.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/get-pipeline-api.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/get-pipeline-api.html
// for documentation.
type IngestGetPipelineService struct {
client *Client
@@ -68,7 +68,7 @@ func (s *IngestGetPipelineService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
@@ -95,7 +95,11 @@ func (s *IngestGetPipelineService) Do(ctx context.Context) (IngestGetPipelineRes
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go
index 315453e9d..5781e7072 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go
@@ -10,13 +10,13 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IngestPutPipelineService adds pipelines and updates existing pipelines in
// the cluster.
//
-// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/put-pipeline-api.html.
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/put-pipeline-api.html.
type IngestPutPipelineService struct {
client *Client
pretty bool
@@ -84,7 +84,7 @@ func (s *IngestPutPipelineService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
@@ -132,7 +132,12 @@ func (s *IngestPutPipelineService) Do(ctx context.Context) (*IngestPutPipelineRe
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -147,5 +152,7 @@ func (s *IngestPutPipelineService) Do(ctx context.Context) (*IngestPutPipelineRe
// IngestPutPipelineResponse is the response of IngestPutPipelineService.Do.
type IngestPutPipelineResponse struct {
- Acknowledged bool `json:"acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
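
A sketch of registering a pipeline through this service, assuming a connected *elastic.Client; the pipeline ID and processor body are illustrative:

    import (
        "context"
        "fmt"
        "log"

        "github.com/olivere/elastic"
    )

    func putPipeline(client *elastic.Client) {
        body := `{"description":"rename a field","processors":[{"rename":{"field":"src","target_field":"dst"}}]}`
        resp, err := client.IngestPutPipeline("my-pipeline").BodyString(body).Do(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("acknowledged:", resp.Acknowledged)
    }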
diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go
index 04015b459..213f97bbb 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go
@@ -10,14 +10,14 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// IngestSimulatePipelineService executes a specific pipeline against the set of
// documents provided in the body of the request.
//
// The API is documented at
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/simulate-pipeline-api.html.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/simulate-pipeline-api.html.
type IngestSimulatePipelineService struct {
client *Client
pretty bool
@@ -85,7 +85,7 @@ func (s *IngestSimulatePipelineService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.verbose != nil {
params.Set("verbose", fmt.Sprintf("%v", *s.verbose))
@@ -127,7 +127,12 @@ func (s *IngestSimulatePipelineService) Do(ctx context.Context) (*IngestSimulate
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/mget.go b/vendor/gopkg.in/olivere/elastic.v5/mget.go
index 0f2894854..5202a9603 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/mget.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/mget.go
@@ -16,7 +16,7 @@ import (
// a docs array with all the fetched documents, each element similar
// in structure to a document provided by the Get API.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-multi-get.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-multi-get.html
// for details.
type MgetService struct {
client *Client
@@ -124,7 +124,12 @@ func (s *MgetService) Do(ctx context.Context) (*MgetResponse, error) {
}
// Get response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -194,8 +199,8 @@ func (item *MultiGetItem) Version(version int64) *MultiGetItem {
return item
}
-// VersionType can be "internal", "external", "external_gt", "external_gte",
-// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source.
+// VersionType can be "internal", "external", "external_gt", or "external_gte".
+// See org.elasticsearch.index.VersionType in Elasticsearch source.
// It is "internal" by default.
func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem {
item.versionType = versionType
diff --git a/vendor/gopkg.in/olivere/elastic.v5/mget_test.go b/vendor/gopkg.in/olivere/elastic.v5/mget_test.go
index 4d6bfa0c5..6b3ecd9f6 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/mget_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/mget_test.go
@@ -18,17 +18,17 @@ func TestMultiGet(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add some documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -49,8 +49,8 @@ func TestMultiGet(t *testing.T) {
// Get documents 1 and 3
res, err := client.MultiGet().
- Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("1")).
- Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("3")).
+ Add(NewMultiGetItem().Index(testIndexName).Type("doc").Id("1")).
+ Add(NewMultiGetItem().Index(testIndexName).Type("doc").Id("3")).
Do(context.TODO())
if err != nil {
t.Fatal(err)
diff --git a/vendor/gopkg.in/olivere/elastic.v5/msearch.go b/vendor/gopkg.in/olivere/elastic.v5/msearch.go
index 5d0949d9c..ed54d3c2f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/msearch.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/msearch.go
@@ -78,7 +78,12 @@ func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error)
body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n
// Get response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/msearch_test.go b/vendor/gopkg.in/olivere/elastic.v5/msearch_test.go
index 0d3670da6..79f2047e6 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/msearch_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/msearch_test.go
@@ -31,17 +31,17 @@ func TestMultiSearch(t *testing.T) {
}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -57,7 +57,7 @@ func TestMultiSearch(t *testing.T) {
sreq1 := NewSearchRequest().Index(testIndexName, testIndexName2).
Source(NewSearchSource().Query(q1).Size(10))
- sreq2 := NewSearchRequest().Index(testIndexName).Type("tweet").
+ sreq2 := NewSearchRequest().Index(testIndexName).Type("doc").
Source(NewSearchSource().Query(q2))
searchResult, err := client.MultiSearch().
@@ -136,17 +136,17 @@ func TestMultiSearchWithOneRequest(t *testing.T) {
}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go b/vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go
index da0ca70c5..755718e67 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go
@@ -11,14 +11,14 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// MultiTermvectorService returns information and statistics on terms in the
// fields of a particular document. The document could be stored in the
// index or artificially provided by the user.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-multi-termvectors.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-multi-termvectors.html
// for documentation.
type MultiTermvectorService struct {
client *Client
@@ -198,7 +198,7 @@ func (s *MultiTermvectorService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.fieldStatistics != nil {
params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics))
@@ -278,7 +278,12 @@ func (s *MultiTermvectorService) Do(ctx context.Context) (*MultiTermvectorRespon
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/mtermvectors_test.go b/vendor/gopkg.in/olivere/elastic.v5/mtermvectors_test.go
index fe543cf29..5f90cd5e2 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/mtermvectors_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/mtermvectors_test.go
@@ -35,15 +35,15 @@ func TestMultiTermVectorsValidateAndBuildURL(t *testing.T) {
// #2: Type without index
{
"",
- "tweet",
+ "doc",
"",
true,
},
// #3: Both index and type
{
"twitter",
- "tweet",
- "/twitter/tweet/_mtermvectors",
+ "doc",
+ "/twitter/doc/_mtermvectors",
false,
},
}
@@ -82,17 +82,17 @@ func TestMultiTermVectorsWithIds(t *testing.T) {
tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -115,9 +115,9 @@ func TestMultiTermVectorsWithIds(t *testing.T) {
field := "Message"
res, err := client.MultiTermVectors().
Index(testIndexName).
- Type("tweet").
- Add(NewMultiTermvectorItem().Index(testIndexName).Type("tweet").Id("1").Fields(field)).
- Add(NewMultiTermvectorItem().Index(testIndexName).Type("tweet").Id("3").Fields(field)).
+ Type("doc").
+ Add(NewMultiTermvectorItem().Index(testIndexName).Type("doc").Id("1").Fields(field)).
+ Add(NewMultiTermvectorItem().Index(testIndexName).Type("doc").Id("3").Fields(field)).
Do(context.TODO())
if err != nil {
t.Fatal(err)
diff --git a/vendor/gopkg.in/olivere/elastic.v5/nodes_info.go b/vendor/gopkg.in/olivere/elastic.v5/nodes_info.go
index b3687d689..9f1422a69 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/nodes_info.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/nodes_info.go
@@ -11,12 +11,12 @@ import (
"strings"
"time"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// NodesInfoService retrieves information about one or more (or all)
// of the cluster nodes.
-// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-nodes-info.html.
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/cluster-nodes-info.html.
type NodesInfoService struct {
client *Client
pretty bool
@@ -89,7 +89,7 @@ func (s *NodesInfoService) buildURL() (string, url.Values, error) {
params.Set("human", fmt.Sprintf("%v", *s.human))
}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
return path, params, nil
}
@@ -113,7 +113,11 @@ func (s *NodesInfoService) Do(ctx context.Context) (*NodesInfoResponse, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go b/vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go
index a955bb426..7c5f0c9d6 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go
@@ -11,7 +11,7 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// NodesStatsService returns node statistics.
@@ -165,7 +165,7 @@ func (s *NodesStatsService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if len(s.completionFields) > 0 {
params.Set("completion_fields", strings.Join(s.completionFields, ","))
@@ -213,7 +213,11 @@ func (s *NodesStatsService) Do(ctx context.Context) (*NodesStatsResponse, error)
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
@@ -316,26 +320,21 @@ type NodesStatsDocsStats struct {
}
type NodesStatsStoreStats struct {
- Size string `json:"size"`
- SizeInBytes int64 `json:"size_in_bytes"`
- ThrottleTime string `json:"throttle_time"`
- ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"`
+ Size string `json:"size"`
+ SizeInBytes int64 `json:"size_in_bytes"`
}
type NodesStatsIndexingStats struct {
- IndexTotal int64 `json:"index_total"`
- IndexTime string `json:"index_time"`
- IndexTimeInMillis int64 `json:"index_time_in_millis"`
- IndexCurrent int64 `json:"index_current"`
- IndexFailed int64 `json:"index_failed"`
- DeleteTotal int64 `json:"delete_total"`
- DeleteTime string `json:"delete_time"`
- DeleteTimeInMillis int64 `json:"delete_time_in_millis"`
- DeleteCurrent int64 `json:"delete_current"`
- NoopUpdateTotal int64 `json:"noop_update_total"`
- IsThrottled bool `json:"is_throttled"`
- ThrottleTime string `json:"throttle_time"`
- ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"`
+ IndexTotal int64 `json:"index_total"`
+ IndexTime string `json:"index_time"`
+ IndexTimeInMillis int64 `json:"index_time_in_millis"`
+ IndexCurrent int64 `json:"index_current"`
+ IndexFailed int64 `json:"index_failed"`
+ DeleteTotal int64 `json:"delete_total"`
+ DeleteTime string `json:"delete_time"`
+ DeleteTimeInMillis int64 `json:"delete_time_in_millis"`
+ DeleteCurrent int64 `json:"delete_current"`
+ NoopUpdateTotal int64 `json:"noop_update_total"`
Types map[string]*NodesStatsIndexingStats `json:"types"` // stats for individual types
}
@@ -495,10 +494,8 @@ type NodesStatsRequestCacheStats struct {
}
type NodesStatsRecoveryStats struct {
- CurrentAsSource int `json:"current_as_source"`
- CurrentAsTarget int `json:"current_as_target"`
- ThrottleTime string `json:"throttle_time"`
- ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"`
+ CurrentAsSource int `json:"current_as_source"`
+ CurrentAsTarget int `json:"current_as_target"`
}
type NodesStatsNodeOS struct {
diff --git a/vendor/gopkg.in/olivere/elastic.v5/percolate_test.go b/vendor/gopkg.in/olivere/elastic.v5/percolate_test.go
index 43354f0b1..3b3b2efb7 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/percolate_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/percolate_test.go
@@ -10,12 +10,23 @@ import (
)
func TestPercolate(t *testing.T) {
- client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+ //client := setupTestClientAndCreateIndex(t, SetErrorLog(log.New(os.Stdout, "", 0)))
+ //client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
+ client := setupTestClientAndCreateIndex(t)
+
+ // Create query index
+ createQueryIndex, err := client.CreateIndex(testQueryIndex).Body(testQueryMapping).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createQueryIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createQueryIndex)
+ }
// Add a document
- _, err := client.Index().
- Index(testIndexName).
- Type("queries").
+ _, err = client.Index().
+ Index(testQueryIndex).
+ Type("doc").
Id("1").
BodyJson(`{"query":{"match":{"message":"bonsai tree"}}}`).
Refresh("wait_for").
@@ -27,9 +38,9 @@ func TestPercolate(t *testing.T) {
// Percolate should return our registered query
pq := NewPercolatorQuery().
Field("query").
- DocumentType("doctype").
+ DocumentType("doc").
Document(doctype{Message: "A new bonsai tree in the office"})
- res, err := client.Search(testIndexName).Query(pq).Do(context.TODO())
+ res, err := client.Search(testQueryIndex).Type("doc").Query(pq).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -46,8 +57,8 @@ func TestPercolate(t *testing.T) {
t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
}
hit := res.Hits.Hits[0]
- if hit.Index != testIndexName {
- t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ if hit.Index != testQueryIndex {
+ t.Fatalf("expected SearchResult.Hits.Hit.Index = %q; got %q", testQueryIndex, hit.Index)
}
got := string(*hit.Source)
expected := `{"query":{"match":{"message":"bonsai tree"}}}`
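
In 6.x the percolator is an ordinary field type, so the test above keeps the registered queries in their own index. A condensed sketch of the same flow, assuming an index whose mapping declares "query" as a percolator field; the index name and mapping are illustrative:

    import (
        "context"
        "fmt"
        "log"

        "github.com/olivere/elastic"
    )

    func percolate(client *elastic.Client) {
        // Register a query document (strings pass through as raw JSON bodies).
        _, err := client.Index().
            Index("queries").
            Type("doc").
            Id("1").
            BodyJson(`{"query":{"match":{"message":"bonsai tree"}}}`).
            Refresh("wait_for").
            Do(context.TODO())
        if err != nil {
            log.Fatal(err)
        }

        // Find registered queries that match an ad-hoc document.
        pq := elastic.NewPercolatorQuery().
            Field("query").
            DocumentType("doc").
            Document(map[string]interface{}{"message": "A new bonsai tree in the office"})
        res, err := client.Search("queries").Type("doc").Query(pq).Do(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("matching queries:", res.Hits.TotalHits)
    }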
diff --git a/vendor/gopkg.in/olivere/elastic.v5/ping.go b/vendor/gopkg.in/olivere/elastic.v5/ping.go
index a97ddeceb..5c2d34f00 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/ping.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/ping.go
@@ -86,7 +86,7 @@ func (s *PingService) Do(ctx context.Context) (*PingResult, int, error) {
params.Set("timeout", s.timeout)
}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if len(params) > 0 {
url_ += "?" + params.Encode()
diff --git a/vendor/gopkg.in/olivere/elastic.v5/put_template.go b/vendor/gopkg.in/olivere/elastic.v5/put_template.go
deleted file mode 100644
index 5bd8423b5..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/put_template.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "fmt"
- "net/url"
-
- "gopkg.in/olivere/elastic.v5/uritemplates"
-)
-
-// PutTemplateService creates or updates a search template.
-// The documentation can be found at
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-template.html.
-type PutTemplateService struct {
- client *Client
- pretty bool
- id string
- opType string
- version *int
- versionType string
- bodyJson interface{}
- bodyString string
-}
-
-// NewPutTemplateService creates a new PutTemplateService.
-func NewPutTemplateService(client *Client) *PutTemplateService {
- return &PutTemplateService{
- client: client,
- }
-}
-
-// Id is the template ID.
-func (s *PutTemplateService) Id(id string) *PutTemplateService {
- s.id = id
- return s
-}
-
-// OpType is an explicit operation type.
-func (s *PutTemplateService) OpType(opType string) *PutTemplateService {
- s.opType = opType
- return s
-}
-
-// Version is an explicit version number for concurrency control.
-func (s *PutTemplateService) Version(version int) *PutTemplateService {
- s.version = &version
- return s
-}
-
-// VersionType is a specific version type.
-func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService {
- s.versionType = versionType
- return s
-}
-
-// BodyJson is the document as a JSON serializable object.
-func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService {
- s.bodyJson = body
- return s
-}
-
-// BodyString is the document as a string.
-func (s *PutTemplateService) BodyString(body string) *PutTemplateService {
- s.bodyString = body
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *PutTemplateService) buildURL() (string, url.Values, error) {
- // Build URL
- path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
- "id": s.id,
- })
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.version != nil {
- params.Set("version", fmt.Sprintf("%d", *s.version))
- }
- if s.versionType != "" {
- params.Set("version_type", s.versionType)
- }
- if s.opType != "" {
- params.Set("op_type", s.opType)
- }
-
- return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *PutTemplateService) Validate() error {
- var invalid []string
- if s.id == "" {
- invalid = append(invalid, "Id")
- }
- if s.bodyString == "" && s.bodyJson == nil {
- invalid = append(invalid, "BodyJson")
- }
- if len(invalid) > 0 {
- return fmt.Errorf("missing required fields: %v", invalid)
- }
- return nil
-}
-
-// Do executes the operation.
-func (s *PutTemplateService) Do(ctx context.Context) (*AcknowledgedResponse, error) {
- // Check pre-conditions
- if err := s.Validate(); err != nil {
- return nil, err
- }
-
- // Get URL for request
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Setup HTTP request body
- var body interface{}
- if s.bodyJson != nil {
- body = s.bodyJson
- } else {
- body = s.bodyString
- }
-
- // Get HTTP response
- res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
- if err != nil {
- return nil, err
- }
-
- // Return operation response
- ret := new(AcknowledgedResponse)
- if err := s.client.decoder.Decode(res.Body, ret); err != nil {
- return nil, err
- }
- return ret, nil
-}
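
PutTemplateService (the 5.x search-template CRUD) is removed without a direct replacement in this diff. As a hedged sketch only: on 6.x clusters stored search templates live under the _scripts endpoint, so the generic request API used throughout this upgrade can manage one; the endpoint and template ID here are assumptions, not code from this change:

    import (
        "context"
        "log"

        "github.com/olivere/elastic"
    )

    func putSearchTemplate(client *elastic.Client) {
        // Assumed ES 6.x stored-script endpoint for mustache search templates.
        body := `{"script":{"lang":"mustache","source":{"query":{"match":{"title":"{{query_string}}"}}}}}`
        _, err := client.PerformRequest(context.TODO(), elastic.PerformRequestOptions{
            Method: "PUT",
            Path:   "/_scripts/elastic-test",
            Body:   body,
        })
        if err != nil {
            log.Fatal(err)
        }
    }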
diff --git a/vendor/gopkg.in/olivere/elastic.v5/put_template_test.go b/vendor/gopkg.in/olivere/elastic.v5/put_template_test.go
deleted file mode 100644
index 29027a185..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/put_template_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestSearchTemplatesLifecycle(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
-
- // Template
- tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
-
- // Create template
- cresp, err := client.PutTemplate().Id("elastic-test").BodyString(tmpl).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if cresp == nil {
- t.Fatalf("expected response != nil; got: %v", cresp)
- }
- if !cresp.Acknowledged {
- t.Errorf("expected acknowledged = %v; got: %v", true, cresp.Acknowledged)
- }
-
- // Get template
- resp, err := client.GetTemplate().Id("elastic-test").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatalf("expected response != nil; got: %v", resp)
- }
- if resp.Template == "" {
- t.Errorf("expected template != %q; got: %q", "", resp.Template)
- }
-
- // Delete template
- dresp, err := client.DeleteTemplate().Id("elastic-test").Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if dresp == nil {
- t.Fatalf("expected response != nil; got: %v", dresp)
- }
- if !dresp.Acknowledged {
- t.Fatalf("expected acknowledged = %v; got: %v", true, dresp.Acknowledged)
- }
-}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/recipes/bulk_insert/bulk_insert.go b/vendor/gopkg.in/olivere/elastic.v5/recipes/bulk_insert/bulk_insert.go
index 5a7909095..5a8ab39d0 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/recipes/bulk_insert/bulk_insert.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/recipes/bulk_insert/bulk_insert.go
@@ -11,7 +11,7 @@
// The number of documents after which a commit happens can be specified
// via the "bulk-size" flag.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-bulk.html
// for details on the Bulk API in Elasticsearch.
//
// Example
@@ -35,7 +35,7 @@ import (
"time"
"golang.org/x/sync/errgroup"
- "gopkg.in/olivere/elastic.v5"
+ "github.com/olivere/elastic"
)
func main() {
diff --git a/vendor/gopkg.in/olivere/elastic.v5/recipes/connect/connect.go b/vendor/gopkg.in/olivere/elastic.v5/recipes/connect/connect.go
index 156658d36..baff6c114 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/recipes/connect/connect.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/recipes/connect/connect.go
@@ -16,7 +16,7 @@ import (
"fmt"
"log"
- "gopkg.in/olivere/elastic.v5"
+ "github.com/olivere/elastic"
)
func main() {
diff --git a/vendor/gopkg.in/olivere/elastic.v5/recipes/sliced_scroll/sliced_scroll.go b/vendor/gopkg.in/olivere/elastic.v5/recipes/sliced_scroll/sliced_scroll.go
index e59ca562d..d753a61cb 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/recipes/sliced_scroll/sliced_scroll.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/recipes/sliced_scroll/sliced_scroll.go
@@ -13,7 +13,7 @@
// The speedup of sliced scrolling can be significant but is very
// dependent on the specific use case.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html#sliced-scroll
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
// for details on sliced scrolling in Elasticsearch.
//
// Example
@@ -36,7 +36,7 @@ import (
"time"
"golang.org/x/sync/errgroup"
- "gopkg.in/olivere/elastic.v5"
+ "github.com/olivere/elastic"
)
func main() {
diff --git a/vendor/gopkg.in/olivere/elastic.v5/reindex.go b/vendor/gopkg.in/olivere/elastic.v5/reindex.go
index 4650fb18b..35440fa80 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/reindex.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/reindex.go
@@ -11,7 +11,7 @@ import (
)
// ReindexService copies documents from one index to another.
-// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-reindex.html.
+// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-reindex.html.
type ReindexService struct {
client *Client
pretty bool
@@ -168,7 +168,7 @@ func (s *ReindexService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.refresh != "" {
params.Set("refresh", s.refresh)
@@ -267,7 +267,12 @@ func (s *ReindexService) Do(ctx context.Context) (*BulkIndexByScrollResponse, er
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -309,7 +314,12 @@ func (s *ReindexService) DoAsync(ctx context.Context) (*StartTaskResult, error)
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -568,7 +578,7 @@ func (ri *ReindexRemoteInfo) Source() (interface{}, error) {
// ReindexDestination is the destination of a Reindex API call.
// It is basically the metadata of a BulkIndexRequest.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-reindex.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-reindex.html
// for details.
type ReindexDestination struct {
index string
@@ -627,7 +637,7 @@ func (r *ReindexDestination) Parent(parent string) *ReindexDestination {
// OpType specifies if this request should follow create-only or upsert
// behavior. This follows the OpType of the standard document index API.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#operation-type
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-index_.html#operation-type
// for details.
func (r *ReindexDestination) OpType(opType string) *ReindexDestination {
r.opType = opType
diff --git a/vendor/gopkg.in/olivere/elastic.v5/reindex_test.go b/vendor/gopkg.in/olivere/elastic.v5/reindex_test.go
index 2ab604089..fadf4bfc7 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/reindex_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/reindex_test.go
@@ -166,7 +166,7 @@ func TestReindexSourceWithProceedOnVersionConflict(t *testing.T) {
func TestReindexSourceWithQuery(t *testing.T) {
client := setupTestClient(t)
- src := NewReindexSource().Index("twitter").Type("tweet").Query(NewTermQuery("user", "olivere"))
+ src := NewReindexSource().Index("twitter").Type("doc").Query(NewTermQuery("user", "olivere"))
dst := NewReindexDestination().Index("new_twitter")
out, err := client.Reindex().Source(src).Destination(dst).getBody()
if err != nil {
@@ -177,7 +177,7 @@ func TestReindexSourceWithQuery(t *testing.T) {
t.Fatal(err)
}
got := string(b)
- want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter","query":{"term":{"user":"olivere"}},"type":"tweet"}}`
+ want := `{"dest":{"index":"new_twitter"},"source":{"index":"twitter","query":{"term":{"user":"olivere"}},"type":"doc"}}`
if got != want {
t.Fatalf("\ngot %s\nwant %s", got, want)
}
@@ -185,7 +185,7 @@ func TestReindexSourceWithQuery(t *testing.T) {
func TestReindexSourceWithMultipleSourceIndicesAndTypes(t *testing.T) {
client := setupTestClient(t)
- src := NewReindexSource().Index("twitter", "blog").Type("tweet", "post")
+ src := NewReindexSource().Index("twitter", "blog").Type("doc", "post")
dst := NewReindexDestination().Index("all_together")
out, err := client.Reindex().Source(src).Destination(dst).getBody()
if err != nil {
@@ -196,7 +196,7 @@ func TestReindexSourceWithMultipleSourceIndicesAndTypes(t *testing.T) {
t.Fatal(err)
}
got := string(b)
- want := `{"dest":{"index":"all_together"},"source":{"index":["twitter","blog"],"type":["tweet","post"]}}`
+ want := `{"dest":{"index":"all_together"},"source":{"index":["twitter","blog"],"type":["doc","post"]}}`
if got != want {
t.Fatalf("\ngot %s\nwant %s", got, want)
}
@@ -235,7 +235,7 @@ func TestReindexSourceWithScript(t *testing.T) {
t.Fatal(err)
}
got := string(b)
- want := `{"dest":{"index":"new_twitter","version_type":"external"},"script":{"inline":"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}"},"source":{"index":"twitter"}}`
+ want := `{"dest":{"index":"new_twitter","version_type":"external"},"script":{"source":"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}"},"source":{"index":"twitter"}}`
if got != want {
t.Fatalf("\ngot %s\nwant %s", got, want)
}
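
Script bodies are now serialized with "source" instead of the 5.x "inline" key, as the assertion above checks. A sketch of the reindex call that yields such a body, assuming elastic.NewScript wraps the script source; index names mirror the test:

    import (
        "context"
        "fmt"
        "log"

        "github.com/olivere/elastic"
    )

    func reindexWithScript(client *elastic.Client) {
        src := elastic.NewReindexSource().Index("twitter")
        dst := elastic.NewReindexDestination().Index("new_twitter").VersionType("external")
        script := elastic.NewScript("if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}")
        resp, err := client.Reindex().Source(src).Destination(dst).Script(script).Do(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("created:", resp.Created, "updated:", resp.Updated)
    }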
diff --git a/vendor/gopkg.in/olivere/elastic.v5/request.go b/vendor/gopkg.in/olivere/elastic.v5/request.go
index da5a7216e..87d191965 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/request.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/request.go
@@ -6,7 +6,6 @@ package elastic
import (
"bytes"
- "compress/gzip"
"encoding/json"
"io"
"io/ioutil"
@@ -35,21 +34,13 @@ func (r *Request) SetBasicAuth(username, password string) {
((*http.Request)(r)).SetBasicAuth(username, password)
}
-// SetBody encodes the body in the request. Optionally, it performs GZIP compression.
-func (r *Request) SetBody(body interface{}, gzipCompress bool) error {
+// SetBody encodes the body in the request.
+func (r *Request) SetBody(body interface{}) error {
switch b := body.(type) {
case string:
- if gzipCompress {
- return r.setBodyGzip(b)
- } else {
- return r.setBodyString(b)
- }
+ return r.setBodyString(b)
default:
- if gzipCompress {
- return r.setBodyGzip(body)
- } else {
- return r.setBodyJson(body)
- }
+ return r.setBodyJson(body)
}
}
@@ -69,42 +60,6 @@ func (r *Request) setBodyString(body string) error {
return r.setBodyReader(strings.NewReader(body))
}
-// setBodyGzip gzip's the body. It accepts both strings and structs as body.
-// The latter will be encoded via json.Marshal.
-func (r *Request) setBodyGzip(body interface{}) error {
- switch b := body.(type) {
- case string:
- buf := new(bytes.Buffer)
- w := gzip.NewWriter(buf)
- if _, err := w.Write([]byte(b)); err != nil {
- return err
- }
- if err := w.Close(); err != nil {
- return err
- }
- r.Header.Add("Content-Encoding", "gzip")
- r.Header.Add("Vary", "Accept-Encoding")
- return r.setBodyReader(bytes.NewReader(buf.Bytes()))
- default:
- data, err := json.Marshal(b)
- if err != nil {
- return err
- }
- buf := new(bytes.Buffer)
- w := gzip.NewWriter(buf)
- if _, err := w.Write(data); err != nil {
- return err
- }
- if err := w.Close(); err != nil {
- return err
- }
- r.Header.Add("Content-Encoding", "gzip")
- r.Header.Add("Vary", "Accept-Encoding")
- r.Header.Set("Content-Type", "application/json")
- return r.setBodyReader(bytes.NewReader(buf.Bytes()))
- }
-}
-
// setBodyReader writes the body from an io.Reader.
func (r *Request) setBodyReader(body io.Reader) error {
rc, ok := body.(io.ReadCloser)
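
With transport-level gzip removed, SetBody takes only the body value. A sketch using the exported NewRequest constructor exercised by the benchmarks below; the path is illustrative:

    import (
        "log"

        "github.com/olivere/elastic"
    )

    func buildRequest() {
        req, err := elastic.NewRequest("GET", "/twitter/_search")
        if err != nil {
            log.Fatal(err)
        }
        // Strings pass through verbatim; other values are JSON-encoded.
        if err := req.SetBody(`{"query":{"match_all":{}}}`); err != nil {
            log.Fatal(err)
        }
    }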
diff --git a/vendor/gopkg.in/olivere/elastic.v5/request_test.go b/vendor/gopkg.in/olivere/elastic.v5/request_test.go
index d5ae4f800..04fbecbab 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/request_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/request_test.go
@@ -29,22 +29,7 @@ func BenchmarkRequestSetBodyString(b *testing.B) {
}
for i := 0; i < b.N; i++ {
body := `{"query":{"match_all":{}}}`
- err = req.SetBody(body, false)
- if err != nil {
- b.Fatal(err)
- }
- }
- testReq = req
-}
-
-func BenchmarkRequestSetBodyStringGzip(b *testing.B) {
- req, err := NewRequest("GET", "/")
- if err != nil {
- b.Fatal(err)
- }
- for i := 0; i < b.N; i++ {
- body := `{"query":{"match_all":{}}}`
- err = req.SetBody(body, true)
+ err = req.SetBody(body)
if err != nil {
b.Fatal(err)
}
@@ -59,22 +44,7 @@ func BenchmarkRequestSetBodyBytes(b *testing.B) {
}
for i := 0; i < b.N; i++ {
body := []byte(`{"query":{"match_all":{}}}`)
- err = req.SetBody(body, false)
- if err != nil {
- b.Fatal(err)
- }
- }
- testReq = req
-}
-
-func BenchmarkRequestSetBodyBytesGzip(b *testing.B) {
- req, err := NewRequest("GET", "/")
- if err != nil {
- b.Fatal(err)
- }
- for i := 0; i < b.N; i++ {
- body := []byte(`{"query":{"match_all":{}}}`)
- err = req.SetBody(body, true)
+ err = req.SetBody(body)
if err != nil {
b.Fatal(err)
}
@@ -93,26 +63,7 @@ func BenchmarkRequestSetBodyMap(b *testing.B) {
"match_all": map[string]interface{}{},
},
}
- err = req.SetBody(body, false)
- if err != nil {
- b.Fatal(err)
- }
- }
- testReq = req
-}
-
-func BenchmarkRequestSetBodyMapGzip(b *testing.B) {
- req, err := NewRequest("GET", "/")
- if err != nil {
- b.Fatal(err)
- }
- for i := 0; i < b.N; i++ {
- body := map[string]interface{}{
- "query": map[string]interface{}{
- "match_all": map[string]interface{}{},
- },
- }
- err = req.SetBody(body, true)
+ err = req.SetBody(body)
if err != nil {
b.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/retrier_test.go b/vendor/gopkg.in/olivere/elastic.v5/retrier_test.go
index 100a17838..c1c5ff524 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/retrier_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/retrier_test.go
@@ -65,7 +65,10 @@ func TestRetrier(t *testing.T) {
t.Fatal(err)
}
- res, err := client.PerformRequest(context.TODO(), "GET", "/fail", nil, nil)
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/fail",
+ })
if err == nil {
t.Fatal("expected error")
}
@@ -107,7 +110,10 @@ func TestRetrierWithError(t *testing.T) {
t.Fatal(err)
}
- res, err := client.PerformRequest(context.TODO(), "GET", "/fail", nil, nil)
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/fail",
+ })
if err != kaboom {
t.Fatalf("expected %v, got %v", kaboom, err)
}
@@ -121,3 +127,48 @@ func TestRetrierWithError(t *testing.T) {
t.Errorf("expected %d Retrier calls; got: %d", 1, retrier.N)
}
}
+
+func TestRetrierOnPerformRequest(t *testing.T) {
+ var numFailedReqs int
+ fail := func(r *http.Request) (*http.Response, error) {
+ numFailedReqs += 1
+ //return &http.Response{Request: r, StatusCode: 400}, nil
+ return nil, errors.New("request failed")
+ }
+
+ tr := &failingTransport{path: "/fail", fail: fail}
+ httpClient := &http.Client{Transport: tr}
+
+ defaultRetrier := &testRetrier{
+ Retrier: NewStopRetrier(),
+ }
+ requestRetrier := &testRetrier{
+ Retrier: NewStopRetrier(),
+ }
+
+ client, err := NewClient(
+ SetHttpClient(httpClient),
+ SetHealthcheck(false),
+ SetRetrier(defaultRetrier))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := client.PerformRequest(context.TODO(), PerformRequestOptions{
+ Method: "GET",
+ Path: "/fail",
+ Retrier: requestRetrier,
+ })
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ if res != nil {
+ t.Fatal("expected no response")
+ }
+ if want, have := int64(0), defaultRetrier.N; want != have {
+ t.Errorf("defaultRetrier: expected %d calls; got: %d", want, have)
+ }
+ if want, have := int64(1), requestRetrier.N; want != have {
+ t.Errorf("requestRetrier: expected %d calls; got: %d", want, have)
+ }
+}
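The new test shows a per-request retrier taking precedence over the client default. A sketch of the same mechanism from the caller's side, assuming the package's exported NewStopRetrier, NewBackoffRetrier and NewExponentialBackoff helpers:

```go
package main

import (
	"context"
	"log"
	"time"

	elastic "github.com/olivere/elastic" // vendored here as gopkg.in/olivere/elastic.v5
)

func main() {
	// Client-wide default retrier, as in the test above.
	client, err := elastic.NewClient(
		elastic.SetHealthcheck(false),
		elastic.SetRetrier(elastic.NewStopRetrier()),
	)
	if err != nil {
		log.Fatal(err)
	}

	// The Retrier field overrides the client default for this call only.
	res, err := client.PerformRequest(context.Background(), elastic.PerformRequestOptions{
		Method: "GET",
		Path:   "/_cluster/health",
		Retrier: elastic.NewBackoffRetrier(
			elastic.NewExponentialBackoff(10*time.Millisecond, 8*time.Second),
		),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("status: %d", res.StatusCode)
}
```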
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0.sh
deleted file mode 100755
index e160e3a9b..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.0 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.1.sh
deleted file mode 100755
index 0baf08bfe..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.1.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.1 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.1.sh
deleted file mode 100755
index 707d8b769..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.1.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.1.1 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.2.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.2.sh
deleted file mode 100755
index 94b4a8b25..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.2.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.1.2 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.0.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.0.sh
deleted file mode 100755
index a5eae02aa..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.0.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.2.0 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.1.sh
deleted file mode 100755
index c8b390daf..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.1.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.2.1 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.2.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.2.sh
deleted file mode 100755
index d56b52769..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.2.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.2.2 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.3.0.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.3.0.sh
deleted file mode 100755
index ce392a163..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.3.0.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.3.0 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.4.0.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.4.0.sh
deleted file mode 100755
index 5b3eb0b5e..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.4.0.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.4.0 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.4.1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.4.1.sh
deleted file mode 100755
index 21737cbc5..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.4.1.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.4.1 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.5.0.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.5.0.sh
deleted file mode 100755
index 41749a9ba..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.5.0.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.5.0 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.5.1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.5.1.sh
deleted file mode 100755
index 343a605ba..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.5.1.sh
+++ /dev/null
@@ -1 +0,0 @@
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.5.1 elasticsearch
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.6.3.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.6.3.sh
deleted file mode 100755
index 6a9864668..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.6.3.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-VERSION=5.6.3
-docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:$VERSION elasticsearch -Expack.security.enabled=false -Escript.inline=true -Escript.stored=true -Escript.file=true
diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es.sh
new file mode 100755
index 000000000..1f4a851d4
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/run-es.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+VERSION=${VERSION:=6.1.2}
+docker run --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch:$VERSION elasticsearch -Expack.security.enabled=false -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_
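One parameterized script replaces the per-version runners deleted above: because it defaults the version through ${VERSION:=6.1.2}, a different release can be started by exporting the variable, e.g. VERSION=6.0.1 ./run-es.sh.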
diff --git a/vendor/gopkg.in/olivere/elastic.v5/script.go b/vendor/gopkg.in/olivere/elastic.v5/script.go
index b771c0547..273473950 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/script.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/script.go
@@ -9,7 +9,7 @@ import "errors"
// Script holds all the parameters necessary to compile or find in cache
// and then execute a script.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/modules-scripting.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html
// for details of scripting.
type Script struct {
script string
@@ -22,26 +22,21 @@ type Script struct {
func NewScript(script string) *Script {
return &Script{
script: script,
- typ: "", // default type is "inline"
+ typ: "inline",
params: make(map[string]interface{}),
}
}
-// NewScriptInline creates and initializes a new Script of type "inline".
+// NewScriptInline creates and initializes a new inline script, i.e. code.
func NewScriptInline(script string) *Script {
return NewScript(script).Type("inline")
}
-// NewScriptId creates and initializes a new Script of type "id".
-func NewScriptId(script string) *Script {
+// NewScriptStored creates and initializes a new stored script.
+func NewScriptStored(script string) *Script {
return NewScript(script).Type("id")
}
-// NewScriptFile creates and initializes a new Script of type "file".
-func NewScriptFile(script string) *Script {
- return NewScript(script).Type("file")
-}
-
// Script is either the cache key of the script to be compiled/executed
// or the actual script source code for inline scripts. For indexed
// scripts this is the id used in the request. For file scripts this is
@@ -51,7 +46,7 @@ func (s *Script) Script(script string) *Script {
return s
}
-// Type sets the type of script: "inline", "id", or "file".
+// Type sets the type of script: "inline" or "id".
func (s *Script) Type(typ string) *Script {
s.typ = typ
return s
@@ -60,7 +55,7 @@ func (s *Script) Type(typ string) *Script {
// Lang sets the language of the script. Permitted values are "groovy",
// "expression", "mustache", "mvel" (default), "javascript", "python".
// To use certain languages, you need to configure your server and/or
-// add plugins. See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/modules-scripting.html
+// add plugins. See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html
// for details.
func (s *Script) Lang(lang string) *Script {
s.lang = lang
@@ -88,10 +83,11 @@ func (s *Script) Source() (interface{}, error) {
return s.script, nil
}
source := make(map[string]interface{})
- if s.typ == "" {
- source["inline"] = s.script
+ // Beginning with 6.0, the type can only be "source" or "id"
+ if s.typ == "" || s.typ == "inline" {
+ source["source"] = s.script
} else {
- source[s.typ] = s.script
+ source["id"] = s.script
}
if s.lang != "" {
source["lang"] = s.lang
diff --git a/vendor/gopkg.in/olivere/elastic.v5/script_test.go b/vendor/gopkg.in/olivere/elastic.v5/script_test.go
index 355e13a06..aa475d7eb 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/script_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/script_test.go
@@ -20,7 +20,7 @@ func TestScriptingDefault(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `"doc['field'].value * 2"`
+ expected := `{"source":"doc['field'].value * 2"}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
@@ -37,14 +37,14 @@ func TestScriptingInline(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"inline":"doc['field'].value * factor","params":{"factor":2}}`
+ expected := `{"params":{"factor":2},"source":"doc['field'].value * factor"}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
}
-func TestScriptingId(t *testing.T) {
- builder := NewScriptId("script-with-id").Param("factor", 2.0)
+func TestScriptingStored(t *testing.T) {
+ builder := NewScriptStored("script-with-id").Param("factor", 2.0)
src, err := builder.Source()
if err != nil {
t.Fatal(err)
@@ -59,20 +59,3 @@ func TestScriptingId(t *testing.T) {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
}
-
-func TestScriptingFile(t *testing.T) {
- builder := NewScriptFile("script-file").Param("factor", 2.0).Lang("groovy")
- src, err := builder.Source()
- if err != nil {
- t.Fatal(err)
- }
- data, err := json.Marshal(src)
- if err != nil {
- t.Fatalf("marshaling to JSON failed: %v", err)
- }
- got := string(data)
- expected := `{"file":"script-file","lang":"groovy","params":{"factor":2}}`
- if got != expected {
- t.Errorf("expected\n%s\n,got:\n%s", expected, got)
- }
-}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/scroll.go b/vendor/gopkg.in/olivere/elastic.v5/scroll.go
index a075ea61f..ac51a8c00 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/scroll.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/scroll.go
@@ -12,7 +12,7 @@ import (
"strings"
"sync"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
const (
@@ -23,6 +23,7 @@ const (
// ScrollService iterates over pages of search results from Elasticsearch.
type ScrollService struct {
client *Client
+ retrier Retrier
indices []string
types []string
keepAlive string
@@ -50,6 +51,13 @@ func NewScrollService(client *Client) *ScrollService {
return builder
}
+// Retrier allows setting specific retry logic for this ScrollService.
+// If not specified, it will use the client's default retrier.
+func (s *ScrollService) Retrier(retrier Retrier) *ScrollService {
+ s.retrier = retrier
+ return s
+}
+
// Index sets the name of one or more indices to iterate over.
func (s *ScrollService) Index(indices ...string) *ScrollService {
if s.indices == nil {
@@ -117,7 +125,7 @@ func (s *ScrollService) Query(query Query) *ScrollService {
// PostFilter is executed as the last filter. It only affects the
// search hits but not facets. See
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-post-filter.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-post-filter.html
// for details.
func (s *ScrollService) PostFilter(postFilter Query) *ScrollService {
s.ss = s.ss.PostFilter(postFilter)
@@ -126,7 +134,7 @@ func (s *ScrollService) PostFilter(postFilter Query) *ScrollService {
// Slice allows slicing the scroll request into several batches.
// This is supported in Elasticsearch 5.0 or later.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html#sliced-scroll
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
// for details.
func (s *ScrollService) Slice(sliceQuery Query) *ScrollService {
s.ss = s.ss.Slice(sliceQuery)
@@ -147,7 +155,7 @@ func (s *ScrollService) FetchSourceContext(fetchSourceContext *FetchSourceContex
}
// Version can be set to true to return a version for each search hit.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-version.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-version.html.
func (s *ScrollService) Version(version bool) *ScrollService {
s.ss = s.ss.Version(version)
return s
@@ -258,7 +266,13 @@ func (s *ScrollService) Clear(ctx context.Context) error {
ScrollId: []string{scrollId},
}
- _, err := s.client.PerformRequest(ctx, "DELETE", path, params, body)
+ _, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ Body: body,
+ Retrier: s.retrier,
+ })
if err != nil {
return err
}
@@ -283,7 +297,13 @@ func (s *ScrollService) first(ctx context.Context) (*SearchResult, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ Retrier: s.retrier,
+ })
if err != nil {
return nil, err
}
@@ -330,7 +350,7 @@ func (s *ScrollService) buildFirstURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.size != nil && *s.size > 0 {
params.Set("size", fmt.Sprintf("%d", *s.size))
@@ -397,7 +417,13 @@ func (s *ScrollService) next(ctx context.Context) (*SearchResult, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ Retrier: s.retrier,
+ })
if err != nil {
return nil, err
}
@@ -423,7 +449,7 @@ func (s *ScrollService) buildNextURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
return path, params, nil
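A sketch of a scroll loop wired to the new per-service Retrier, which the hunks above thread through the first, next and clear requests; it assumes the package's usual convention that Do returns io.EOF once all pages are consumed:

```go
package elastic

import (
	"context"
	"io"
)

// Sketch: scrolling with a per-service retrier instead of the
// client-wide default.
func exampleScroll(client *Client) error {
	svc := NewScrollService(client).
		Index("tweets").
		Size(100).
		Retrier(NewStopRetrier()) // applies to first, next and clear requests
	defer svc.Clear(context.Background())

	for {
		res, err := svc.Do(context.Background())
		if err == io.EOF {
			return nil // all pages consumed
		}
		if err != nil {
			return err
		}
		for _, hit := range res.Hits.Hits {
			_ = hit // process each page's hits here
		}
	}
}
```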
diff --git a/vendor/gopkg.in/olivere/elastic.v5/scroll_test.go b/vendor/gopkg.in/olivere/elastic.v5/scroll_test.go
index 9c9037beb..c94e5f92f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/scroll_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/scroll_test.go
@@ -20,17 +20,17 @@ func TestScroll(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -113,17 +113,17 @@ func TestScrollWithQueryAndSort(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -200,17 +200,17 @@ func TestScrollWithBody(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun.", Retweets: 3}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
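The fixtures switch from Type("tweet") to Type("doc"), in line with Elasticsearch 6.0 allowing a single mapping type per index. A minimal indexing sketch in that style, with illustrative index and document names:

```go
package elastic

import "context"

// Sketch: indexing under 6.x, where an index holds a single mapping
// type (conventionally "doc"), as the updated tests do.
func exampleIndexDoc(client *Client, doc interface{}) error {
	_, err := client.Index().
		Index("tweets").
		Type("doc"). // one mapping type per index from 6.0 on
		Id("1").
		BodyJson(doc).
		Do(context.Background())
	return err
}
```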
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search.go b/vendor/gopkg.in/olivere/elastic.v5/search.go
index 7121d5545..12d51bf1f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search.go
@@ -12,7 +12,7 @@ import (
"reflect"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// Search for documents in Elasticsearch.
@@ -60,7 +60,7 @@ func (s *SearchService) Source(source interface{}) *SearchService {
// FilterPath allows reducing the response, a mechanism known as
// response filtering and described here:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.6/common-options.html#common-options-response-filtering.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/common-options.html#common-options-response-filtering.
func (s *SearchService) FilterPath(filterPath ...string) *SearchService {
s.filterPath = append(s.filterPath, filterPath...)
return s
@@ -113,7 +113,7 @@ func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService {
// SearchType sets the search operation type. Valid values are:
// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch",
// "dfs_query_and_fetch", "count", "scan".
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-request-search-type.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-search-type.html
// for details.
func (s *SearchService) SearchType(searchType string) *SearchService {
s.searchType = searchType
@@ -268,10 +268,17 @@ func (s *SearchService) StoredFields(fields ...string) *SearchService {
return s
}
+// TrackScores is applied when sorting and controls whether scores will be
+// tracked as well. Defaults to false.
+func (s *SearchService) TrackScores(trackScores bool) *SearchService {
+ s.searchSource = s.searchSource.TrackScores(trackScores)
+ return s
+}
+
// SearchAfter allows a different form of pagination by using a live cursor,
// using the results of the previous page to help the retrieval of the next.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-request-search-after.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-search-after.html
func (s *SearchService) SearchAfter(sortValues ...interface{}) *SearchService {
s.searchSource = s.searchSource.SearchAfter(sortValues...)
return s
@@ -385,7 +392,12 @@ func (s *SearchService) Do(ctx context.Context) (*SearchResult, error) {
}
body = src
}
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -422,7 +434,8 @@ func (r *SearchResult) TotalHits() int64 {
// Each is a utility function to iterate over all hits. It saves you from
// checking for nil values. Notice that Each will ignore errors in
-// serializing JSON.
+// serializing JSON, and that hits with an empty or nil _source are
+// returned as the type's zero value.
func (r *SearchResult) Each(typ reflect.Type) []interface{} {
if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 {
return nil
@@ -430,6 +443,10 @@ func (r *SearchResult) Each(typ reflect.Type) []interface{} {
var slice []interface{}
for _, hit := range r.Hits.Hits {
v := reflect.New(typ).Elem()
+ if hit.Source == nil {
+ slice = append(slice, v.Interface())
+ continue
+ }
if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil {
slice = append(slice, v.Interface())
}
@@ -473,7 +490,7 @@ type SearchHitInnerHits struct {
}
// SearchExplanation explains how the score for a hit was computed.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-request-explain.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-explain.html.
type SearchExplanation struct {
Value float64 `json:"value"` // e.g. 1.0
Description string `json:"description"` // e.g. "boost" or "ConstantScore(*:*), product of:"
@@ -483,11 +500,11 @@ type SearchExplanation struct {
// Suggest
// SearchSuggest is a map of suggestions.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-suggesters.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters.html.
type SearchSuggest map[string][]SearchSuggestion
// SearchSuggestion is a single search suggestion.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-suggesters.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters.html.
type SearchSuggestion struct {
Text string `json:"text"`
Offset int `json:"offset"`
@@ -496,7 +513,7 @@ type SearchSuggestion struct {
}
// SearchSuggestionOption is an option of a SearchSuggestion.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-suggesters.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters.html.
type SearchSuggestionOption struct {
Text string `json:"text"`
Index string `json:"_index"`
@@ -559,6 +576,6 @@ type ProfileResult struct {
// Highlighting
// SearchHitHighlight is the highlight information of a search hit.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-request-highlighting.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-highlighting.html
// for a general discussion of highlighting.
type SearchHitHighlight map[string][]string
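A sketch combining the new TrackScores option with Each's nil-source behavior; tweetDoc is a hypothetical document type introduced only for this example:

```go
package elastic

import (
	"context"
	"reflect"
)

// tweetDoc is a hypothetical document type for this sketch.
type tweetDoc struct {
	User    string `json:"user"`
	Message string `json:"message"`
}

// Sketch: sort by a field but keep scores, then iterate hits; Each now
// yields the type's zero value for hits without a _source.
func exampleSearch(client *Client) error {
	res, err := client.Search("tweets").
		Query(NewMatchAllQuery()).
		Sort("user", true).
		TrackScores(true).
		Do(context.Background())
	if err != nil {
		return err
	}
	for _, item := range res.Each(reflect.TypeOf(tweetDoc{})) {
		t := item.(tweetDoc) // zero tweetDoc if the hit had no _source
		_ = t
	}
	return nil
}
```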
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go
index a43f8ddcc..c5082b2b1 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go
@@ -13,7 +13,7 @@ import (
// analytic information over a set of documents. It is
// (in many senses) the follow-up of facets in Elasticsearch.
// For more details about aggregations, visit:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations.html
type Aggregation interface {
// Source returns a JSON-serializable aggregation that is a fragment
// of the request sent to Elasticsearch.
@@ -24,7 +24,7 @@ type Aggregation interface {
type Aggregations map[string]*json.RawMessage
// Min returns min aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-min-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-min-aggregation.html
func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationValueMetric)
@@ -39,7 +39,7 @@ func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) {
}
// Max returns max aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-max-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-max-aggregation.html
func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationValueMetric)
@@ -54,7 +54,7 @@ func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) {
}
// Sum returns sum aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-sum-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-sum-aggregation.html
func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationValueMetric)
@@ -69,7 +69,7 @@ func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) {
}
// Avg returns average aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-avg-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-avg-aggregation.html
func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationValueMetric)
@@ -84,7 +84,7 @@ func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) {
}
// ValueCount returns value-count aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-valuecount-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-valuecount-aggregation.html
func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationValueMetric)
@@ -99,7 +99,7 @@ func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) {
}
// Cardinality returns cardinality aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-cardinality-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-cardinality-aggregation.html
func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationValueMetric)
@@ -114,7 +114,7 @@ func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) {
}
// Stats returns stats aggregation results.
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-stats-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-stats-aggregation.html
func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationStatsMetric)
@@ -129,7 +129,7 @@ func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) {
}
// ExtendedStats returns extended stats aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-extendedstats-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-extendedstats-aggregation.html
func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationExtendedStatsMetric)
@@ -144,7 +144,7 @@ func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetri
}
// MatrixStats returns matrix stats aggregation results.
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-matrix-stats-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-matrix-stats-aggregation.html
func (a Aggregations) MatrixStats(name string) (*AggregationMatrixStats, bool) {
if raw, found := a[name]; found {
agg := new(AggregationMatrixStats)
@@ -159,7 +159,7 @@ func (a Aggregations) MatrixStats(name string) (*AggregationMatrixStats, bool) {
}
// Percentiles returns percentiles results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-percentile-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-aggregation.html
func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPercentilesMetric)
@@ -174,7 +174,7 @@ func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, b
}
// PercentileRanks returns percentile ranks results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-percentile-rank-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-rank-aggregation.html
func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPercentilesMetric)
@@ -189,7 +189,7 @@ func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetri
}
// TopHits returns top-hits aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-top-hits-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-top-hits-aggregation.html
func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationTopHitsMetric)
@@ -204,7 +204,7 @@ func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) {
}
// Global returns global results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-global-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-global-aggregation.html
func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) {
if raw, found := a[name]; found {
agg := new(AggregationSingleBucket)
@@ -219,7 +219,7 @@ func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) {
}
// Filter returns filter results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-filter-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filter-aggregation.html
func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) {
if raw, found := a[name]; found {
agg := new(AggregationSingleBucket)
@@ -234,7 +234,7 @@ func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) {
}
// Filters returns filters results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-filters-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filters-aggregation.html
func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) {
if raw, found := a[name]; found {
agg := new(AggregationBucketFilters)
@@ -249,7 +249,7 @@ func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) {
}
// Missing returns missing results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-missing-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-missing-aggregation.html
func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) {
if raw, found := a[name]; found {
agg := new(AggregationSingleBucket)
@@ -264,7 +264,7 @@ func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) {
}
// Nested returns nested results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-nested-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-nested-aggregation.html
func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) {
if raw, found := a[name]; found {
agg := new(AggregationSingleBucket)
@@ -279,7 +279,7 @@ func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) {
}
// ReverseNested returns reverse-nested results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-reverse-nested-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-reverse-nested-aggregation.html
func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) {
if raw, found := a[name]; found {
agg := new(AggregationSingleBucket)
@@ -294,7 +294,7 @@ func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool
}
// Children returns children results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-children-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-children-aggregation.html
func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) {
if raw, found := a[name]; found {
agg := new(AggregationSingleBucket)
@@ -309,7 +309,7 @@ func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) {
}
// Terms returns terms aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-terms-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-terms-aggregation.html
func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) {
if raw, found := a[name]; found {
agg := new(AggregationBucketKeyItems)
@@ -324,7 +324,7 @@ func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) {
}
// SignificantTerms returns significant terms aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html
func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) {
if raw, found := a[name]; found {
agg := new(AggregationBucketSignificantTerms)
@@ -339,7 +339,7 @@ func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignifica
}
// Sampler returns sampler aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-sampler-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-sampler-aggregation.html
func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) {
if raw, found := a[name]; found {
agg := new(AggregationSingleBucket)
@@ -354,7 +354,7 @@ func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) {
}
// Range returns range aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-range-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-range-aggregation.html
func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) {
if raw, found := a[name]; found {
agg := new(AggregationBucketRangeItems)
@@ -369,7 +369,7 @@ func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) {
}
// KeyedRange returns keyed range aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-range-aggregation.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-range-aggregation.html.
func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) {
if raw, found := a[name]; found {
agg := new(AggregationBucketKeyedRangeItems)
@@ -384,7 +384,7 @@ func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems
}
// DateRange returns date range aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-daterange-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-daterange-aggregation.html
func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) {
if raw, found := a[name]; found {
agg := new(AggregationBucketRangeItems)
@@ -398,9 +398,9 @@ func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool
return nil, false
}
-// IPv4Range returns IPv4 range aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-iprange-aggregation.html
-func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool) {
+// IPRange returns IP range aggregation results.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-iprange-aggregation.html
+func (a Aggregations) IPRange(name string) (*AggregationBucketRangeItems, bool) {
if raw, found := a[name]; found {
agg := new(AggregationBucketRangeItems)
if raw == nil {
@@ -414,7 +414,7 @@ func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool
}
// Histogram returns histogram aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-histogram-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-histogram-aggregation.html
func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) {
if raw, found := a[name]; found {
agg := new(AggregationBucketHistogramItems)
@@ -429,7 +429,7 @@ func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems,
}
// DateHistogram returns date histogram aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-datehistogram-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-datehistogram-aggregation.html
func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) {
if raw, found := a[name]; found {
agg := new(AggregationBucketHistogramItems)
@@ -444,7 +444,7 @@ func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramIte
}
// GeoBounds returns geo-bounds aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-geobounds-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-geobounds-aggregation.html
func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationGeoBoundsMetric)
@@ -459,7 +459,7 @@ func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool)
}
// GeoHash returns geo-hash aggregation results.
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-geohashgrid-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-geohashgrid-aggregation.html
func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) {
if raw, found := a[name]; found {
agg := new(AggregationBucketKeyItems)
@@ -474,7 +474,7 @@ func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) {
}
// GeoDistance returns geo distance aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-geodistance-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-geodistance-aggregation.html
func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) {
if raw, found := a[name]; found {
agg := new(AggregationBucketRangeItems)
@@ -489,7 +489,7 @@ func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bo
}
// AvgBucket returns average bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-avg-bucket-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-avg-bucket-aggregation.html
func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPipelineSimpleValue)
@@ -504,7 +504,7 @@ func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, b
}
// SumBucket returns sum bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-sum-bucket-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-sum-bucket-aggregation.html
func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPipelineSimpleValue)
@@ -519,7 +519,7 @@ func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, b
}
// StatsBucket returns stats bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-stats-bucket-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-stats-bucket-aggregation.html
func (a Aggregations) StatsBucket(name string) (*AggregationPipelineStatsMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPipelineStatsMetric)
@@ -534,7 +534,7 @@ func (a Aggregations) StatsBucket(name string) (*AggregationPipelineStatsMetric,
}
// PercentilesBucket returns stats bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-percentiles-bucket-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-percentiles-bucket-aggregation.html
func (a Aggregations) PercentilesBucket(name string) (*AggregationPipelinePercentilesMetric, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPipelinePercentilesMetric)
@@ -549,7 +549,7 @@ func (a Aggregations) PercentilesBucket(name string) (*AggregationPipelinePercen
}
// MaxBucket returns maximum bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-max-bucket-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-max-bucket-aggregation.html
func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricValue, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPipelineBucketMetricValue)
@@ -564,7 +564,7 @@ func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricVa
}
// MinBucket returns minimum bucket pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-min-bucket-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-min-bucket-aggregation.html
func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricValue, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPipelineBucketMetricValue)
@@ -579,7 +579,7 @@ func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricVa
}
// MovAvg returns moving average pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html
func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPipelineSimpleValue)
@@ -594,7 +594,7 @@ func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool
}
// Derivative returns derivative pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-derivative-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-derivative-aggregation.html
func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPipelineDerivative)
@@ -609,7 +609,7 @@ func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, b
}
// CumulativeSum returns a cumulative sum pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-cumulative-sum-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-cumulative-sum-aggregation.html
func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValue, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPipelineSimpleValue)
@@ -624,7 +624,7 @@ func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValu
}
// BucketScript returns bucket script pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-bucket-script-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-bucket-script-aggregation.html
func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPipelineSimpleValue)
@@ -639,7 +639,7 @@ func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue
}
// SerialDiff returns serial differencing pipeline aggregation results.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-serialdiff-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-serialdiff-aggregation.html
func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, bool) {
if raw, found := a[name]; found {
agg := new(AggregationPipelineSimpleValue)
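A sketch tying the renamed IPRange accessor to the new IPRangeAggregation builder added later in this patch; the "logs" index and "clientip" field are illustrative:

```go
package elastic

import "context"

// Sketch: build the IP range aggregation and read it back through the
// renamed accessor.
func exampleIPRange(client *Client) error {
	agg := NewIPRangeAggregation().
		Field("clientip").
		AddUnboundedFrom("10.0.0.0").           // everything below 10.0.0.0
		AddRange("10.0.0.0", "10.255.255.255"). // an explicit from/to pair
		AddMaskRange("192.168.0.0/16")          // CIDR form
	res, err := client.Search("logs").
		Aggregation("ips", agg).
		Size(0).
		Do(context.Background())
	if err != nil {
		return err
	}
	if ranges, found := res.Aggregations.IPRange("ips"); found {
		for _, bucket := range ranges.Buckets {
			_ = bucket.DocCount // per-range document counts
		}
	}
	return nil
}
```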
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go
index 14d0d1ca9..08623a58e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go
@@ -7,7 +7,7 @@ package elastic
// ChildrenAggregation is a special single bucket aggregation that enables
// aggregating from buckets on parent document types to buckets on child documents.
// It is available from 1.4.0.Beta1 upwards.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-children-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-children-aggregation.html
type ChildrenAggregation struct {
typ string
subAggregations map[string]Aggregation
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_count_thresholds.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_count_thresholds.go
new file mode 100644
index 000000000..53efdaf5f
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_count_thresholds.go
@@ -0,0 +1,13 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// BucketCountThresholds is used in aggregations such as terms and significant text.
+type BucketCountThresholds struct {
+ MinDocCount *int64
+ ShardMinDocCount *int64
+ RequiredSize *int
+ ShardSize *int
+}
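Since every field is a pointer, an unset threshold is distinguishable from an explicit zero. A small sketch with hypothetical pointer helpers (only the struct itself comes from this patch):

```go
package elastic

// Hypothetical pointer helpers for this sketch.
func int64Ptr(v int64) *int64 { return &v }
func intPtr(v int) *int       { return &v }

// Sketch: the pointer fields distinguish "unset" from an explicit zero.
func exampleThresholds() *BucketCountThresholds {
	return &BucketCountThresholds{
		MinDocCount:      int64Ptr(10), // drop buckets with fewer than 10 docs
		ShardMinDocCount: int64Ptr(2),
		RequiredSize:     intPtr(5),
		ShardSize:        intPtr(25),
	}
}
```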
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go
index 17916b490..1e7a1246c 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go
@@ -6,7 +6,7 @@ package elastic
// DateHistogramAggregation is a multi-bucket aggregation similar to the
// histogram except it can only be applied on date values.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-datehistogram-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-datehistogram-aggregation.html
type DateHistogramAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go
index a4c10aa88..5407dadb8 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go
@@ -15,7 +15,7 @@ import (
// date format by which the from and to response fields will be returned.
// Note that this aggregation includes the from value and excludes the to
// value for each range.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-daterange-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-daterange-aggregation.html
type DateRangeAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go
index 2085f0d36..e4fbc67da 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go
@@ -8,7 +8,7 @@ package elastic
// in the current document set context that match a specified filter.
// Often this will be used to narrow down the current aggregation context
// to a specific set of documents.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-filter-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filter-aggregation.html
type FilterAggregation struct {
filter Query
subAggregations map[string]Aggregation
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go
index 80999eed9..0d128ca17 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go
@@ -15,7 +15,7 @@ import "errors"
// use both named and unnamed filters.
//
// For details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-filters-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-filters-aggregation.html
type FiltersAggregation struct {
unnamedFilters []Query
namedFilters map[string]Query
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go
index 2ae7b63dc..c082fb3f2 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go
@@ -11,7 +11,7 @@ package elastic
// the origin point and determines the buckets it belongs to based on
// the ranges (a document belongs to a bucket if the distance between the
// document and the origin falls within the distance range of the bucket).
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-geodistance-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-geodistance-aggregation.html
type GeoDistanceAggregation struct {
field string
unit string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go
index fbd14a45f..4bf2a63f8 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go
@@ -8,7 +8,7 @@ package elastic
// the search execution context. This context is defined by the indices
// and the document types you’re searching on, but is not influenced
// by the search query itself.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-global-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-global-aggregation.html
type GlobalAggregation struct {
subAggregations map[string]Aggregation
meta map[string]interface{}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go
index 30528c164..8b698cff5 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go
@@ -8,7 +8,7 @@ package elastic
// that can be applied on numeric values extracted from the documents.
// It dynamically builds fixed size (a.k.a. interval) buckets over the
// values.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-histogram-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-histogram-aggregation.html
type HistogramAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_ip_range.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_ip_range.go
new file mode 100644
index 000000000..3615e29c3
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_ip_range.go
@@ -0,0 +1,195 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// IPRangeAggregation is a range aggregation that is dedicated for
+// IP addresses.
+//
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-iprange-aggregation.html
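+//
+// A minimal usage sketch (client, the "logs" index name, and ctx are
+// illustrative and not defined in this file):
+//
+// agg := NewIPRangeAggregation().Field("remote_ip").
+// Lt("10.0.0.5").
+// Gt("10.0.0.5")
+// res, err := client.Search().Index("logs").Aggregation("ip_ranges", agg).Do(ctx)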
+type IPRangeAggregation struct {
+ field string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+ keyed *bool
+ entries []IPRangeAggregationEntry
+}
+
+type IPRangeAggregationEntry struct {
+ Key string
+ Mask string
+ From string
+ To string
+}
+
+func NewIPRangeAggregation() *IPRangeAggregation {
+ return &IPRangeAggregation{
+ subAggregations: make(map[string]Aggregation),
+ entries: make([]IPRangeAggregationEntry, 0),
+ }
+}
+
+func (a *IPRangeAggregation) Field(field string) *IPRangeAggregation {
+ a.field = field
+ return a
+}
+
+func (a *IPRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *IPRangeAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *IPRangeAggregation) Meta(metaData map[string]interface{}) *IPRangeAggregation {
+ a.meta = metaData
+ return a
+}
+
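+// Keyed asks Elasticsearch to return the ranges as a JSON object keyed by
+// each range (or its key) instead of as an array of buckets.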
+func (a *IPRangeAggregation) Keyed(keyed bool) *IPRangeAggregation {
+ a.keyed = &keyed
+ return a
+}
+
+func (a *IPRangeAggregation) AddMaskRange(mask string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Mask: mask})
+ return a
+}
+
+func (a *IPRangeAggregation) AddMaskRangeWithKey(key, mask string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, Mask: mask})
+ return a
+}
+
+func (a *IPRangeAggregation) AddRange(from, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) AddRangeWithKey(key, from, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
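+// AddUnboundedTo adds a range that only has a lower bound, i.e. the "to"
+// side is left open.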
+func (a *IPRangeAggregation) AddUnboundedTo(from string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: ""})
+ return a
+}
+
+func (a *IPRangeAggregation) AddUnboundedToWithKey(key, from string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: ""})
+ return a
+}
+
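+// AddUnboundedFrom adds a range that only has an upper bound, i.e. the
+// "from" side is left open.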
+func (a *IPRangeAggregation) AddUnboundedFrom(to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: "", To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) AddUnboundedFromWithKey(key, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: "", To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) Lt(to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: "", To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) LtWithKey(key, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: "", To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) Between(from, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) BetweenWithKey(key, from, to string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a *IPRangeAggregation) Gt(from string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: ""})
+ return a
+}
+
+func (a *IPRangeAggregation) GtWithKey(key, from string) *IPRangeAggregation {
+ a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: ""})
+ return a
+}
+
+func (a *IPRangeAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "aggs" : {
+ // "range" : {
+ // "ip_range": {
+ // "field": "ip",
+ // "ranges": [
+ // { "to": "10.0.0.5" },
+ // { "from": "10.0.0.5" }
+ // ]
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "ip_range" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["ip_range"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+
+ if a.keyed != nil {
+ opts["keyed"] = *a.keyed
+ }
+
+ var ranges []interface{}
+ for _, ent := range a.entries {
+ r := make(map[string]interface{})
+ if ent.Key != "" {
+ r["key"] = ent.Key
+ }
+ if ent.Mask != "" {
+ r["mask"] = ent.Mask
+ } else {
+ if ent.From != "" {
+ r["from"] = ent.From
+ }
+ if ent.To != "" {
+ r["to"] = ent.To
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_ip_range_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_ip_range_test.go
new file mode 100644
index 000000000..7a2b49f4c
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_ip_range_test.go
@@ -0,0 +1,90 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestIPRangeAggregation(t *testing.T) {
+ agg := NewIPRangeAggregation().Field("remote_ip")
+ agg = agg.AddRange("", "10.0.0.0")
+ agg = agg.AddRange("10.1.0.0", "10.1.255.255")
+ agg = agg.AddRange("10.2.0.0", "")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ip_range":{"field":"remote_ip","ranges":[{"to":"10.0.0.0"},{"from":"10.1.0.0","to":"10.1.255.255"},{"from":"10.2.0.0"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestIPRangeAggregationMask(t *testing.T) {
+ agg := NewIPRangeAggregation().Field("remote_ip")
+ agg = agg.AddMaskRange("10.0.0.0/25")
+ agg = agg.AddMaskRange("10.0.0.127/25")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ip_range":{"field":"remote_ip","ranges":[{"mask":"10.0.0.0/25"},{"mask":"10.0.0.127/25"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestIPRangeAggregationWithKeyedFlag(t *testing.T) {
+ agg := NewIPRangeAggregation().Field("remote_ip")
+ agg = agg.Keyed(true)
+ agg = agg.AddRange("", "10.0.0.0")
+ agg = agg.AddRange("10.1.0.0", "10.1.255.255")
+ agg = agg.AddRange("10.2.0.0", "")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ip_range":{"field":"remote_ip","keyed":true,"ranges":[{"to":"10.0.0.0"},{"from":"10.1.0.0","to":"10.1.255.255"},{"from":"10.2.0.0"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestIPRangeAggregationWithKeys(t *testing.T) {
+ agg := NewIPRangeAggregation().Field("remote_ip")
+ agg = agg.Keyed(true)
+ agg = agg.LtWithKey("infinity", "10.0.0.5")
+ agg = agg.GtWithKey("and-beyond", "10.0.0.5")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"ip_range":{"field":"remote_ip","keyed":true,"ranges":[{"key":"infinity","to":"10.0.0.5"},{"from":"10.0.0.5","key":"and-beyond"}]}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go
index 3ca3fd693..7ba3cb636 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go
@@ -11,7 +11,7 @@ package elastic
// conjunction with other field data bucket aggregators (such as ranges)
// to return information for all the documents that could not be placed
// in any of the other buckets due to missing field data values.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-missing-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-missing-aggregation.html
type MissingAggregation struct {
field string
subAggregations map[string]Aggregation
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go
index 62e592eb8..926d493a1 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go
@@ -6,7 +6,7 @@ package elastic
// NestedAggregation is a special single bucket aggregation that enables
// aggregating nested documents.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-nested-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-nested-aggregation.html
type NestedAggregation struct {
path string
subAggregations map[string]Aggregation
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go
index 1c5204a0b..28c3df78e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go
@@ -14,7 +14,7 @@ import (
// will be checked against each bucket range and "bucket" the
// relevant/matching document. Note that this aggregation includes the
// from value and excludes the to value for each range.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-range-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-range-aggregation.html
type RangeAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go
index f307f256f..9e4680195 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go
@@ -11,7 +11,7 @@ package elastic
// which allows nesting other aggregations that aren’t part of
// the nested object in a nested aggregation.
//
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-reverse-nested-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-reverse-nested-aggregation.html
type ReverseNestedAggregation struct {
path string
subAggregations map[string]Aggregation
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go
index c1a1ab4f7..0fd729dfd 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go
@@ -9,7 +9,7 @@ package elastic
// Optionally, diversity settings can be used to limit the number of matches
// that share a common value such as an "author".
//
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-sampler-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-sampler-aggregation.html
type SamplerAggregation struct {
subAggregations map[string]Aggregation
meta map[string]interface{}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go
index e03801f1e..571a91217 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go
@@ -4,9 +4,9 @@
package elastic
-// SignificantSignificantTermsAggregation is an aggregation that returns interesting
+// SignificantTermsAggregation is an aggregation that returns interesting
// or unusual occurrences of terms in a set.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html
type SignificantTermsAggregation struct {
field string
subAggregations map[string]Aggregation
@@ -166,7 +166,7 @@ type SignificanceHeuristic interface {
// ChiSquareSignificanceHeuristic implements Chi square as described
// in "Information Retrieval", Manning et al., Chapter 13.5.2.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_chi_square
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_chi_square
// for details.
type ChiSquareSignificanceHeuristic struct {
backgroundIsSuperset *bool
@@ -216,7 +216,7 @@ func (sh *ChiSquareSignificanceHeuristic) Source() (interface{}, error) {
// as described in "The Google Similarity Distance", Cilibrasi and Vitanyi,
// 2007.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_google_normalized_distance
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_google_normalized_distance
// for details.
type GNDSignificanceHeuristic struct {
backgroundIsSuperset *bool
@@ -252,7 +252,7 @@ func (sh *GNDSignificanceHeuristic) Source() (interface{}, error) {
// -- JLH Score --
// JLHScoreSignificanceHeuristic implements the JLH score as described in
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_jlh_score.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_jlh_score.
type JLHScoreSignificanceHeuristic struct{}
// NewJLHScoreSignificanceHeuristic initializes a new JLHScoreSignificanceHeuristic.
@@ -276,7 +276,7 @@ func (sh *JLHScoreSignificanceHeuristic) Source() (interface{}, error) {
// MutualInformationSignificanceHeuristic implements Mutual information
// as described in "Information Retrieval", Manning et al., Chapter 13.5.1.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_mutual_information
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_mutual_information
// for details.
type MutualInformationSignificanceHeuristic struct {
backgroundIsSuperset *bool
@@ -324,7 +324,7 @@ func (sh *MutualInformationSignificanceHeuristic) Source() (interface{}, error)
// -- Percentage Score --
// PercentageScoreSignificanceHeuristic implements the algorithm described
-// in https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_percentage.
+// in https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_percentage.
type PercentageScoreSignificanceHeuristic struct{}
// NewPercentageScoreSignificanceHeuristic initializes a new instance of
@@ -347,7 +347,7 @@ func (sh *PercentageScoreSignificanceHeuristic) Source() (interface{}, error) {
// -- Script --
// ScriptSignificanceHeuristic implements a scripted significance heuristic.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_scripted
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_scripted
// for details.
type ScriptSignificanceHeuristic struct {
script *Script
@@ -368,7 +368,7 @@ func (sh *ScriptSignificanceHeuristic) Name() string {
// parameters are available in the script: `_subset_freq`, `_superset_freq`,
// `_subset_size`, and `_superset_size`.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_scripted
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significantterms-aggregation.html#_scripted
// for details.
func (sh *ScriptSignificanceHeuristic) Script(script *Script) *ScriptSignificanceHeuristic {
sh.script = script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms_test.go
index 2f87373d7..a5b269671 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms_test.go
@@ -204,7 +204,7 @@ func TestSignificantTermsAggregationWithScript(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"significant_terms":{"field":"crime_type","script_heuristic":{"script":"_subset_freq/(_superset_freq - _subset_freq + 1)"}}}`
+ expected := `{"significant_terms":{"field":"crime_type","script_heuristic":{"script":{"source":"_subset_freq/(_superset_freq - _subset_freq + 1)"}}}}`
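+ // Elasticsearch 6.x serializes inline scripts as objects with a "source"
+ // key (previously a bare string), hence the updated expectation.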
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_text.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_text.go
new file mode 100644
index 000000000..de761613c
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_text.go
@@ -0,0 +1,245 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SignificantTextAggregation returns interesting or unusual occurrences
+// of free-text terms in a set.
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-bucket-significanttext-aggregation.html
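+//
+// A minimal usage sketch (client, the "news" index name, and ctx are
+// illustrative and not defined in this file):
+//
+// agg := NewSignificantTextAggregation().
+// Field("content").
+// FilterDuplicateText(true)
+// res, err := client.Search().Index("news").Aggregation("keywords", agg).Do(ctx)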
+type SignificantTextAggregation struct {
+ field string
+ subAggregations map[string]Aggregation
+ meta map[string]interface{}
+
+ sourceFieldNames []string
+ filterDuplicateText *bool
+ includeExclude *TermsAggregationIncludeExclude
+ filter Query
+ bucketCountThresholds *BucketCountThresholds
+ significanceHeuristic SignificanceHeuristic
+}
+
+func NewSignificantTextAggregation() *SignificantTextAggregation {
+ return &SignificantTextAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a *SignificantTextAggregation) Field(field string) *SignificantTextAggregation {
+ a.field = field
+ return a
+}
+
+func (a *SignificantTextAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTextAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Meta sets the meta data to be included in the aggregation response.
+func (a *SignificantTextAggregation) Meta(metaData map[string]interface{}) *SignificantTextAggregation {
+ a.meta = metaData
+ return a
+}
+
+func (a *SignificantTextAggregation) SourceFieldNames(names ...string) *SignificantTextAggregation {
+ a.sourceFieldNames = names
+ return a
+}
+
+func (a *SignificantTextAggregation) FilterDuplicateText(filter bool) *SignificantTextAggregation {
+ a.filterDuplicateText = &filter
+ return a
+}
+
+func (a *SignificantTextAggregation) MinDocCount(minDocCount int64) *SignificantTextAggregation {
+ if a.bucketCountThresholds == nil {
+ a.bucketCountThresholds = &BucketCountThresholds{}
+ }
+ a.bucketCountThresholds.MinDocCount = &minDocCount
+ return a
+}
+
+func (a *SignificantTextAggregation) ShardMinDocCount(shardMinDocCount int64) *SignificantTextAggregation {
+ if a.bucketCountThresholds == nil {
+ a.bucketCountThresholds = &BucketCountThresholds{}
+ }
+ a.bucketCountThresholds.ShardMinDocCount = &shardMinDocCount
+ return a
+}
+
+func (a *SignificantTextAggregation) Size(size int) *SignificantTextAggregation {
+ if a.bucketCountThresholds == nil {
+ a.bucketCountThresholds = &BucketCountThresholds{}
+ }
+ a.bucketCountThresholds.RequiredSize = &size
+ return a
+}
+
+func (a *SignificantTextAggregation) ShardSize(shardSize int) *SignificantTextAggregation {
+ if a.bucketCountThresholds == nil {
+ a.bucketCountThresholds = &BucketCountThresholds{}
+ }
+ a.bucketCountThresholds.ShardSize = &shardSize
+ return a
+}
+
+func (a *SignificantTextAggregation) BackgroundFilter(filter Query) *SignificantTextAggregation {
+ a.filter = filter
+ return a
+}
+
+func (a *SignificantTextAggregation) SignificanceHeuristic(heuristic SignificanceHeuristic) *SignificantTextAggregation {
+ a.significanceHeuristic = heuristic
+ return a
+}
+
+func (a *SignificantTextAggregation) Include(regexp string) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.Include = regexp
+ return a
+}
+
+func (a *SignificantTextAggregation) IncludeValues(values ...interface{}) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.IncludeValues = append(a.includeExclude.IncludeValues, values...)
+ return a
+}
+
+func (a *SignificantTextAggregation) Exclude(regexp string) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.Exclude = regexp
+ return a
+}
+
+func (a *SignificantTextAggregation) ExcludeValues(values ...interface{}) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.ExcludeValues = append(a.includeExclude.ExcludeValues, values...)
+ return a
+}
+
+func (a *SignificantTextAggregation) Partition(p int) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.Partition = p
+ return a
+}
+
+func (a *SignificantTextAggregation) NumPartitions(n int) *SignificantTextAggregation {
+ if a.includeExclude == nil {
+ a.includeExclude = &TermsAggregationIncludeExclude{}
+ }
+ a.includeExclude.NumPartitions = n
+ return a
+}
+
+func (a *SignificantTextAggregation) Source() (interface{}, error) {
+ // Example:
+ // {
+ // "query" : {
+ // "match" : {"content" : "Bird flu"}
+ // },
+ // "aggregations" : {
+ // "my_sample" : {
+ // "sampler": {
+ // "shard_size" : 100
+ // },
+ // "aggregations": {
+ // "keywords" : {
+ // "significant_text" : { "field" : "content" }
+ // }
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the
+ // { "significant_text" : { "field" : "content" }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["significant_text"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.bucketCountThresholds != nil {
+ if a.bucketCountThresholds.RequiredSize != nil {
+ opts["size"] = (*a.bucketCountThresholds).RequiredSize
+ }
+ if a.bucketCountThresholds.ShardSize != nil {
+ opts["shard_size"] = (*a.bucketCountThresholds).ShardSize
+ }
+ if a.bucketCountThresholds.MinDocCount != nil {
+ opts["min_doc_count"] = (*a.bucketCountThresholds).MinDocCount
+ }
+ if a.bucketCountThresholds.ShardMinDocCount != nil {
+ opts["shard_min_doc_count"] = (*a.bucketCountThresholds).ShardMinDocCount
+ }
+ }
+ if a.filter != nil {
+ src, err := a.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts["background_filter"] = src
+ }
+ if a.significanceHeuristic != nil {
+ name := a.significanceHeuristic.Name()
+ src, err := a.significanceHeuristic.Source()
+ if err != nil {
+ return nil, err
+ }
+ opts[name] = src
+ }
+ // Include/Exclude
+ if ie := a.includeExclude; ie != nil {
+ // Include
+ if ie.Include != "" {
+ opts["include"] = ie.Include
+ } else if len(ie.IncludeValues) > 0 {
+ opts["include"] = ie.IncludeValues
+ } else if ie.NumPartitions > 0 {
+ inc := make(map[string]interface{})
+ inc["partition"] = ie.Partition
+ inc["num_partitions"] = ie.NumPartitions
+ opts["include"] = inc
+ }
+ // Exclude
+ if ie.Exclude != "" {
+ opts["exclude"] = ie.Exclude
+ } else if len(ie.ExcludeValues) > 0 {
+ opts["exclude"] = ie.ExcludeValues
+ }
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ src, err := aggregate.Source()
+ if err != nil {
+ return nil, err
+ }
+ aggsMap[name] = src
+ }
+ }
+
+ // Add Meta data if available
+ if len(a.meta) > 0 {
+ source["meta"] = a.meta
+ }
+
+ return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_text_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_text_test.go
new file mode 100644
index 000000000..53ac4461d
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_text_test.go
@@ -0,0 +1,66 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func TestSignificantTextAggregation(t *testing.T) {
+ agg := NewSignificantTextAggregation().Field("content")
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_text":{"field":"content"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTextAggregationWithArgs(t *testing.T) {
+ agg := NewSignificantTextAggregation().
+ Field("content").
+ ShardSize(5).
+ MinDocCount(10).
+ BackgroundFilter(NewTermQuery("city", "London"))
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"significant_text":{"background_filter":{"term":{"city":"London"}},"field":"content","min_doc_count":10,"shard_size":5}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestSignificantTextAggregationWithMetaData(t *testing.T) {
+ agg := NewSignificantTextAggregation().Field("content")
+ agg = agg.Meta(map[string]interface{}{"name": "Oliver"})
+ src, err := agg.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"meta":{"name":"Oliver"},"significant_text":{"field":"content"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_matrix_stats.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_matrix_stats.go
index 875da0718..785f392b6 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_matrix_stats.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_matrix_stats.go
@@ -5,7 +5,7 @@
package elastic
// MatrixStatsAggregation ...
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.3/search-aggregations-metrics-stats-aggregation.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-stats-aggregation.html
// for details.
type MatrixStatsAggregation struct {
fields []string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go
index ff337a8cd..2b764e065 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go
@@ -9,7 +9,7 @@ package elastic
// aggregated documents. These values can be extracted either from
// specific numeric fields in the documents, or be generated by
// a provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-avg-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-avg-aggregation.html
type AvgAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go
index 8f6f447c4..3b999c849 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go
@@ -8,7 +8,7 @@ package elastic
// calculates an approximate count of distinct values.
// Values can be extracted either from specific fields in the document
// or generated by a script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-cardinality-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-cardinality-aggregation.html
type CardinalityAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go
index 95b312686..4e0bbe65a 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go
@@ -8,7 +8,7 @@ package elastic
// computes stats over numeric values extracted from the aggregated documents.
// These values can be extracted either from specific numeric fields
// in the documents, or be generated by a provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-extendedstats-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-extendedstats-aggregation.html
type ExtendedStatsAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go
index c263a76b4..406f2d000 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go
@@ -6,7 +6,7 @@ package elastic
// GeoBoundsAggregation is a metric aggregation that computes the
// bounding box containing all geo_point values for a field.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-geobounds-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-geobounds-aggregation.html
type GeoBoundsAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go
index b62130676..acdfa14a8 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go
@@ -9,7 +9,7 @@ package elastic
// the aggregated documents. These values can be extracted either from
// specific numeric fields in the documents, or be generated by
// a provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-max-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-max-aggregation.html
type MaxAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go
index c1ca6922b..af63585da 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go
@@ -9,7 +9,7 @@ package elastic
// aggregated documents. These values can be extracted either from
// specific numeric fields in the documents, or be generated by a
// provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-min-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-min-aggregation.html
type MinAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go
index 3e0595e88..674fc41f9 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go
@@ -5,7 +5,7 @@
package elastic
// PercentileRanksAggregation
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-percentile-rank-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-rank-aggregation.html
type PercentileRanksAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go
index 411f9c50f..a1d78c8f2 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go
@@ -5,7 +5,7 @@
package elastic
// PercentilesAggregation
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-percentile-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-percentile-aggregation.html
type PercentilesAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go
index 400b79b00..b9bbe7cff 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go
@@ -8,7 +8,7 @@ package elastic
// over numeric values extracted from the aggregated documents.
// These values can be extracted either from specific numeric fields
// in the documents, or be generated by a provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-stats-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-stats-aggregation.html
type StatsAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go
index f959a3e54..e1c07c9c1 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go
@@ -8,7 +8,7 @@ package elastic
// numeric values that are extracted from the aggregated documents.
// These values can be extracted either from specific numeric fields
// in the documents, or be generated by a provided script.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-sum-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-sum-aggregation.html
type SumAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go
index 43dd36cdb..2b181895e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go
@@ -13,7 +13,7 @@ package elastic
// a bucket aggregator. One or more bucket aggregators determine the
// properties by which a result set is sliced into buckets.
//
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-top-hits-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-top-hits-aggregation.html
type TopHitsAggregation struct {
searchSource *SearchSource
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go
index 4e7281d62..d56f1f873 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go
@@ -11,7 +11,7 @@ package elastic
// used in conjunction with other single-value aggregations.
// For example, when computing the avg one might be interested in the
// number of values the average is computed over.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-valuecount-aggregation.html
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-metrics-valuecount-aggregation.html
type ValueCountAggregation struct {
field string
script *Script
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go
index 7eea9310d..f37a9bdb8 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go
@@ -10,7 +10,7 @@ package elastic
// be a multi-bucket aggregation.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-avg-bucket-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-avg-bucket-aggregation.html
type AvgBucketAggregation struct {
format string
gapPolicy string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go
index 13cad638f..34e356964 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go
@@ -10,7 +10,7 @@ package elastic
// numeric and the script must return a numeric value.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-bucket-script-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-bucket-script-aggregation.html
type BucketScriptAggregation struct {
format string
gapPolicy string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script_test.go
index b4e6bf1c0..3c101c706 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script_test.go
@@ -23,7 +23,7 @@ func TestBucketScriptAggregation(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"bucket_script":{"buckets_path":{"tShirtSales":"t-shirts\u003esales","totalSales":"total_sales"},"script":"tShirtSales / totalSales * 100"}}`
+ expected := `{"bucket_script":{"buckets_path":{"tShirtSales":"t-shirts\u003esales","totalSales":"total_sales"},"script":{"source":"tShirtSales / totalSales * 100"}}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go
index f3c938519..233414d70 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go
@@ -12,7 +12,7 @@ package elastic
// will be evaluated as false and all other values will evaluate to true.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-bucket-selector-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-bucket-selector-aggregation.html
type BucketSelectorAggregation struct {
format string
gapPolicy string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector_test.go
index dd276a867..e378c2832 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector_test.go
@@ -22,7 +22,7 @@ func TestBucketSelectorAggregation(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"bucket_selector":{"buckets_path":{"totalSales":"total_sales"},"script":"totalSales \u003e= 1000"}}`
+ expected := `{"bucket_selector":{"buckets_path":{"totalSales":"total_sales"},"script":{"source":"totalSales \u003e= 1000"}}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go
index 95546f1cb..80a1db42d 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go
@@ -10,7 +10,7 @@ package elastic
// histogram must have min_doc_count set to 0 (default for histogram aggregations).
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-cumulative-sum-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-cumulative-sum-aggregation.html
type CumulativeSumAggregation struct {
format string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go
index 2c3c7e03a..ee7114e25 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go
@@ -10,7 +10,7 @@ package elastic
// histogram must have min_doc_count set to 0 (default for histogram aggregations).
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-derivative-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-derivative-aggregation.html
type DerivativeAggregation struct {
format string
gapPolicy string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go
index 5a10b0e45..5da049561 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go
@@ -11,7 +11,7 @@ package elastic
// be a multi-bucket aggregation.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-max-bucket-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-max-bucket-aggregation.html
type MaxBucketAggregation struct {
format string
gapPolicy string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go
index 96982250c..463bb919e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go
@@ -11,7 +11,7 @@ package elastic
// be a multi-bucket aggregation.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-min-bucket-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-min-bucket-aggregation.html
type MinBucketAggregation struct {
format string
gapPolicy string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go
index cf94342f7..821d73842 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go
@@ -8,7 +8,7 @@ package elastic
// across the data and emit the average value of that window.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html
type MovAvgAggregation struct {
format string
gapPolicy string
@@ -162,7 +162,7 @@ func (a *MovAvgAggregation) Source() (interface{}, error) {
}
// -- Models for moving averages --
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_models
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_models
// MovAvgModel specifies the model to use with the MovAvgAggregation.
type MovAvgModel interface {
@@ -175,7 +175,7 @@ type MovAvgModel interface {
// EWMAMovAvgModel calculates an exponentially weighted moving average.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_ewma_exponentially_weighted
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_ewma_exponentially_weighted
type EWMAMovAvgModel struct {
alpha *float64
}
@@ -213,7 +213,7 @@ func (m *EWMAMovAvgModel) Settings() map[string]interface{} {
// HoltLinearMovAvgModel calculates a doubly exponential weighted moving average.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_holt_linear
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_holt_linear
type HoltLinearMovAvgModel struct {
alpha *float64
beta *float64
@@ -262,7 +262,7 @@ func (m *HoltLinearMovAvgModel) Settings() map[string]interface{} {
// HoltWintersMovAvgModel calculates a triple exponential weighted moving average.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_holt_winters
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_holt_winters
type HoltWintersMovAvgModel struct {
alpha *float64
beta *float64
@@ -349,7 +349,7 @@ func (m *HoltWintersMovAvgModel) Settings() map[string]interface{} {
// by position in collection.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_linear
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_linear
type LinearMovAvgModel struct {
}
@@ -373,7 +373,7 @@ func (m *LinearMovAvgModel) Settings() map[string]interface{} {
// SimpleMovAvgModel calculates a simple unweighted (arithmetic) moving average.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_simple
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-movavg-aggregation.html#_simple
type SimpleMovAvgModel struct {
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_percentiles_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_percentiles_bucket.go
index 4caca7432..9a3556269 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_percentiles_bucket.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_percentiles_bucket.go
@@ -10,7 +10,7 @@ package elastic
// be a multi-bucket aggregation.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-percentiles-bucket-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-percentiles-bucket-aggregation.html
type PercentilesBucketAggregation struct {
format string
gapPolicy string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go
index 84ae43004..e13b94ea9 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go
@@ -9,7 +9,7 @@ package elastic
// subtracted from itself at different time lags or periods.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-serialdiff-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-serialdiff-aggregation.html
type SerialDiffAggregation struct {
format string
gapPolicy string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_stats_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_stats_bucket.go
index fb0a94afc..e68a420f2 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_stats_bucket.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_stats_bucket.go
@@ -10,7 +10,7 @@ package elastic
// be a multi-bucket aggregation.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-stats-bucket-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-stats-bucket-aggregation.html
type StatsBucketAggregation struct {
format string
gapPolicy string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go
index 1f78efa56..c22ae8f50 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go
@@ -10,7 +10,7 @@ package elastic
// be a multi-bucket aggregation.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-sum-bucket-aggregation.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-aggregations-pipeline-sum-bucket-aggregation.html
type SumBucketAggregation struct {
format string
gapPolicy string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_test.go
index 058257774..24dd4eb0f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_test.go
@@ -13,20 +13,10 @@ func TestAggsIntegrationAvgBucket(t *testing.T) {
//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
-
- if esversion < "2.0" {
- t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
- return
- }
-
// Match all should return all documents
builder := client.Search().
- Index(testIndexName).
- Type("order").
+ Index(testOrderIndex).
+ Type("doc").
Query(NewMatchAllQuery()).
Pretty(true)
h := NewDateHistogramAggregation().Field("time").Interval("month")
@@ -66,20 +56,10 @@ func TestAggsIntegrationDerivative(t *testing.T) {
//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
-
- if esversion < "2.0" {
- t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
- return
- }
-
// Match all should return all documents
builder := client.Search().
- Index(testIndexName).
- Type("order").
+ Index(testOrderIndex).
+ Type("doc").
Query(NewMatchAllQuery()).
Pretty(true)
h := NewDateHistogramAggregation().Field("time").Interval("month")
@@ -207,20 +187,10 @@ func TestAggsIntegrationMaxBucket(t *testing.T) {
//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
-
- if esversion < "2.0" {
- t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
- return
- }
-
// Match all should return all documents
builder := client.Search().
- Index(testIndexName).
- Type("order").
+ Index(testOrderIndex).
+ Type("doc").
Query(NewMatchAllQuery()).
Pretty(true)
h := NewDateHistogramAggregation().Field("time").Interval("month")
@@ -266,20 +236,10 @@ func TestAggsIntegrationMinBucket(t *testing.T) {
//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
-
- if esversion < "2.0" {
- t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
- return
- }
-
// Match all should return all documents
builder := client.Search().
- Index(testIndexName).
- Type("order").
+ Index(testOrderIndex).
+ Type("doc").
Query(NewMatchAllQuery()).
Pretty(true)
h := NewDateHistogramAggregation().Field("time").Interval("month")
@@ -325,20 +285,10 @@ func TestAggsIntegrationSumBucket(t *testing.T) {
//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
-
- if esversion < "2.0" {
- t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
- return
- }
-
// Match all should return all documents
builder := client.Search().
- Index(testIndexName).
- Type("order").
+ Index(testOrderIndex).
+ Type("doc").
Query(NewMatchAllQuery()).
Pretty(true)
h := NewDateHistogramAggregation().Field("time").Interval("month")
@@ -378,20 +328,10 @@ func TestAggsIntegrationMovAvg(t *testing.T) {
//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
-
- if esversion < "2.0" {
- t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
- return
- }
-
// Match all should return all documents
builder := client.Search().
- Index(testIndexName).
- Type("order").
+ Index(testOrderIndex).
+ Type("doc").
Query(NewMatchAllQuery()).
Pretty(true)
h := NewDateHistogramAggregation().Field("time").Interval("month")
@@ -500,20 +440,10 @@ func TestAggsIntegrationCumulativeSum(t *testing.T) {
//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
-
- if esversion < "2.0" {
- t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
- return
- }
-
// Match all should return all documents
builder := client.Search().
- Index(testIndexName).
- Type("order").
+ Index(testOrderIndex).
+ Type("doc").
Query(NewMatchAllQuery()).
Pretty(true)
h := NewDateHistogramAggregation().Field("time").Interval("month")
@@ -650,22 +580,13 @@ func TestAggsIntegrationCumulativeSum(t *testing.T) {
}
func TestAggsIntegrationBucketScript(t *testing.T) {
- client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
-
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
-
- if esversion < "2.0" {
- t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
- return
- }
+ // client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
// Match all should return all documents
builder := client.Search().
- Index(testIndexName).
- Type("order").
+ Index(testOrderIndex).
+ Type("doc").
Query(NewMatchAllQuery()).
Pretty(true)
h := NewDateHistogramAggregation().Field("time").Interval("month")
@@ -810,20 +731,10 @@ func TestAggsIntegrationBucketSelector(t *testing.T) {
//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
-
- if esversion < "2.0" {
- t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
- return
- }
-
// Match all should return all documents
builder := client.Search().
- Index(testIndexName).
- Type("order").
+ Index(testOrderIndex).
+ Type("doc").
Query(NewMatchAllQuery()).
Pretty(true)
h := NewDateHistogramAggregation().Field("time").Interval("month")
@@ -870,20 +781,10 @@ func TestAggsIntegrationSerialDiff(t *testing.T) {
//client := setupTestClientAndCreateIndexAndAddDocs(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))
client := setupTestClientAndCreateIndexAndAddDocs(t)
- esversion, err := client.ElasticsearchVersion(DefaultURL)
- if err != nil {
- t.Fatal(err)
- }
-
- if esversion < "2.0" {
- t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
- return
- }
-
// Match all should return all documents
builder := client.Search().
- Index(testIndexName).
- Type("order").
+ Index(testOrderIndex).
+ Type("doc").
Query(NewMatchAllQuery()).
Pretty(true)
h := NewDateHistogramAggregation().Field("time").Interval("month")
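Note: the guard deleted from each of these tests boiled down to the helper sketched below (the suite never named such a function; skipBelowVersion is hypothetical). With the vendored client now tracking Elasticsearch 6.x, where pipeline aggregations always exist, the check was dead code.

func skipBelowVersion(t *testing.T, client *Client, min string) {
	esversion, err := client.ElasticsearchVersion(DefaultURL)
	if err != nil {
		t.Fatal(err)
	}
	// Lexicographic comparison, exactly as the removed code did it.
	if esversion < min {
		t.Skipf("Elasticsearch %s does not have pipeline aggregations.", esversion)
	}
}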
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go
index c730e3b43..9d6fa8d27 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_test.go
@@ -48,17 +48,17 @@ func TestAggs(t *testing.T) {
}
// Add all documents
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -1000,7 +1000,7 @@ func TestAggsMarshal(t *testing.T) {
}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -2420,7 +2420,7 @@ func TestAggsBucketDateRange(t *testing.T) {
}
}
-func TestAggsBucketIPv4Range(t *testing.T) {
+func TestAggsBucketIPRange(t *testing.T) {
s := `{
"ip_ranges": {
"buckets" : [
@@ -2444,7 +2444,7 @@ func TestAggsBucketIPv4Range(t *testing.T) {
t.Fatalf("expected no error decoding; got: %v", err)
}
- agg, found := aggs.IPv4Range("ip_ranges")
+ agg, found := aggs.IPRange("ip_ranges")
if !found {
t.Fatalf("expected aggregation to be found; got: %v", found)
}
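Note: call sites that decode an ip_range aggregation follow the rename above; a minimal before/after, assuming an Aggregations value named aggs inside a test:

// elastic 5.x:
// agg, found := aggs.IPv4Range("ip_ranges")
// elastic 6.x (this upgrade):
agg, found := aggs.IPRange("ip_ranges")
if !found {
	t.Fatalf("expected aggregation to be found; got: %v", found)
}
_ = agg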
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_collapse_builder.go b/vendor/gopkg.in/olivere/elastic.v5/search_collapse_builder.go
index 48ed17acb..b3c628ba3 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_collapse_builder.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_collapse_builder.go
@@ -5,7 +5,7 @@
package elastic
// CollapseBuilder enables field collapsing on a search request.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.3/search-request-collapse.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-collapse.html
// for details.
type CollapseBuilder struct {
field string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go
index 8ae223834..a1ff17596 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go
@@ -9,7 +9,7 @@ import "fmt"
// A bool query matches documents matching boolean
// combinations of other queries.
// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-bool-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-bool-query.html
type BoolQuery struct {
Query
mustClauses []Query
@@ -17,7 +17,6 @@ type BoolQuery struct {
filterClauses []Query
shouldClauses []Query
boost *float64
- disableCoord *bool
minimumShouldMatch string
adjustPureNegative *bool
queryName string
@@ -58,11 +57,6 @@ func (q *BoolQuery) Boost(boost float64) *BoolQuery {
return q
}
-func (q *BoolQuery) DisableCoord(disableCoord bool) *BoolQuery {
- q.disableCoord = &disableCoord
- return q
-}
-
func (q *BoolQuery) MinimumShouldMatch(minimumShouldMatch string) *BoolQuery {
q.minimumShouldMatch = minimumShouldMatch
return q
@@ -106,7 +100,7 @@ func (q *BoolQuery) Source() (interface{}, error) {
// "term" : { "tag" : "elasticsearch" }
// }
// ],
- // "minimum_number_should_match" : 1,
+ // "minimum_should_match" : 1,
// "boost" : 1.0
// }
// }
@@ -195,9 +189,6 @@ func (q *BoolQuery) Source() (interface{}, error) {
if q.boost != nil {
boolClause["boost"] = *q.boost
}
- if q.disableCoord != nil {
- boolClause["disable_coord"] = *q.disableCoord
- }
if q.minimumShouldMatch != "" {
boolClause["minimum_should_match"] = q.minimumShouldMatch
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool_test.go
index 1eb2038fd..cdcc38de1 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool_test.go
@@ -16,7 +16,6 @@ func TestBoolQuery(t *testing.T) {
q = q.Filter(NewTermQuery("account", "1"))
q = q.Should(NewTermQuery("tag", "sometag"), NewTermQuery("tag", "sometagtag"))
q = q.Boost(10)
- q = q.DisableCoord(true)
q = q.QueryName("Test")
src, err := q.Source()
if err != nil {
@@ -27,7 +26,7 @@ func TestBoolQuery(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"bool":{"_name":"Test","boost":10,"disable_coord":true,"filter":{"term":{"account":"1"}},"must":{"term":{"tag":"wow"}},"must_not":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"should":[{"term":{"tag":"sometag"}},{"term":{"tag":"sometagtag"}}]}}`
+ expected := `{"bool":{"_name":"Test","boost":10,"filter":{"term":{"account":"1"}},"must":{"term":{"tag":"wow"}},"must_not":{"range":{"age":{"from":10,"include_lower":true,"include_upper":true,"to":20}}},"should":[{"term":{"tag":"sometag"}},{"term":{"tag":"sometagtag"}}]}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
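Note: coordination factors were removed from Lucene 7 / Elasticsearch 6 scoring, so BoolQuery (and CommonTermsQuery further down) drop DisableCoord outright instead of sending a parameter the server would reject. A sketch of the adjusted call site, using only methods that survive the upgrade:

q := NewBoolQuery().
	Must(NewTermQuery("tag", "wow")).
	Filter(NewTermQuery("account", "1")).
	Boost(10).
	QueryName("Test")
// q.DisableCoord(true) no longer compiles; "disable_coord" is never serialized.
src, err := q.Source()
if err != nil {
	t.Fatal(err)
}
_ = src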
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go
index 3a31237c9..0060a30a8 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go
@@ -7,7 +7,7 @@ package elastic
// A boosting query can be used to effectively
// demote results that match a given query.
// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-boosting-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-boosting-query.html
type BoostingQuery struct {
Query
positiveClause Query
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go
index a1a37b37c..93a03de54 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go
@@ -8,7 +8,7 @@ package elastic
// which improves the precision and recall of search results
// (by taking stopwords into account), without sacrificing performance.
// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-common-terms-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-common-terms-query.html
type CommonTermsQuery struct {
Query
name string
@@ -22,7 +22,6 @@ type CommonTermsQuery struct {
lowFreqMinimumShouldMatch string
analyzer string
boost *float64
- disableCoord *bool
queryName string
}
@@ -76,11 +75,6 @@ func (q *CommonTermsQuery) Boost(boost float64) *CommonTermsQuery {
return q
}
-func (q *CommonTermsQuery) DisableCoord(disableCoord bool) *CommonTermsQuery {
- q.disableCoord = &disableCoord
- return q
-}
-
func (q *CommonTermsQuery) QueryName(queryName string) *CommonTermsQuery {
q.queryName = queryName
return q
@@ -132,9 +126,6 @@ func (q *CommonTermsQuery) Source() (interface{}, error) {
if q.analyzer != "" {
query["analyzer"] = q.analyzer
}
- if q.disableCoord != nil {
- query["disable_coord"] = *q.disableCoord
- }
if q.boost != nil {
query["boost"] = *q.boost
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms_test.go
index be5a381c7..e841e7731 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms_test.go
@@ -36,17 +36,17 @@ func TestSearchQueriesCommonTermsQuery(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go
index 3ba879958..285d91817 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go
@@ -8,7 +8,7 @@ package elastic
// a constant score equal to the query boost for every document in the filter.
//
// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-constant-score-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-constant-score-query.html
type ConstantScoreQuery struct {
filter Query
boost *float64
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go
index af9a40d37..7a4f53a97 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go
@@ -10,7 +10,7 @@ package elastic
// increment for any additional matching subqueries.
//
// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-dis-max-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-dis-max-query.html
type DisMaxQuery struct {
queries []Query
boost *float64
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go
index 6c2ebd369..ac7378bad 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go
@@ -8,7 +8,7 @@ package elastic
// has a value in them.
//
// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-exists-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-exists-query.html
type ExistsQuery struct {
name string
queryName string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go
index ec34302f8..4cabd9bd9 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go
@@ -10,7 +10,7 @@ package elastic
// to compute the score on a filtered set of documents.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
type FunctionScoreQuery struct {
query Query
filter Query
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go
index 622b645bb..84cc52de9 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go
@@ -18,7 +18,7 @@ type ScoreFunction interface {
// -- Exponential Decay --
// ExponentialDecayFunction builds an exponential decay score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
// for details.
type ExponentialDecayFunction struct {
fieldName string
@@ -75,7 +75,7 @@ func (fn *ExponentialDecayFunction) Offset(offset interface{}) *ExponentialDecay
}
// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
// for details.
func (fn *ExponentialDecayFunction) Weight(weight float64) *ExponentialDecayFunction {
fn.weight = &weight
@@ -120,7 +120,7 @@ func (fn *ExponentialDecayFunction) Source() (interface{}, error) {
// -- Gauss Decay --
// GaussDecayFunction builds a gauss decay score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
// for details.
type GaussDecayFunction struct {
fieldName string
@@ -177,7 +177,7 @@ func (fn *GaussDecayFunction) Offset(offset interface{}) *GaussDecayFunction {
}
// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
// for details.
func (fn *GaussDecayFunction) Weight(weight float64) *GaussDecayFunction {
fn.weight = &weight
@@ -223,7 +223,7 @@ func (fn *GaussDecayFunction) Source() (interface{}, error) {
// -- Linear Decay --
// LinearDecayFunction builds a linear decay score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html
// for details.
type LinearDecayFunction struct {
fieldName string
@@ -280,7 +280,7 @@ func (fn *LinearDecayFunction) Offset(offset interface{}) *LinearDecayFunction {
}
// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
// for details.
func (fn *LinearDecayFunction) Weight(weight float64) *LinearDecayFunction {
fn.weight = &weight
@@ -336,7 +336,7 @@ func (fn *LinearDecayFunction) Source() (interface{}, error) {
// compute or influence the score of documents that match with the inner
// query or filter.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_script_score
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_script_score
// for details.
type ScriptFunction struct {
script *Script
@@ -363,7 +363,7 @@ func (fn *ScriptFunction) Script(script *Script) *ScriptFunction {
}
// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
// for details.
func (fn *ScriptFunction) Weight(weight float64) *ScriptFunction {
fn.weight = &weight
@@ -394,7 +394,7 @@ func (fn *ScriptFunction) Source() (interface{}, error) {
// FieldValueFactorFunction is a function score function that allows you
// to use a field from a document to influence the score.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_field_value_factor.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_field_value_factor.
type FieldValueFactorFunction struct {
field string
factor *float64
@@ -435,7 +435,7 @@ func (fn *FieldValueFactorFunction) Modifier(modifier string) *FieldValueFactorF
}
// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
// for details.
func (fn *FieldValueFactorFunction) Weight(weight float64) *FieldValueFactorFunction {
fn.weight = &weight
@@ -477,7 +477,7 @@ func (fn *FieldValueFactorFunction) Source() (interface{}, error) {
// WeightFactorFunction builds a weight factor function that multiplies
// the weight to the score.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_weight
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_weight
// for details.
type WeightFactorFunction struct {
weight float64
@@ -495,7 +495,7 @@ func (fn *WeightFactorFunction) Name() string {
}
// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
// for details.
func (fn *WeightFactorFunction) Weight(weight float64) *WeightFactorFunction {
fn.weight = weight
@@ -517,7 +517,7 @@ func (fn *WeightFactorFunction) Source() (interface{}, error) {
// -- Random --
// RandomFunction builds a random score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_random
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_random
// for details.
type RandomFunction struct {
seed interface{}
@@ -543,7 +543,7 @@ func (fn *RandomFunction) Seed(seed interface{}) *RandomFunction {
}
// Weight adjusts the score of the score function.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-function-score-query.html#_using_function_score
// for details.
func (fn *RandomFunction) Weight(weight float64) *RandomFunction {
fn.weight = &weight
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_test.go
index a8e7430ce..256752d18 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_test.go
@@ -121,7 +121,7 @@ func TestFieldValueFactorWithMultipleScoreFuncsAndWeights(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"function_score":{"boost":2,"boost_mode":"multiply","functions":[{"field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"weight":2.5},{"script_score":{"script":"_score * doc['my_numeric_field'].value"},"weight":1.25},{"weight":0.5}],"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}`
+ expected := `{"function_score":{"boost":2,"boost_mode":"multiply","functions":[{"field_value_factor":{"factor":2,"field":"income","modifier":"sqrt"},"weight":2.5},{"script_score":{"script":{"source":"_score * doc['my_numeric_field'].value"}},"weight":1.25},{"weight":0.5}],"max_boost":12,"query":{"term":{"name.last":"banon"}},"score_mode":"max"}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
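Note: the expected JSON changes because 6.x serializes an inline script as an object with a "source" key rather than a bare string. Sketched with constructors the library already exposes:

fn := NewScriptFunction(NewScript("_score * doc['my_numeric_field'].value"))
// 5.x wire format: "script_score":{"script":"_score * doc['my_numeric_field'].value"}
// 6.x wire format: "script_score":{"script":{"source":"_score * doc['my_numeric_field'].value"}}
_ = fn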
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go
index b98f9c7f0..02b6c52c2 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go
@@ -8,7 +8,7 @@ package elastic
// string fields, and a +/- margin on numeric and date fields.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-fuzzy-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-fuzzy-query.html
type FuzzyQuery struct {
name string
value interface{}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go
index e53340e64..0418620d8 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go
@@ -10,7 +10,7 @@ import "errors"
// a bounding box.
//
// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-geo-bounding-box-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-geo-bounding-box-query.html
type GeoBoundingBoxQuery struct {
name string
top *float64
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go
index a10bd5e3c..00e62725f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go
@@ -8,7 +8,7 @@ package elastic
// within a specific distance from a geo point.
//
// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-geo-distance-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-geo-distance-query.html
type GeoDistanceQuery struct {
name string
distance string
@@ -16,7 +16,6 @@ type GeoDistanceQuery struct {
lon float64
geohash string
distanceType string
- optimizeBbox string
queryName string
}
@@ -62,11 +61,6 @@ func (q *GeoDistanceQuery) DistanceType(distanceType string) *GeoDistanceQuery {
return q
}
-func (q *GeoDistanceQuery) OptimizeBbox(optimizeBbox string) *GeoDistanceQuery {
- q.optimizeBbox = optimizeBbox
- return q
-}
-
func (q *GeoDistanceQuery) QueryName(queryName string) *GeoDistanceQuery {
q.queryName = queryName
return q
@@ -103,9 +97,6 @@ func (q *GeoDistanceQuery) Source() (interface{}, error) {
if q.distanceType != "" {
params["distance_type"] = q.distanceType
}
- if q.optimizeBbox != "" {
- params["optimize_bbox"] = q.optimizeBbox
- }
if q.queryName != "" {
params["_name"] = q.queryName
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance_test.go
index 7b91d94e8..dd169575a 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance_test.go
@@ -15,7 +15,6 @@ func TestGeoDistanceQuery(t *testing.T) {
q = q.Lon(-70)
q = q.Distance("200km")
q = q.DistanceType("plane")
- q = q.OptimizeBbox("memory")
src, err := q.Source()
if err != nil {
t.Fatal(err)
@@ -25,7 +24,7 @@ func TestGeoDistanceQuery(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"geo_distance":{"distance":"200km","distance_type":"plane","optimize_bbox":"memory","pin.location":{"lat":40,"lon":-70}}}`
+ expected := `{"geo_distance":{"distance":"200km","distance_type":"plane","pin.location":{"lat":40,"lon":-70}}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
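Note: optimize_bbox was removed on the server in Elasticsearch 6.0, so the builder loses it as well, with no replacement parameter. The surviving options:

q := NewGeoDistanceQuery("pin.location").
	Lat(40).
	Lon(-70).
	Distance("200km").
	DistanceType("plane")
// q.OptimizeBbox("memory") is gone; remove the call rather than substituting anything.
_ = q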
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go
index 1faaf24e0..7678c3f3b 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go
@@ -7,7 +7,7 @@ package elastic
// GeoPolygonQuery allows to include hits that only fall within a polygon of points.
//
// For more details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-geo-polygon-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-geo-polygon-query.html
type GeoPolygonQuery struct {
name string
points []*GeoPoint
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go
index 27cea7ad6..41e7429c4 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go
@@ -8,7 +8,7 @@ package elastic
// in parent documents that have child docs matching the query.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-has-child-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-has-child-query.html
type HasChildQuery struct {
query Query
childType string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go
index 7df110cd2..5e1b650af 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go
@@ -11,7 +11,7 @@ package elastic
// same manner as has_child query.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-has-parent-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-has-parent-query.html
type HasParentQuery struct {
query Query
parentType string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go
index 42cc65672..e067aebbe 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go
@@ -8,7 +8,7 @@ package elastic
// Note, this query uses the _uid field.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-ids-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-ids-query.html
type IdsQuery struct {
types []string
values []string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices.go
deleted file mode 100644
index ed5ec9d84..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// IndicesQuery can be used when executed across multiple indices, allowing
-// to have a query that executes only when executed on an index that matches
-// a specific list of indices, and another query that executes when it is
-// executed on an index that does not match the listed indices.
-//
-// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-indices-query.html
-type IndicesQuery struct {
- query Query
- indices []string
- noMatchQueryType string
- noMatchQuery Query
- queryName string
-}
-
-// NewIndicesQuery creates and initializes a new indices query.
-func NewIndicesQuery(query Query, indices ...string) *IndicesQuery {
- return &IndicesQuery{
- query: query,
- indices: indices,
- }
-}
-
-// NoMatchQuery sets the query to use when it executes on an index that
-// does not match the indices provided.
-func (q *IndicesQuery) NoMatchQuery(query Query) *IndicesQuery {
- q.noMatchQuery = query
- return q
-}
-
-// NoMatchQueryType sets the no match query which can be either all or none.
-func (q *IndicesQuery) NoMatchQueryType(typ string) *IndicesQuery {
- q.noMatchQueryType = typ
- return q
-}
-
-// QueryName sets the query name for the filter.
-func (q *IndicesQuery) QueryName(queryName string) *IndicesQuery {
- q.queryName = queryName
- return q
-}
-
-// Source returns JSON for the function score query.
-func (q *IndicesQuery) Source() (interface{}, error) {
- // {
- // "indices" : {
- // "indices" : ["index1", "index2"],
- // "query" : {
- // "term" : { "tag" : "wow" }
- // },
- // "no_match_query" : {
- // "term" : { "tag" : "kow" }
- // }
- // }
- // }
-
- source := make(map[string]interface{})
- params := make(map[string]interface{})
- source["indices"] = params
-
- params["indices"] = q.indices
-
- src, err := q.query.Source()
- if err != nil {
- return nil, err
- }
- params["query"] = src
-
- if q.noMatchQuery != nil {
- src, err := q.noMatchQuery.Source()
- if err != nil {
- return nil, err
- }
- params["no_match_query"] = src
- } else if q.noMatchQueryType != "" {
- params["no_match_query"] = q.noMatchQueryType
- }
- if q.queryName != "" {
- params["_name"] = q.queryName
- }
-
- return source, nil
-}
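Note: the indices query itself was removed in Elasticsearch 6.0. Upstream guidance is to search the concrete indices directly or to steer per-index clauses with a term on the _index metadata field; a rough equivalent of the example from the deleted file, under that assumption:

q := NewBoolQuery().Should(
	// The old "query" branch, limited to the listed indices:
	NewBoolQuery().Must(
		NewTermsQuery("_index", "index1", "index2"),
		NewTermQuery("tag", "wow"),
	),
	// The old "no_match_query" branch, for every other index:
	NewBoolQuery().
		Must(NewTermQuery("tag", "kow")).
		MustNot(NewTermsQuery("_index", "index1", "index2")),
)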
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go
index 1f2f16f69..b38b12452 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go
@@ -12,7 +12,7 @@ package elastic
// or use one of the shortcuts e.g. NewMatchPhraseQuery(...).
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-match-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-query.html
type MatchQuery struct {
name string
text interface{}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go
index 5551eea30..3829c8af0 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go
@@ -8,7 +8,7 @@ package elastic
// giving them all a _score of 1.0.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-match-all-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-all-query.html
type MatchAllQuery struct {
boost *float64
queryName string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_none.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_none.go
index 06d036e71..9afe16716 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_none.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_none.go
@@ -8,7 +8,7 @@ package elastic
// MatchAllQuery.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.6/query-dsl-match-all-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-all-query.html
type MatchNoneQuery struct {
queryName string
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_phrase.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_phrase.go
index fdded2e76..0e4c6327e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_phrase.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_phrase.go
@@ -8,7 +8,7 @@ package elastic
// the analyzed text.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.6/query-dsl-match-query-phrase.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-query-phrase.html
type MatchPhraseQuery struct {
name string
value interface{}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_phrase_prefix.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_phrase_prefix.go
index 1eeba8af5..10a88668d 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_phrase_prefix.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_phrase_prefix.go
@@ -8,7 +8,7 @@ package elastic
// prefix matches on the last term in the text.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.6/query-dsl-match-query-phrase-prefix.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-match-query-phrase-prefix.html
type MatchPhrasePrefixQuery struct {
name string
value interface{}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go
index 40c2d575d..5c71e291f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go
@@ -13,7 +13,7 @@ import "errors"
// how the terms should be selected and how the query is formed.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-mlt-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-mlt-query.html
type MoreLikeThisQuery struct {
fields []string
docs []*MoreLikeThisQueryItem
@@ -25,8 +25,8 @@ type MoreLikeThisQuery struct {
stopWords []string
minDocFreq *int
maxDocFreq *int
- minWordLen *int
- maxWordLen *int
+ minWordLength *int
+ maxWordLength *int
boostTerms *float64
boost *float64
analyzer string
@@ -147,15 +147,15 @@ func (q *MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) *MoreLikeThisQuery {
// MinWordLength sets the minimum word length below which words will be
// ignored. It defaults to 0.
-func (q *MoreLikeThisQuery) MinWordLen(minWordLen int) *MoreLikeThisQuery {
- q.minWordLen = &minWordLen
+func (q *MoreLikeThisQuery) MinWordLength(minWordLength int) *MoreLikeThisQuery {
+ q.minWordLength = &minWordLength
return q
}
-// MaxWordLen sets the maximum word length above which words will be ignored.
+// MaxWordLength sets the maximum word length above which words will be ignored.
// Defaults to unbounded (0).
-func (q *MoreLikeThisQuery) MaxWordLen(maxWordLen int) *MoreLikeThisQuery {
- q.maxWordLen = &maxWordLen
+func (q *MoreLikeThisQuery) MaxWordLength(maxWordLength int) *MoreLikeThisQuery {
+ q.maxWordLength = &maxWordLength
return q
}
@@ -254,11 +254,11 @@ func (q *MoreLikeThisQuery) Source() (interface{}, error) {
if q.maxDocFreq != nil {
params["max_doc_freq"] = *q.maxDocFreq
}
- if q.minWordLen != nil {
- params["min_word_len"] = *q.minWordLen
+ if q.minWordLength != nil {
+ params["min_word_length"] = *q.minWordLength
}
- if q.maxWordLen != nil {
- params["max_word_len"] = *q.maxWordLen
+ if q.maxWordLength != nil {
+ params["max_word_length"] = *q.maxWordLength
}
if q.boostTerms != nil {
params["boost_terms"] = *q.boostTerms
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this_test.go
index 6fc5b1c72..dcbbe74d1 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this_test.go
@@ -57,17 +57,17 @@ func TestMoreLikeThisQuery(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go
index 9a149fed5..b6ff2107e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go
@@ -12,7 +12,7 @@ import (
// MultiMatchQuery builds on the MatchQuery to allow multi-field queries.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-multi-match-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-multi-match-query.html
type MultiMatchQuery struct {
text interface{}
fields []string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go
index a95cc2b80..d0a342283 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go
@@ -10,7 +10,7 @@ package elastic
// root parent doc (or parent nested mapping).
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-nested-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-nested-query.html
type NestedQuery struct {
query Query
path string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_parent_id.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_parent_id.go
index bd11cc291..c0b610f12 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_parent_id.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_parent_id.go
@@ -8,7 +8,7 @@ package elastic
// particular parent. Given the following mapping definition.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-parent-id-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-parent-id-query.html
type ParentIdQuery struct {
typ string
id string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go
index 16f7611ed..a7605655b 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go
@@ -9,10 +9,10 @@ import "errors"
// PercolatorQuery can be used to match queries stored in an index.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-percolate-query.html
type PercolatorQuery struct {
field string
- documentType string
+ documentType string // deprecated
document interface{}
indexedDocumentIndex string
indexedDocumentType string
@@ -32,6 +32,7 @@ func (q *PercolatorQuery) Field(field string) *PercolatorQuery {
return q
}
+// Deprecated: DocumentType is deprecated as of 6.0.
func (q *PercolatorQuery) DocumentType(typ string) *PercolatorQuery {
q.documentType = typ
return q
@@ -77,9 +78,6 @@ func (q *PercolatorQuery) Source() (interface{}, error) {
if len(q.field) == 0 {
return nil, errors.New("elastic: Field is required in PercolatorQuery")
}
- if len(q.documentType) == 0 {
- return nil, errors.New("elastic: DocumentType is required in PercolatorQuery")
- }
if q.document == nil {
return nil, errors.New("elastic: Document is required in PercolatorQuery")
}
@@ -91,7 +89,9 @@ func (q *PercolatorQuery) Source() (interface{}, error) {
params := make(map[string]interface{})
source["percolate"] = params
params["field"] = q.field
- params["document_type"] = q.documentType
+ if q.documentType != "" {
+ params["document_type"] = q.documentType
+ }
params["document"] = q.document
if len(q.indexedDocumentIndex) > 0 {
params["index"] = q.indexedDocumentIndex
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator_test.go
index 8a22d4614..edc7be626 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator_test.go
@@ -12,7 +12,6 @@ import (
func TestPercolatorQuery(t *testing.T) {
q := NewPercolatorQuery().
Field("query").
- DocumentType("doctype").
Document(map[string]interface{}{
"message": "Some message",
})
@@ -25,7 +24,7 @@ func TestPercolatorQuery(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"percolate":{"document":{"message":"Some message"},"document_type":"doctype","field":"query"}}`
+ expected := `{"percolate":{"document":{"message":"Some message"},"field":"query"}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
@@ -34,12 +33,10 @@ func TestPercolatorQuery(t *testing.T) {
func TestPercolatorQueryWithDetails(t *testing.T) {
q := NewPercolatorQuery().
Field("query").
- DocumentType("doctype").
Document(map[string]interface{}{
"message": "Some message",
}).
IndexedDocumentIndex("index").
- IndexedDocumentType("type").
IndexedDocumentId("1").
IndexedDocumentRouting("route").
IndexedDocumentPreference("one").
@@ -53,7 +50,7 @@ func TestPercolatorQueryWithDetails(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"percolate":{"document":{"message":"Some message"},"document_type":"doctype","field":"query","id":"1","index":"index","preference":"one","routing":"route","type":"type","version":1}}`
+ expected := `{"percolate":{"document":{"message":"Some message"},"field":"query","id":"1","index":"index","preference":"one","routing":"route","version":1}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
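Note: with indices limited to a single mapping type in 6.x, percolation no longer needs a document type: DocumentType is kept only for backward compatibility, the required-field check is gone, and "document_type" is omitted from the JSON unless explicitly set. A current-style query, mirroring the updated test:

q := NewPercolatorQuery().
	Field("query").
	Document(map[string]interface{}{"message": "Some message"})
// renders: {"percolate":{"document":{"message":"Some message"},"field":"query"}}
_ = q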
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go
index 0d5077553..075bcc7ba 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go
@@ -8,7 +8,7 @@ package elastic
// with a specified prefix (not analyzed).
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-prefix-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-prefix-query.html
type PrefixQuery struct {
name string
prefix string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix_example_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix_example_test.go
index 1a421784e..73950f1f3 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix_example_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix_example_test.go
@@ -7,7 +7,7 @@ package elastic_test
import (
"context"
- "gopkg.in/olivere/elastic.v5"
+ "github.com/olivere/elastic"
)
func ExamplePrefixQuery() {
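Note: the in-tree examples now import the 6.x path. The package name is still elastic, so only the import line moves:

import (
	"context"

	"github.com/olivere/elastic" // was: gopkg.in/olivere/elastic.v5
)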
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go
index 427e54c5b..a52c8b1a5 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go
@@ -11,37 +11,36 @@ import (
// QueryStringQuery uses the query parser in order to parse its content.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-query-string-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-query-string-query.html
type QueryStringQuery struct {
- queryString string
- defaultField string
- defaultOperator string
- analyzer string
- quoteAnalyzer string
- quoteFieldSuffix string
- autoGeneratePhraseQueries *bool
- allowLeadingWildcard *bool
- lowercaseExpandedTerms *bool
- enablePositionIncrements *bool
- analyzeWildcard *bool
- locale string
- boost *float64
- fuzziness string
- fuzzyPrefixLength *int
- fuzzyMaxExpansions *int
- fuzzyRewrite string
- phraseSlop *int
- fields []string
- fieldBoosts map[string]*float64
- useDisMax *bool
- tieBreaker *float64
- rewrite string
- minimumShouldMatch string
- lenient *bool
- queryName string
- timeZone string
- maxDeterminizedStates *int
- escape *bool
+ queryString string
+ defaultField string
+ defaultOperator string
+ analyzer string
+ quoteAnalyzer string
+ quoteFieldSuffix string
+ allowLeadingWildcard *bool
+ lowercaseExpandedTerms *bool // Deprecated: Decision is now made by the analyzer
+ enablePositionIncrements *bool
+ analyzeWildcard *bool
+ locale string // Deprecated: Decision is now made by the analyzer
+ boost *float64
+ fuzziness string
+ fuzzyPrefixLength *int
+ fuzzyMaxExpansions *int
+ fuzzyRewrite string
+ phraseSlop *int
+ fields []string
+ fieldBoosts map[string]*float64
+ tieBreaker *float64
+ rewrite string
+ minimumShouldMatch string
+ lenient *bool
+ queryName string
+ timeZone string
+ maxDeterminizedStates *int
+ escape *bool
+ typ string
}
// NewQueryStringQuery creates and initializes a new QueryStringQuery.
@@ -67,6 +66,13 @@ func (q *QueryStringQuery) Field(field string) *QueryStringQuery {
return q
}
+// Type sets how multiple fields should be combined to build textual part queries,
+// e.g. "best_fields".
+func (q *QueryStringQuery) Type(typ string) *QueryStringQuery {
+ q.typ = typ
+ return q
+}
+
// FieldWithBoost adds a field to run the query string against with a specific boost.
func (q *QueryStringQuery) FieldWithBoost(field string, boost float64) *QueryStringQuery {
q.fields = append(q.fields, field)
@@ -74,14 +80,6 @@ func (q *QueryStringQuery) FieldWithBoost(field string, boost float64) *QueryStr
return q
}
-// UseDisMax specifies whether to combine queries using dis max or boolean
-// query when more than one field is used with the query string. Defaults
-// to dismax (true).
-func (q *QueryStringQuery) UseDisMax(useDisMax bool) *QueryStringQuery {
- q.useDisMax = &useDisMax
- return q
-}
-
// TieBreaker is used when more than one field is used with the query string,
// and combined queries are using dismax.
func (q *QueryStringQuery) TieBreaker(tieBreaker float64) *QueryStringQuery {
@@ -119,15 +117,6 @@ func (q *QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) *QueryStringQuery
return q
}
-// AutoGeneratePhraseQueries indicates whether or not phrase queries will
-// be automatically generated when the analyzer returns more than one term
-// from whitespace delimited text. Set to false if phrase queries should only
-// be generated when surrounded by double quotes.
-func (q *QueryStringQuery) AutoGeneratePhraseQueries(autoGeneratePhraseQueries bool) *QueryStringQuery {
- q.autoGeneratePhraseQueries = &autoGeneratePhraseQueries
- return q
-}
-
// MaxDeterminizedState protects against too-difficult regular expression queries.
func (q *QueryStringQuery) MaxDeterminizedState(maxDeterminizedStates int) *QueryStringQuery {
q.maxDeterminizedStates = &maxDeterminizedStates
@@ -143,6 +132,8 @@ func (q *QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) *Quer
// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
// and range queries are automatically lower-cased or not. Default is true.
+//
+// Deprecated: Decision is now made by the analyzer.
func (q *QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *QueryStringQuery {
q.lowercaseExpandedTerms = &lowercaseExpandedTerms
return q
@@ -233,6 +224,9 @@ func (q *QueryStringQuery) QueryName(queryName string) *QueryStringQuery {
return q
}
+// Locale specifies the locale to be used for string conversions.
+//
+// Deprecated: Decision is now made by the analyzer.
func (q *QueryStringQuery) Locale(locale string) *QueryStringQuery {
q.locale = locale
return q
@@ -282,9 +276,6 @@ func (q *QueryStringQuery) Source() (interface{}, error) {
if q.tieBreaker != nil {
query["tie_breaker"] = *q.tieBreaker
}
- if q.useDisMax != nil {
- query["use_dis_max"] = *q.useDisMax
- }
if q.defaultOperator != "" {
query["default_operator"] = q.defaultOperator
}
@@ -294,9 +285,6 @@ func (q *QueryStringQuery) Source() (interface{}, error) {
if q.quoteAnalyzer != "" {
query["quote_analyzer"] = q.quoteAnalyzer
}
- if q.autoGeneratePhraseQueries != nil {
- query["auto_generate_phrase_queries"] = *q.autoGeneratePhraseQueries
- }
if q.maxDeterminizedStates != nil {
query["max_determinized_states"] = *q.maxDeterminizedStates
}
@@ -354,6 +342,9 @@ func (q *QueryStringQuery) Source() (interface{}, error) {
if q.escape != nil {
query["escape"] = *q.escape
}
+ if q.typ != "" {
+ query["type"] = q.typ
+ }
return source, nil
}
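Note: use_dis_max and auto_generate_phrase_queries disappear because 6.x always applies dis_max semantics across fields and leaves phrase generation to the analyzer; the new Type setter exposes the "type" parameter instead. A sketch:

q := NewQueryStringQuery("golang").
	Field("message").
	Field("user").
	Type("best_fields"). // new with this upgrade
	TieBreaker(0.3)
// q.UseDisMax(true) and q.AutoGeneratePhraseQueries(true) no longer compile.
_ = q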
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go
index 54303fb4a..1b92dee23 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go
@@ -7,7 +7,7 @@ package elastic
// RangeQuery matches documents with fields that have terms within a certain range.
//
// For details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-range-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-range-query.html
type RangeQuery struct {
name string
from interface{}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go
index 636e4baf9..a08b533cb 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go
@@ -7,7 +7,7 @@ package elastic
// RegexpQuery allows you to use regular expression term queries.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-regexp-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-regexp-query.html
type RegexpQuery struct {
name string
regexp string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go
index 664555b3e..d430f4c8f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go
@@ -9,7 +9,7 @@ import "errors"
// ScriptQuery allows to define scripts as filters.
//
// For details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-script-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-script-query.html
type ScriptQuery struct {
script *Script
queryName string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_script_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_script_test.go
index 8bf9f8a11..66ec106d5 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_script_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_script_test.go
@@ -20,7 +20,7 @@ func TestScriptQuery(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"script":{"script":"doc['num1'.value \u003e 1"}}`
+ expected := `{"script":{"script":{"source":"doc['num1'.value \u003e 1"}}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
@@ -38,7 +38,7 @@ func TestScriptQueryWithParams(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"script":{"_name":"MyQueryName","script":"doc['num1'.value \u003e 1"}}`
+ expected := `{"script":{"_name":"MyQueryName","script":{"source":"doc['num1'.value \u003e 1"}}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go
index 764fa0a20..462ea5533 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go
@@ -15,7 +15,7 @@ import (
// and discards invalid parts of the query.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-simple-query-string-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-simple-query-string-query.html
type SimpleQueryStringQuery struct {
queryText string
analyzer string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string_test.go
index cef7c5f51..ea4a341ec 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string_test.go
@@ -36,17 +36,17 @@ func TestSimpleQueryStringQueryExec(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_slice.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_slice.go
index 0ebf88009..e1b1db928 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_slice.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_slice.go
@@ -6,7 +6,7 @@ package elastic
// SliceQuery allows partitioning the documents into several slices.
// It is used e.g. to slice scroll operations in Elasticsearch 5.0 or later.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html#sliced-scroll
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
// for details.
type SliceQuery struct {
field string
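
Sketch of a sliced scroll (editorial, same assumptions): N workers can drain an index in parallel, each scrolling its own slice.

	// Worker 0 of 2: Id selects this worker's slice, Max is the total slice count.
	slice := NewSliceQuery().Id(0).Max(2)
	res, err := client.Scroll(testIndexName).Slice(slice).Size(100).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	_ = res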
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go
index 051f6dee3..9a445e0ec 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go
@@ -8,7 +8,7 @@ package elastic
// in the inverted index.
//
// For details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-term-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-term-query.html
type TermQuery struct {
name string
value interface{}
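
Usage sketch (editorial, same assumptions): term queries match the exact, unanalyzed term, so they pair naturally with keyword fields such as "user" in the test mapping below.

	if _, err := client.Search().Index(testIndexName).
		Query(NewTermQuery("user", "olivere")).
		Do(context.TODO()); err != nil {
		t.Fatal(err)
	}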
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go
index 794c1f31c..3649576dc 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go
@@ -8,7 +8,7 @@ package elastic
// of the provided terms (not analyzed).
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-terms-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-terms-query.html
type TermsQuery struct {
name string
values []interface{}
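
Usage sketch (editorial, same assumptions): like TermQuery, but matching documents that contain any of several exact values.

	if _, err := client.Search().Index(testIndexName).
		Query(NewTermsQuery("user", "olivere", "sandrae")).
		Do(context.TODO()); err != nil {
		t.Fatal(err)
	}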
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go
index 70ace4541..e7aef30df 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go
@@ -7,7 +7,7 @@ package elastic
// TypeQuery filters documents matching the provided document / mapping type.
//
// For details, see:
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-type-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-type-query.html
type TypeQuery struct {
typ string
}
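
Usage sketch (editorial, same assumptions): 6.x indices hold a single mapping type, so in these tests the filter only ever selects the "doc" type.

	if _, err := client.Search().Index(testIndexName).
		Query(NewTypeQuery("doc")).
		Do(context.TODO()); err != nil {
		t.Fatal(err)
	}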
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go
index 35f481542..ea8a0901c 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go
@@ -13,7 +13,7 @@ package elastic
// The wildcard query maps to Lucene WildcardQuery.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-wildcard-query.html
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-wildcard-query.html
type WildcardQuery struct {
name string
wildcard string
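
Usage sketch (editorial, same assumptions): ? matches a single character and * matches zero or more; leading wildcards work but are expensive.

	if _, err := client.Search().Index(testIndexName).
		Query(NewWildcardQuery("user", "oli*")).
		Do(context.TODO()); err != nil {
		t.Fatal(err)
	}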
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard_test.go
index 658c513cc..b41c8ab7b 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard_test.go
@@ -9,7 +9,7 @@ import (
"encoding/json"
"testing"
- "gopkg.in/olivere/elastic.v5"
+ "github.com/olivere/elastic"
)
func ExampleWildcardQuery() {
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_request.go b/vendor/gopkg.in/olivere/elastic.v5/search_request.go
index 03513085f..6f40ff028 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_request.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_request.go
@@ -146,7 +146,7 @@ func (r *SearchRequest) Source(source interface{}) *SearchRequest {
// header is used e.g. by MultiSearch to get information about the search header
// of one SearchRequest.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-multi-search.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-multi-search.html
func (r *SearchRequest) header() interface{} {
h := make(map[string]interface{})
if r.searchType != "" {
@@ -199,7 +199,7 @@ func (r *SearchRequest) header() interface{} {
//
// Body is used e.g. by MultiSearch to get information about the search body
// of one SearchRequest.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-multi-search.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-multi-search.html
func (r *SearchRequest) Body() interface{} {
return r.source
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_source.go b/vendor/gopkg.in/olivere/elastic.v5/search_source.go
index 6a8efd229..77b1c5093 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_source.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_source.go
@@ -77,7 +77,7 @@ func (s *SearchSource) PostFilter(postFilter Query) *SearchSource {
// Slice allows partitioning the documents into multiple slices.
// It is used e.g. to slice a scroll operation, supported in
// Elasticsearch 5.0 or later.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html#sliced-scroll
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html#sliced-scroll
// for details.
func (s *SearchSource) Slice(sliceQuery Query) *SearchSource {
s.sliceQuery = sliceQuery
@@ -168,7 +168,7 @@ func (s *SearchSource) TrackScores(trackScores bool) *SearchSource {
// SearchAfter allows a different form of pagination by using a live cursor,
// using the results of the previous page to help the retrieval of the next.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-search-after.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-search-after.html
func (s *SearchSource) SearchAfter(sortValues ...interface{}) *SearchSource {
s.searchAfterSortValues = append(s.searchAfterSortValues, sortValues...)
return s
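
A paging sketch (editorial; it assumes SearchService forwards SearchAfter to this SearchSource, and uses the setup_test.go fixtures):

	// Page 1: sort on a field with deterministic sort values.
	res, err := client.Search().Index(testIndexName).
		Query(NewMatchAllQuery()).Sort("user", true).Size(1).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	// Page 2: seed SearchAfter with the last hit's sort values.
	last := res.Hits.Hits[len(res.Hits.Hits)-1].Sort
	res, err = client.Search().Index(testIndexName).
		Query(NewMatchAllQuery()).Sort("user", true).SearchAfter(last...).Size(1).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	_ = res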
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_source_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_source_test.go
index 49e52f660..a78991bf0 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_source_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_source_test.go
@@ -132,7 +132,7 @@ func TestSearchSourceScriptFields(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"query":{"match_all":{}},"script_fields":{"test1":{"script":"doc['my_field_name'].value * 2"},"test2":{"script":{"inline":"doc['my_field_name'].value * factor","params":{"factor":3.1415927}}}}}`
+ expected := `{"query":{"match_all":{}},"script_fields":{"test1":{"script":{"source":"doc['my_field_name'].value * 2"}},"test2":{"script":{"params":{"factor":3.1415927},"source":"doc['my_field_name'].value * factor"}}}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
@@ -232,7 +232,7 @@ func TestSearchSourceMixDifferentSorters(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"query":{"match_all":{}},"sort":[{"a":{"order":"desc"}},{"b":{"order":"asc"}},{"_script":{"order":"asc","script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}]}`
+ expected := `{"query":{"match_all":{}},"sort":[{"a":{"order":"desc"}},{"b":{"order":"asc"}},{"_script":{"order":"asc","script":{"params":{"factor":1.1},"source":"doc['field_name'].value * factor"},"type":"number"}}]}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_suggester_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_suggester_test.go
index a555e3462..33bdc9275 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_suggester_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_suggester_test.go
@@ -17,17 +17,17 @@ func TestTermSuggester(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -92,17 +92,17 @@ func TestPhraseSuggester(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -150,6 +150,15 @@ func TestPhraseSuggester(t *testing.T) {
if mySuggestion.Length != 7 {
t.Errorf("expected Length = %d; got %d", 7, mySuggestion.Length)
}
+ if want, have := 1, len(mySuggestion.Options); want != have {
+ t.Errorf("expected len(options) = %d; got %d", want, have)
+ }
+ if want, have := "golang", mySuggestion.Options[0].Text; want != have {
+ t.Errorf("expected options[0].Text = %q; got %q", want, have)
+ }
+ if score := mySuggestion.Options[0].Score; score <= 0.0 {
+ t.Errorf("expected options[0].Score > 0.0; got %v", score)
+ }
}
func TestCompletionSuggester(t *testing.T) {
@@ -172,17 +181,17 @@ func TestCompletionSuggester(t *testing.T) {
}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -278,12 +287,12 @@ func TestContextSuggester(t *testing.T) {
`
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyString(tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyString(tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyString(tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyString(tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_terms_lookup.go b/vendor/gopkg.in/olivere/elastic.v5/search_terms_lookup.go
index e59e15c12..9a2456bdd 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_terms_lookup.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_terms_lookup.go
@@ -7,7 +7,7 @@ package elastic
// TermsLookup encapsulates the parameters needed to fetch terms.
//
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.3/query-dsl-terms-query.html#query-dsl-terms-lookup.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/query-dsl-terms-query.html#query-dsl-terms-lookup.
type TermsLookup struct {
index string
typ string
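
A hedged sketch of the lookup flow (editorial; the "users" index, document id, and "followers" path are invented, and TermsQuery.TermsLookup is assumed to chain as shown): the term list is fetched from another document at query time instead of being spelled out in the query.

	lookup := NewTermsLookup().Index("users").Type("doc").Id("2").Path("followers")
	q := NewTermsQuery("user").TermsLookup(lookup)
	if _, err := client.Search().Index(testIndexName).Query(q).Do(context.TODO()); err != nil {
		t.Fatal(err)
	}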
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_test.go b/vendor/gopkg.in/olivere/elastic.v5/search_test.go
index 96346b8b0..097c26525 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/search_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_test.go
@@ -29,10 +29,10 @@ func TestSearchMatchAll(t *testing.T) {
if searchResult.Hits == nil {
t.Errorf("expected SearchResult.Hits != nil; got nil")
}
- if got, want := searchResult.Hits.TotalHits, int64(12); got != want {
+ if got, want := searchResult.Hits.TotalHits, int64(3); got != want {
t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
}
- if got, want := len(searchResult.Hits.Hits), 12; got != want {
+ if got, want := len(searchResult.Hits.Hits), 3; got != want {
t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
}
@@ -66,10 +66,10 @@ func TestSearchMatchAllWithRequestCacheDisabled(t *testing.T) {
if searchResult.Hits == nil {
t.Errorf("expected SearchResult.Hits != nil; got nil")
}
- if got, want := searchResult.Hits.TotalHits, int64(12); got != want {
+ if got, want := searchResult.Hits.TotalHits, int64(3); got != want {
t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, got)
}
- if got, want := len(searchResult.Hits.Hits), 12; got != want {
+ if got, want := len(searchResult.Hits.Hits), 3; got != want {
t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, got)
}
}
@@ -195,6 +195,51 @@ func TestSearchResultEach(t *testing.T) {
}
}
+func TestSearchResultEachNoSource(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocsNoSource(t)
+
+ all := NewMatchAllQuery()
+ searchResult, err := client.Search().Index(testNoSourceIndexName).Query(all).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Iterate over non-ptr type
+ var aTweet tweet
+ count := 0
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweet)) {
+ count++
+ tw, ok := item.(tweet)
+ if !ok {
+ t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
+ }
+
+ if tw.User != "" {
+ t.Fatalf("expected no _source hit to be empty tweet; got: %v", reflect.ValueOf(item))
+ }
+ }
+ if count != 2 {
+ t.Errorf("expected to find 2 hits; got: %d", count)
+ }
+
+ // Iterate over ptr-type
+ count = 0
+ var aTweetPtr *tweet
+ for _, item := range searchResult.Each(reflect.TypeOf(aTweetPtr)) {
+ count++
+ tw, ok := item.(*tweet)
+ if !ok {
+ t.Fatalf("expected hit to be serialized as tweet; got: %v", reflect.ValueOf(item))
+ }
+ if tw != nil {
+ t.Fatal("expected hit to be nil")
+ }
+ }
+ if count != 2 {
+ t.Errorf("expected to find 2 hits; got: %d", count)
+ }
+}
+
func TestSearchSorting(t *testing.T) {
client := setupTestClientAndCreateIndex(t)
@@ -215,17 +260,17 @@ func TestSearchSorting(t *testing.T) {
}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -288,17 +333,17 @@ func TestSearchSortingBySorters(t *testing.T) {
}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -350,17 +395,17 @@ func TestSearchSpecificFields(t *testing.T) {
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -442,17 +487,17 @@ func TestSearchExplain(t *testing.T) {
}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -520,17 +565,17 @@ func TestSearchSource(t *testing.T) {
}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -583,17 +628,17 @@ func TestSearchRawString(t *testing.T) {
}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -639,17 +684,17 @@ func TestSearchSearchSource(t *testing.T) {
}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -684,299 +729,249 @@ func TestSearchSearchSource(t *testing.T) {
}
func TestSearchInnerHitsOnHasChild(t *testing.T) {
+ // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
client := setupTestClientAndCreateIndex(t)
- // Check for valid ES version
- esversion, err := client.ElasticsearchVersion(DefaultURL)
+ ctx := context.Background()
+
+ // Create join index
+ createIndex, err := client.CreateIndex(testJoinIndex).Body(testJoinMapping).Do(ctx)
if err != nil {
t.Fatal(err)
}
- if esversion < "1.5.0" {
- t.Skip("InnerHits feature is only available for Elasticsearch 1.5+")
- return
+ if createIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex)
}
- tweet1 := tweet{
- User: "olivere", Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
- tweet2 := tweet{
- User: "olivere", Retweets: 0,
- Message: "Another unrelated topic.",
- Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
- }
- comment2a := comment{User: "sandrae", Comment: "What does that even mean?"}
- tweet3 := tweet{
- User: "sandrae", Retweets: 12,
- Message: "Cycling is fun.",
- Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ // Add documents
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/parent-join.html for example code.
+ doc1 := joinDoc{
+ Message: "This is a question",
+ JoinField: &joinField{Name: "question"},
}
- comment3a := comment{User: "nico", Comment: "You bet."}
- comment3b := comment{User: "olivere", Comment: "It sure is."}
-
- // Add all documents
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do(context.TODO())
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("1").BodyJson(&doc1).Refresh("true").Do(ctx)
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
+ doc2 := joinDoc{
+ Message: "This is another question",
+ JoinField: "question",
}
- _, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do(context.TODO())
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("2").BodyJson(&doc2).Refresh("true").Do(ctx)
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
+ doc3 := joinDoc{
+ Message: "This is an answer",
+ JoinField: &joinField{
+ Name: "answer",
+ Parent: "1",
+ },
}
- _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do(context.TODO())
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("3").BodyJson(&doc3).Routing("1").Refresh("true").Do(ctx)
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do(context.TODO())
+ doc4 := joinDoc{
+ Message: "This is another answer",
+ JoinField: &joinField{
+ Name: "answer",
+ Parent: "1",
+ },
+ }
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("4").BodyJson(&doc4).Routing("1").Refresh("true").Do(ctx)
if err != nil {
t.Fatal(err)
}
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ _, err = client.Flush().Index(testJoinIndex).Do(ctx)
if err != nil {
t.Fatal(err)
}
+ // Search for all documents that have an answer, and return those answers as inner hits
bq := NewBoolQuery()
bq = bq.Must(NewMatchAllQuery())
- bq = bq.Filter(NewHasChildQuery("comment", NewMatchAllQuery()).
- InnerHit(NewInnerHit().Name("comments")))
+ bq = bq.Filter(NewHasChildQuery("answer", NewMatchAllQuery()).
+ InnerHit(NewInnerHit().Name("answers")))
searchResult, err := client.Search().
- Index(testIndexName).
+ Index(testJoinIndex).
Query(bq).
Pretty(true).
- Do(context.TODO())
+ Do(ctx)
if err != nil {
t.Fatal(err)
}
if searchResult.Hits == nil {
t.Errorf("expected SearchResult.Hits != nil; got nil")
}
- if searchResult.Hits.TotalHits != 2 {
+ if searchResult.Hits.TotalHits != 1 {
t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 2, searchResult.Hits.TotalHits)
}
- if len(searchResult.Hits.Hits) != 2 {
+ if len(searchResult.Hits.Hits) != 1 {
t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 2, len(searchResult.Hits.Hits))
}
hit := searchResult.Hits.Hits[0]
- if hit.Id != "t2" {
- t.Fatalf("expected tweet %q; got: %q", "t2", hit.Id)
- }
- if hit.InnerHits == nil {
- t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
- }
- if len(hit.InnerHits) != 1 {
- t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits))
- }
- innerHits, found := hit.InnerHits["comments"]
- if !found {
- t.Fatalf("expected inner hits for name %q", "comments")
- }
- if innerHits == nil || innerHits.Hits == nil {
- t.Fatal("expected inner hits != nil")
- }
- if len(innerHits.Hits.Hits) != 1 {
- t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits))
- }
- if innerHits.Hits.Hits[0].Id != "c2a" {
- t.Fatalf("expected inner hit with id %q; got: %q", "c2a", innerHits.Hits.Hits[0].Id)
- }
-
- hit = searchResult.Hits.Hits[1]
- if hit.Id != "t3" {
- t.Fatalf("expected tweet %q; got: %q", "t3", hit.Id)
+ if want, have := "1", hit.Id; want != have {
+ t.Fatalf("expected tweet %q; got: %q", want, have)
}
if hit.InnerHits == nil {
t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
}
- if len(hit.InnerHits) != 1 {
- t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits))
+ if want, have := 1, len(hit.InnerHits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
}
- innerHits, found = hit.InnerHits["comments"]
+ innerHits, found := hit.InnerHits["answers"]
if !found {
- t.Fatalf("expected inner hits for name %q", "comments")
+ t.Fatalf("expected inner hits for name %q", "answers")
}
if innerHits == nil || innerHits.Hits == nil {
t.Fatal("expected inner hits != nil")
}
- if len(innerHits.Hits.Hits) != 2 {
- t.Fatalf("expected %d inner hits; got: %d", 2, len(innerHits.Hits.Hits))
+ if want, have := 2, len(innerHits.Hits.Hits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
}
- if innerHits.Hits.Hits[0].Id != "c3a" {
- t.Fatalf("expected inner hit with id %q; got: %q", "c3a", innerHits.Hits.Hits[0].Id)
+ if want, have := "3", innerHits.Hits.Hits[0].Id; want != have {
+ t.Fatalf("expected inner hit with id %q; got: %q", want, have)
}
- if innerHits.Hits.Hits[1].Id != "c3b" {
- t.Fatalf("expected inner hit with id %q; got: %q", "c3b", innerHits.Hits.Hits[1].Id)
+ if want, have := "4", innerHits.Hits.Hits[1].Id; want != have {
+ t.Fatalf("expected inner hit with id %q; got: %q", want, have)
}
}
func TestSearchInnerHitsOnHasParent(t *testing.T) {
+ // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
client := setupTestClientAndCreateIndex(t)
- // Check for valid ES version
- esversion, err := client.ElasticsearchVersion(DefaultURL)
+ ctx := context.Background()
+
+ // Create join index
+ createIndex, err := client.CreateIndex(testJoinIndex).Body(testJoinMapping).Do(ctx)
if err != nil {
t.Fatal(err)
}
- if esversion < "1.5.0" {
- t.Skip("InnerHits feature is only available for Elasticsearch 1.5+")
- return
+ if createIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex)
}
- tweet1 := tweet{
- User: "olivere", Retweets: 108,
- Message: "Welcome to Golang and Elasticsearch.",
- Created: time.Date(2012, 12, 12, 17, 38, 34, 0, time.UTC),
- }
- tweet2 := tweet{
- User: "olivere", Retweets: 0,
- Message: "Another unrelated topic.",
- Created: time.Date(2012, 10, 10, 8, 12, 03, 0, time.UTC),
- }
- comment2a := comment{User: "sandrae", Comment: "What does that even mean?"}
- tweet3 := tweet{
- User: "sandrae", Retweets: 12,
- Message: "Cycling is fun.",
- Created: time.Date(2011, 11, 11, 10, 58, 12, 0, time.UTC),
+ // Add documents
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/parent-join.html for example code.
+ doc1 := joinDoc{
+ Message: "This is a question",
+ JoinField: &joinField{Name: "question"},
}
- comment3a := comment{User: "nico", Comment: "You bet."}
- comment3b := comment{User: "olivere", Comment: "It sure is."}
-
- // Add all documents
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("t1").BodyJson(&tweet1).Do(context.TODO())
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("1").BodyJson(&doc1).Refresh("true").Do(ctx)
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("t2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
+ doc2 := joinDoc{
+ Message: "This is another question",
+ JoinField: "question",
}
- _, err = client.Index().Index(testIndexName).Type("comment").Id("c2a").Parent("t2").BodyJson(&comment2a).Do(context.TODO())
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("2").BodyJson(&doc2).Refresh("true").Do(ctx)
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("t3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
+ doc3 := joinDoc{
+ Message: "This is an answer",
+ JoinField: &joinField{
+ Name: "answer",
+ Parent: "1",
+ },
}
- _, err = client.Index().Index(testIndexName).Type("comment").Id("c3a").Parent("t3").BodyJson(&comment3a).Do(context.TODO())
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("3").BodyJson(&doc3).Routing("1").Refresh("true").Do(ctx)
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("comment").Id("c3b").Parent("t3").BodyJson(&comment3b).Do(context.TODO())
+ doc4 := joinDoc{
+ Message: "This is another answer",
+ JoinField: &joinField{
+ Name: "answer",
+ Parent: "1",
+ },
+ }
+ _, err = client.Index().Index(testJoinIndex).Type("doc").Id("4").BodyJson(&doc4).Routing("1").Refresh("true").Do(ctx)
if err != nil {
t.Fatal(err)
}
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ _, err = client.Flush().Index(testJoinIndex).Do(ctx)
if err != nil {
t.Fatal(err)
}
+ // Search for all documents that have a parent question, and return that parent question as inner hits
bq := NewBoolQuery()
bq = bq.Must(NewMatchAllQuery())
- bq = bq.Filter(NewHasParentQuery("tweet", NewMatchAllQuery()).
- InnerHit(NewInnerHit().Name("tweets")))
+ bq = bq.Filter(NewHasParentQuery("question", NewMatchAllQuery()).
+ InnerHit(NewInnerHit().Name("answers")))
searchResult, err := client.Search().
- Index(testIndexName).
+ Index(testJoinIndex).
Query(bq).
Pretty(true).
- Do(context.TODO())
+ Do(ctx)
if err != nil {
t.Fatal(err)
}
if searchResult.Hits == nil {
t.Errorf("expected SearchResult.Hits != nil; got nil")
}
- if searchResult.Hits.TotalHits != 3 {
- t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult.Hits.TotalHits)
+ if want, have := int64(2), searchResult.Hits.TotalHits; want != have {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", want, have)
}
- if len(searchResult.Hits.Hits) != 3 {
- t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", 3, len(searchResult.Hits.Hits))
+ if want, have := 2, len(searchResult.Hits.Hits); want != have {
+ t.Errorf("expected len(SearchResult.Hits.Hits) = %d; got %d", want, have)
}
hit := searchResult.Hits.Hits[0]
- if hit.Id != "c2a" {
- t.Fatalf("expected tweet %q; got: %q", "c2a", hit.Id)
+ if want, have := "3", hit.Id; want != have {
+ t.Fatalf("expected tweet %q; got: %q", want, have)
}
if hit.InnerHits == nil {
t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
}
- if len(hit.InnerHits) != 1 {
- t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits))
+ if want, have := 1, len(hit.InnerHits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
}
- innerHits, found := hit.InnerHits["tweets"]
+ innerHits, found := hit.InnerHits["answers"]
if !found {
t.Fatalf("expected inner hits for name %q", "tweets")
}
if innerHits == nil || innerHits.Hits == nil {
t.Fatal("expected inner hits != nil")
}
- if len(innerHits.Hits.Hits) != 1 {
- t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits))
+ if want, have := 1, len(innerHits.Hits.Hits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
}
- if innerHits.Hits.Hits[0].Id != "t2" {
- t.Fatalf("expected inner hit with id %q; got: %q", "t2", innerHits.Hits.Hits[0].Id)
+ if want, have := "1", innerHits.Hits.Hits[0].Id; want != have {
+ t.Fatalf("expected inner hit with id %q; got: %q", want, have)
}
hit = searchResult.Hits.Hits[1]
- if hit.Id != "c3a" {
- t.Fatalf("expected tweet %q; got: %q", "c3a", hit.Id)
- }
- if hit.InnerHits == nil {
- t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
- }
- if len(hit.InnerHits) != 1 {
- t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits))
- }
- innerHits, found = hit.InnerHits["tweets"]
- if !found {
- t.Fatalf("expected inner hits for name %q", "tweets")
- }
- if innerHits == nil || innerHits.Hits == nil {
- t.Fatal("expected inner hits != nil")
- }
- if len(innerHits.Hits.Hits) != 1 {
- t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits))
- }
- if innerHits.Hits.Hits[0].Id != "t3" {
- t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id)
- }
-
- hit = searchResult.Hits.Hits[2]
- if hit.Id != "c3b" {
- t.Fatalf("expected tweet %q; got: %q", "c3b", hit.Id)
+ if want, have := "4", hit.Id; want != have {
+ t.Fatalf("expected tweet %q; got: %q", want, have)
}
if hit.InnerHits == nil {
t.Fatalf("expected inner hits; got: %v", hit.InnerHits)
}
- if len(hit.InnerHits) != 1 {
- t.Fatalf("expected %d inner hits; got: %d", 1, len(hit.InnerHits))
+ if want, have := 1, len(hit.InnerHits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
}
- innerHits, found = hit.InnerHits["tweets"]
+ innerHits, found = hit.InnerHits["answers"]
if !found {
t.Fatalf("expected inner hits for name %q", "tweets")
}
if innerHits == nil || innerHits.Hits == nil {
t.Fatal("expected inner hits != nil")
}
- if len(innerHits.Hits.Hits) != 1 {
- t.Fatalf("expected %d inner hits; got: %d", 1, len(innerHits.Hits.Hits))
+ if want, have := 1, len(innerHits.Hits.Hits); want != have {
+ t.Fatalf("expected %d inner hits; got: %d", want, have)
}
- if innerHits.Hits.Hits[0].Id != "t3" {
- t.Fatalf("expected inner hit with id %q; got: %q", "t3", innerHits.Hits.Hits[0].Id)
+ if want, have := "1", innerHits.Hits.Hits[0].Id; want != have {
+ t.Fatalf("expected inner hit with id %q; got: %q", want, have)
}
}
@@ -1045,7 +1040,7 @@ func TestSearchFilterPath(t *testing.T) {
all := NewMatchAllQuery()
searchResult, err := client.Search().
Index(testIndexName).
- Type("tweet").
+ Type("doc").
Query(all).
FilterPath(
"took",
@@ -1119,17 +1114,17 @@ func TestSearchAfter(t *testing.T) {
}
// Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
@@ -1168,7 +1163,7 @@ func TestSearchResultWithFieldCollapsing(t *testing.T) {
searchResult, err := client.Search().
Index(testIndexName).
- Type("tweet").
+ Type("doc").
Query(NewMatchAllQuery()).
Collapse(NewCollapseBuilder("user")).
Pretty(true).
@@ -1215,7 +1210,7 @@ func TestSearchResultWithFieldCollapsingAndInnerHits(t *testing.T) {
searchResult, err := client.Search().
Index(testIndexName).
- Type("tweet").
+ Type("doc").
Query(NewMatchAllQuery()).
Collapse(
NewCollapseBuilder("user").
diff --git a/vendor/gopkg.in/olivere/elastic.v5/setup_test.go b/vendor/gopkg.in/olivere/elastic.v5/setup_test.go
index df2206a14..480ae5d20 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/setup_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/setup_test.go
@@ -16,6 +16,7 @@ import (
const (
testIndexName = "elastic-test"
testIndexName2 = "elastic-test2"
+ testIndexName3 = "elastic-test3"
testMapping = `
{
"settings":{
@@ -23,12 +24,49 @@ const (
"number_of_replicas":0
},
"mappings":{
- "_default_": {
- "_all": {
- "enabled": true
+ "doc":{
+ "properties":{
+ "user":{
+ "type":"keyword"
+ },
+ "message":{
+ "type":"text",
+ "store": true,
+ "fielddata": true
+ },
+ "tags":{
+ "type":"keyword"
+ },
+ "location":{
+ "type":"geo_point"
+ },
+ "suggest_field":{
+ "type":"completion",
+ "contexts":[
+ {
+ "name":"user_name",
+ "type":"category"
+ }
+ ]
+ }
}
- },
- "tweet":{
+ }
+ }
+}
+`
+
+ testNoSourceIndexName = "elastic-nosource-test"
+ testNoSourceMapping = `
+{
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
+ "_source": {
+ "enabled": false
+ },
"properties":{
"user":{
"type":"keyword"
@@ -48,19 +86,51 @@ const (
"type":"completion",
"contexts":[
{
- "name": "user_name",
- "type": "category"
+ "name":"user_name",
+ "type":"category"
}
]
}
}
+ }
+ }
+}
+`
+
+ testJoinIndex = "elastic-joins"
+ testJoinMapping = `
+ {
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
},
- "comment":{
- "_parent": {
- "type": "tweet"
+ "mappings":{
+ "doc":{
+ "properties":{
+ "message":{
+ "type":"text"
+ },
+ "my_join_field": {
+ "type": "join",
+ "relations": {
+ "question": "answer"
+ }
+ }
+ }
}
- },
- "order":{
+ }
+ }
+`
+
+ testOrderIndex = "elastic-orders"
+ testOrderMapping = `
+{
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
"properties":{
"article":{
"type":"text"
@@ -76,18 +146,49 @@ const (
"format": "YYYY-MM-dd"
}
}
- },
- "doctype":{
+ }
+ }
+}
+`
+
+ /*
+ testDoctypeIndex = "elastic-doctypes"
+ testDoctypeMapping = `
+ {
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
+ "properties":{
+ "message":{
+ "type":"text",
+ "store": true,
+ "fielddata": true
+ }
+ }
+ }
+ }
+ }
+ `
+ */
+
+ testQueryIndex = "elastic-queries"
+ testQueryMapping = `
+{
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ },
+ "mappings":{
+ "doc":{
"properties":{
"message":{
"type":"text",
"store": true,
"fielddata": true
- }
- }
- },
- "queries":{
- "properties": {
+ },
"query": {
"type": "percolator"
}
@@ -123,6 +224,16 @@ func (c comment) String() string {
return fmt.Sprintf("comment{User:%q,Comment:%q}", c.User, c.Comment)
}
+type joinDoc struct {
+ Message string `json:"message"`
+ JoinField interface{} `json:"my_join_field,omitempty"`
+}
+
+type joinField struct {
+ Name string `json:"name"`
+ Parent string `json:"parent,omitempty"`
+}
+
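
For reference, these types serialize to the 6.x parent/join document shape; a quick editorial check using only encoding/json:

	doc := joinDoc{Message: "This is an answer", JoinField: &joinField{Name: "answer", Parent: "1"}}
	b, _ := json.Marshal(doc)
	// prints {"message":"This is an answer","my_join_field":{"name":"answer","parent":"1"}}
	fmt.Println(string(b))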
type order struct {
Article string `json:"article"`
Manufacturer string `json:"manufacturer"`
@@ -173,6 +284,12 @@ func setupTestClient(t logger, options ...ClientOptionFunc) (client *Client) {
client.DeleteIndex(testIndexName).Do(context.TODO())
client.DeleteIndex(testIndexName2).Do(context.TODO())
+ client.DeleteIndex(testIndexName3).Do(context.TODO())
+ client.DeleteIndex(testOrderIndex).Do(context.TODO())
+ client.DeleteIndex(testNoSourceIndexName).Do(context.TODO())
+ //client.DeleteIndex(testDoctypeIndex).Do(context.TODO())
+ client.DeleteIndex(testQueryIndex).Do(context.TODO())
+ client.DeleteIndex(testJoinIndex).Do(context.TODO())
return client
}
@@ -198,6 +315,24 @@ func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Clien
t.Errorf("expected result to be != nil; got: %v", createIndex2)
}
+ // Create no source index
+ createNoSourceIndex, err := client.CreateIndex(testNoSourceIndexName).Body(testNoSourceMapping).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createNoSourceIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createNoSourceIndex)
+ }
+
+ // Create order index
+ createOrderIndex, err := client.CreateIndex(testOrderIndex).Body(testOrderMapping).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if createOrderIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createOrderIndex)
+ }
+
return client
}
@@ -212,24 +347,26 @@ func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFu
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
- comment1 := comment{User: "nico", Comment: "You bet."}
+ //comment1 := comment{User: "nico", Comment: "You bet."}
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ _, err := client.Index().Index(testIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").Routing("someroutingkey").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("3").BodyJson(&comment1).Do(context.TODO())
+ _, err = client.Index().Index(testIndexName).Type("doc").Id("3").Routing("someroutingkey").BodyJson(&tweet3).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
+ /*
+ _, err = client.Index().Index(testIndexName).Type("comment").Id("1").Parent("3").BodyJson(&comment1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ */
// Add orders
var orders []order
@@ -243,20 +380,44 @@ func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFu
orders = append(orders, order{Article: "T-Shirt", Manufacturer: "h&m", Price: 19, Time: "2015-06-18"})
for i, o := range orders {
id := fmt.Sprintf("%d", i)
- _, err = client.Index().Index(testIndexName).Type("order").Id(id).BodyJson(&o).Do(context.TODO())
+ _, err = client.Index().Index(testOrderIndex).Type("doc").Id(id).BodyJson(&o).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
}
// Flush
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
+ _, err = client.Flush().Index(testIndexName, testOrderIndex).Do(context.TODO())
if err != nil {
t.Fatal(err)
}
return client
}
+func setupTestClientAndCreateIndexAndAddDocsNoSource(t logger, options ...ClientOptionFunc) *Client {
+ client := setupTestClientAndCreateIndex(t, options...)
+
+ // Add tweets
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+ _, err := client.Index().Index(testNoSourceIndexName).Type("doc").Id("1").BodyJson(&tweet1).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = client.Index().Index(testNoSourceIndexName).Type("doc").Id("2").BodyJson(&tweet2).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Flush
+ _, err = client.Flush().Index(testNoSourceIndexName).Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return client
+}
+
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randomString(n int) string {
@@ -266,3 +427,19 @@ func randomString(n int) string {
}
return string(b)
}
+
+type lexicographically struct {
+ strings []string
+}
+
+func (l lexicographically) Len() int {
+ return len(l.strings)
+}
+
+func (l lexicographically) Less(i, j int) bool {
+ return l.strings[i] < l.strings[j]
+}
+
+func (l lexicographically) Swap(i, j int) {
+ l.strings[i], l.strings[j] = l.strings[j], l.strings[i]
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/snapshot_create.go b/vendor/gopkg.in/olivere/elastic.v5/snapshot_create.go
index 245fdbff8..1bbd2762e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/snapshot_create.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/snapshot_create.go
@@ -11,10 +11,10 @@ import (
"net/url"
"time"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
-// SnapshotCreateService is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.x/modules-snapshots.html.
+// SnapshotCreateService is documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html.
type SnapshotCreateService struct {
client *Client
pretty bool
@@ -89,7 +89,7 @@ func (s *SnapshotCreateService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
@@ -137,7 +137,12 @@ func (s *SnapshotCreateService) Do(ctx context.Context) (*SnapshotCreateResponse
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/snapshot_create_repository.go b/vendor/gopkg.in/olivere/elastic.v5/snapshot_create_repository.go
index 9fc0a32a6..e7f6d5336 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/snapshot_create_repository.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/snapshot_create_repository.go
@@ -10,11 +10,11 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// SnapshotCreateRepositoryService creates a snapshot repository.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.3/modules-snapshots.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
// for details.
type SnapshotCreateRepositoryService struct {
client *Client
@@ -112,7 +112,7 @@ func (s *SnapshotCreateRepositoryService) buildURL() (string, url.Values, error)
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
@@ -179,7 +179,12 @@ func (s *SnapshotCreateRepositoryService) Do(ctx context.Context) (*SnapshotCrea
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "PUT", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "PUT",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -194,5 +199,7 @@ func (s *SnapshotCreateRepositoryService) Do(ctx context.Context) (*SnapshotCrea
// SnapshotCreateRepositoryResponse is the response of SnapshotCreateRepositoryService.Do.
type SnapshotCreateRepositoryResponse struct {
- Acknowledged bool `json:"acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
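
A hedged usage sketch for the repository services touched here (editorial; the repository name and filesystem location are invented, and the server must whitelist the path via path.repo):

	res, err := client.SnapshotCreateRepository("my_backup").
		Type("fs").
		Setting("location", "/mnt/backups/my_backup").
		Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if !res.Acknowledged {
		t.Error("expected repository creation to be acknowledged")
	}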
diff --git a/vendor/gopkg.in/olivere/elastic.v5/snapshot_create_test.go b/vendor/gopkg.in/olivere/elastic.v5/snapshot_create_test.go
index d3fafc50d..74b009cfe 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/snapshot_create_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/snapshot_create_test.go
@@ -37,7 +37,7 @@ func TestSnapshotPutURL(t *testing.T) {
WaitForCompletion: true,
ExpectedPath: "/_snapshot/repo/snapshot_of_sunday",
ExpectedParams: url.Values{
- "pretty": []string{"1"},
+ "pretty": []string{"true"},
"master_timeout": []string{"60s"},
"wait_for_completion": []string{"true"},
},
diff --git a/vendor/gopkg.in/olivere/elastic.v5/snapshot_delete_repository.go b/vendor/gopkg.in/olivere/elastic.v5/snapshot_delete_repository.go
index 1f402fba5..ad3e49b0e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/snapshot_delete_repository.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/snapshot_delete_repository.go
@@ -11,11 +11,11 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// SnapshotDeleteRepositoryService deletes a snapshot repository.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.3/modules-snapshots.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
// for details.
type SnapshotDeleteRepositoryService struct {
client *Client
@@ -70,7 +70,7 @@ func (s *SnapshotDeleteRepositoryService) buildURL() (string, url.Values, error)
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
@@ -107,7 +107,11 @@ func (s *SnapshotDeleteRepositoryService) Do(ctx context.Context) (*SnapshotDele
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "DELETE",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
@@ -122,5 +126,7 @@ func (s *SnapshotDeleteRepositoryService) Do(ctx context.Context) (*SnapshotDele
// SnapshotDeleteRepositoryResponse is the response of SnapshotDeleteRepositoryService.Do.
type SnapshotDeleteRepositoryResponse struct {
- Acknowledged bool `json:"acknowledged"`
+ Acknowledged bool `json:"acknowledged"`
+ ShardsAcknowledged bool `json:"shards_acknowledged"`
+ Index string `json:"index,omitempty"`
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/snapshot_get_repository.go b/vendor/gopkg.in/olivere/elastic.v5/snapshot_get_repository.go
index 10b2d0b9c..2d24c5e4c 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/snapshot_get_repository.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/snapshot_get_repository.go
@@ -11,11 +11,11 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// SnapshotGetRepositoryService reads a snapshot repository.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.3/modules-snapshots.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
// for details.
type SnapshotGetRepositoryService struct {
client *Client
@@ -76,7 +76,7 @@ func (s *SnapshotGetRepositoryService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.local != nil {
params.Set("local", fmt.Sprintf("%v", *s.local))
@@ -106,7 +106,11 @@ func (s *SnapshotGetRepositoryService) Do(ctx context.Context) (SnapshotGetRepos
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/snapshot_verify_repository.go b/vendor/gopkg.in/olivere/elastic.v5/snapshot_verify_repository.go
index 4e8c25a24..5494ab475 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/snapshot_verify_repository.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/snapshot_verify_repository.go
@@ -10,11 +10,11 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// SnapshotVerifyRepositoryService verifies a snapshot repository.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.3/modules-snapshots.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-snapshots.html
// for details.
type SnapshotVerifyRepositoryService struct {
client *Client
@@ -68,7 +68,7 @@ func (s *SnapshotVerifyRepositoryService) buildURL() (string, url.Values, error)
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.masterTimeout != "" {
params.Set("master_timeout", s.masterTimeout)
@@ -105,7 +105,11 @@ func (s *SnapshotVerifyRepositoryService) Do(ctx context.Context) (*SnapshotVeri
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/sort.go b/vendor/gopkg.in/olivere/elastic.v5/sort.go
index 122b69104..7e2b32183 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/sort.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/sort.go
@@ -9,7 +9,7 @@ import "errors"
// -- Sorter --
// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-sort.html.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-sort.html.
type Sorter interface {
Source() (interface{}, error)
}
@@ -23,9 +23,11 @@ type SortInfo struct {
Ascending bool
Missing interface{}
IgnoreUnmapped *bool
+ UnmappedType string
SortMode string
NestedFilter Query
NestedPath string
+ NestedSort *NestedSort // available in 6.1 or later
}
func (info SortInfo) Source() (interface{}, error) {
@@ -41,6 +43,9 @@ func (info SortInfo) Source() (interface{}, error) {
if info.IgnoreUnmapped != nil {
prop["ignore_unmapped"] = *info.IgnoreUnmapped
}
+ if info.UnmappedType != "" {
+ prop["unmapped_type"] = info.UnmappedType
+ }
if info.SortMode != "" {
prop["mode"] = info.SortMode
}
@@ -54,6 +59,13 @@ func (info SortInfo) Source() (interface{}, error) {
if info.NestedPath != "" {
prop["nested_path"] = info.NestedPath
}
+ if info.NestedSort != nil {
+ src, err := info.NestedSort.Source()
+ if err != nil {
+ return nil, err
+ }
+ prop["nested"] = src
+ }
source := make(map[string]interface{})
source[info.Field] = prop
return source, nil
@@ -62,7 +74,7 @@ func (info SortInfo) Source() (interface{}, error) {
// -- SortByDoc --
// SortByDoc sorts by the "_doc" field, as described in
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-scroll.html.
//
// Example:
// ss := elastic.NewSearchSource()
@@ -125,14 +137,14 @@ func (s *ScoreSort) Source() (interface{}, error) {
// FieldSort sorts by a given field.
type FieldSort struct {
Sorter
- fieldName string
- ascending bool
- missing interface{}
- ignoreUnmapped *bool
- unmappedType *string
- sortMode *string
- nestedFilter Query
- nestedPath *string
+ fieldName string
+ ascending bool
+ missing interface{}
+ unmappedType *string
+ sortMode *string
+ nestedFilter Query
+ nestedPath *string
+ nestedSort *NestedSort
}
// NewFieldSort creates a new FieldSort.
@@ -175,13 +187,6 @@ func (s *FieldSort) Missing(missing interface{}) *FieldSort {
return s
}
-// IgnoreUnmapped specifies what happens if the field does not exist in
-// the index. Set it to true to ignore, or set it to false to not ignore (default).
-func (s *FieldSort) IgnoreUnmapped(ignoreUnmapped bool) *FieldSort {
- s.ignoreUnmapped = &ignoreUnmapped
- return s
-}
-
// UnmappedType sets the type to use when the current field is not mapped
// in an index.
func (s *FieldSort) UnmappedType(typ string) *FieldSort {
@@ -211,6 +216,13 @@ func (s *FieldSort) NestedPath(nestedPath string) *FieldSort {
return s
}
+// NestedSort is available starting with 6.1 and will replace NestedFilter
+// and NestedPath.
+func (s *FieldSort) NestedSort(nestedSort *NestedSort) *FieldSort {
+ s.nestedSort = nestedSort
+ return s
+}
+
// Source returns the JSON-serializable data.
func (s *FieldSort) Source() (interface{}, error) {
source := make(map[string]interface{})
@@ -224,9 +236,6 @@ func (s *FieldSort) Source() (interface{}, error) {
if s.missing != nil {
x["missing"] = s.missing
}
- if s.ignoreUnmapped != nil {
- x["ignore_unmapped"] = *s.ignoreUnmapped
- }
if s.unmappedType != nil {
x["unmapped_type"] = *s.unmappedType
}
@@ -243,24 +252,32 @@ func (s *FieldSort) Source() (interface{}, error) {
if s.nestedPath != nil {
x["nested_path"] = *s.nestedPath
}
+ if s.nestedSort != nil {
+ src, err := s.nestedSort.Source()
+ if err != nil {
+ return nil, err
+ }
+ x["nested"] = src
+ }
return source, nil
}
// -- GeoDistanceSort --
// GeoDistanceSort allows for sorting by geographic distance.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-sort.html#_geo_distance_sorting.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-sort.html#_geo_distance_sorting.
type GeoDistanceSort struct {
Sorter
fieldName string
points []*GeoPoint
geohashes []string
- geoDistance *string
+ distanceType *string
unit string
ascending bool
sortMode *string
nestedFilter Query
nestedPath *string
+ nestedSort *NestedSort
}
// NewGeoDistanceSort creates a new sorter for geo distances.
@@ -313,22 +330,27 @@ func (s *GeoDistanceSort) GeoHashes(geohashes ...string) *GeoDistanceSort {
return s
}
-// GeoDistance represents how to compute the distance.
-// It can be sloppy_arc (default), arc, or plane.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-sort.html#_geo_distance_sorting.
-func (s *GeoDistanceSort) GeoDistance(geoDistance string) *GeoDistanceSort {
- s.geoDistance = &geoDistance
- return s
-}
-
// Unit specifies the distance unit to use. It defaults to km.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/common-options.html#distance-units
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/common-options.html#distance-units
// for details.
func (s *GeoDistanceSort) Unit(unit string) *GeoDistanceSort {
s.unit = unit
return s
}
+// GeoDistance is an alias for DistanceType.
+func (s *GeoDistanceSort) GeoDistance(geoDistance string) *GeoDistanceSort {
+ return s.DistanceType(geoDistance)
+}
+
+// DistanceType describes how to compute the distance, e.g. "arc" or "plane".
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-request-sort.html#geo-sorting
+// for details.
+func (s *GeoDistanceSort) DistanceType(distanceType string) *GeoDistanceSort {
+ s.distanceType = &distanceType
+ return s
+}
+
// SortMode specifies what values to pick in case a document contains
// multiple values for the targeted sort field. Possible values are:
// min, max, sum, and avg.
@@ -351,6 +373,13 @@ func (s *GeoDistanceSort) NestedPath(nestedPath string) *GeoDistanceSort {
return s
}
+// NestedSort is available starting with 6.1 and will replace NestedFilter
+// and NestedPath.
+func (s *GeoDistanceSort) NestedSort(nestedSort *NestedSort) *GeoDistanceSort {
+ s.nestedSort = nestedSort
+ return s
+}
+
// Source returns the JSON-serializable data.
func (s *GeoDistanceSort) Source() (interface{}, error) {
source := make(map[string]interface{})
@@ -370,8 +399,8 @@ func (s *GeoDistanceSort) Source() (interface{}, error) {
if s.unit != "" {
x["unit"] = s.unit
}
- if s.geoDistance != nil {
- x["distance_type"] = *s.geoDistance
+ if s.distanceType != nil {
+ x["distance_type"] = *s.distanceType
}
if s.ascending {
@@ -392,13 +421,20 @@ func (s *GeoDistanceSort) Source() (interface{}, error) {
if s.nestedPath != nil {
x["nested_path"] = *s.nestedPath
}
+ if s.nestedSort != nil {
+ src, err := s.nestedSort.Source()
+ if err != nil {
+ return nil, err
+ }
+ x["nested"] = src
+ }
return source, nil
}
// -- ScriptSort --
// ScriptSort sorts by a custom script. See
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/modules-scripting.html#modules-scripting
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/modules-scripting.html#modules-scripting
// for details about scripting.
type ScriptSort struct {
Sorter
@@ -408,6 +444,7 @@ type ScriptSort struct {
sortMode *string
nestedFilter Query
nestedPath *string
+ nestedSort *NestedSort
}
// NewScriptSort creates and initializes a new ScriptSort.
@@ -466,6 +503,13 @@ func (s *ScriptSort) NestedPath(nestedPath string) *ScriptSort {
return s
}
+// NestedSort is available starting with 6.1 and will replace NestedFilter
+// and NestedPath.
+func (s *ScriptSort) NestedSort(nestedSort *NestedSort) *ScriptSort {
+ s.nestedSort = nestedSort
+ return s
+}
+
// Source returns the JSON-serializable data.
func (s *ScriptSort) Source() (interface{}, error) {
if s.script == nil {
@@ -501,5 +545,70 @@ func (s *ScriptSort) Source() (interface{}, error) {
if s.nestedPath != nil {
x["nested_path"] = *s.nestedPath
}
+ if s.nestedSort != nil {
+ src, err := s.nestedSort.Source()
+ if err != nil {
+ return nil, err
+ }
+ x["nested"] = src
+ }
+ return source, nil
+}
+
+// -- NestedSort --
+
+// NestedSort is used for fields that are inside a nested object.
+// It takes a "path" argument and an optional nested filter that the
+// nested objects should match in order to be taken into account
+// for sorting.
+//
+// NestedSort is available from 6.1 and replaces nestedFilter and nestedPath
+// in the other sorters.
+type NestedSort struct {
+ Sorter
+ path string
+ filter Query
+ nestedSort *NestedSort
+}
+
+// NewNestedSort creates a new NestedSort.
+func NewNestedSort(path string) *NestedSort {
+ return &NestedSort{path: path}
+}
+
+// Filter sets the filter.
+func (s *NestedSort) Filter(filter Query) *NestedSort {
+ s.filter = filter
+ return s
+}
+
+// NestedSort embeds another level of nested sorting.
+func (s *NestedSort) NestedSort(nestedSort *NestedSort) *NestedSort {
+ s.nestedSort = nestedSort
+ return s
+}
+
+// Source returns the JSON-serializable data.
+func (s *NestedSort) Source() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ if s.path != "" {
+ source["path"] = s.path
+ }
+ if s.filter != nil {
+ src, err := s.filter.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["filter"] = src
+ }
+ if s.nestedSort != nil {
+ src, err := s.nestedSort.Source()
+ if err != nil {
+ return nil, err
+ }
+ source["nested"] = src
+ }
+
return source, nil
}
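
Taken together, the sort.go changes retire the sloppy_arc-era naming in favor of DistanceType and wire NestedSort into every sorter. A hedged sketch of the new surface, reusing the field names from the tests below:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	// GeoDistance survives only as an alias for DistanceType; "sloppy_arc"
	// is gone in 6.x, so use "arc" or "plane".
	sorter := elastic.NewGeoDistanceSort("pin.location").
		Point(-70, 40).
		Unit("km").
		DistanceType("arc").
		NestedSort( // 6.1+, replaces NestedFilter/NestedPath
			elastic.NewNestedSort("offer").
				Filter(elastic.NewTermQuery("offer.color", "blue")),
		)

	src, err := sorter.Source()
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data)) // emits distance_type, unit, and a "nested" clause
}
```
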
diff --git a/vendor/gopkg.in/olivere/elastic.v5/sort_test.go b/vendor/gopkg.in/olivere/elastic.v5/sort_test.go
index 54e6e1e5b..b54cbd98c 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/sort_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/sort_test.go
@@ -166,7 +166,7 @@ func TestGeoDistanceSort(t *testing.T) {
Order(true).
Unit("km").
SortMode("min").
- GeoDistance("sloppy_arc")
+ GeoDistance("plane")
src, err := builder.Source()
if err != nil {
t.Fatal(err)
@@ -176,7 +176,7 @@ func TestGeoDistanceSort(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","order":"asc","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}`
+ expected := `{"_geo_distance":{"distance_type":"plane","mode":"min","order":"asc","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
@@ -187,7 +187,7 @@ func TestGeoDistanceSortOrderDesc(t *testing.T) {
Point(-70, 40).
Unit("km").
SortMode("min").
- GeoDistance("sloppy_arc").
+ GeoDistance("arc").
Desc()
src, err := builder.Source()
if err != nil {
@@ -198,7 +198,7 @@ func TestGeoDistanceSortOrderDesc(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"_geo_distance":{"distance_type":"sloppy_arc","mode":"min","order":"desc","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}`
+ expected := `{"_geo_distance":{"distance_type":"arc","mode":"min","order":"desc","pin.location":[{"lat":-70,"lon":40}],"unit":"km"}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
@@ -214,7 +214,7 @@ func TestScriptSort(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"_script":{"order":"asc","script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}`
+ expected := `{"_script":{"order":"asc","script":{"params":{"factor":1.1},"source":"doc['field_name'].value * factor"},"type":"number"}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
@@ -231,7 +231,47 @@ func TestScriptSortOrderDesc(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"_script":{"order":"desc","script":{"inline":"doc['field_name'].value * factor","params":{"factor":1.1}},"type":"number"}}`
+ expected := `{"_script":{"order":"desc","script":{"params":{"factor":1.1},"source":"doc['field_name'].value * factor"},"type":"number"}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestNestedSort(t *testing.T) {
+ builder := NewNestedSort("offer").
+ Filter(NewTermQuery("offer.color", "blue"))
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"filter":{"term":{"offer.color":"blue"}},"path":"offer"}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestFieldSortWithNestedSort(t *testing.T) {
+ builder := NewFieldSort("offer.price").
+ Asc().
+ SortMode("avg").
+ NestedSort(
+ NewNestedSort("offer").Filter(NewTermQuery("offer.color", "blue")),
+ )
+ src, err := builder.Source()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"offer.price":{"mode":"avg","nested":{"filter":{"term":{"offer.color":"blue"}},"path":"offer"},"order":"asc"}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggest.go b/vendor/gopkg.in/olivere/elastic.v5/suggest.go
deleted file mode 100644
index 7249abd5f..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/suggest.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
- "strings"
-
- "gopkg.in/olivere/elastic.v5/uritemplates"
-)
-
-// SuggestService returns suggestions for text.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters.html.
-type SuggestService struct {
- client *Client
- pretty bool
- routing string
- preference string
- index []string
- suggesters []Suggester
-}
-
-// NewSuggestService creates a new instance of SuggestService.
-func NewSuggestService(client *Client) *SuggestService {
- builder := &SuggestService{
- client: client,
- }
- return builder
-}
-
-// Index adds one or more indices to use for the suggestion request.
-func (s *SuggestService) Index(index ...string) *SuggestService {
- s.index = append(s.index, index...)
- return s
-}
-
-// Pretty asks Elasticsearch to return indented JSON.
-func (s *SuggestService) Pretty(pretty bool) *SuggestService {
- s.pretty = pretty
- return s
-}
-
-// Routing specifies the routing value.
-func (s *SuggestService) Routing(routing string) *SuggestService {
- s.routing = routing
- return s
-}
-
-// Preference specifies the node or shard the operation should be
-// performed on (default: random).
-func (s *SuggestService) Preference(preference string) *SuggestService {
- s.preference = preference
- return s
-}
-
-// Suggester adds a suggester to the request.
-func (s *SuggestService) Suggester(suggester Suggester) *SuggestService {
- s.suggesters = append(s.suggesters, suggester)
- return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *SuggestService) buildURL() (string, url.Values, error) {
- var err error
- var path string
-
- if len(s.index) > 0 {
- path, err = uritemplates.Expand("/{index}/_suggest", map[string]string{
- "index": strings.Join(s.index, ","),
- })
- } else {
- path = "/_suggest"
- }
- if err != nil {
- return "", url.Values{}, err
- }
-
- // Add query string parameters
- params := url.Values{}
- if s.pretty {
- params.Set("pretty", fmt.Sprintf("%v", s.pretty))
- }
- if s.routing != "" {
- params.Set("routing", s.routing)
- }
- if s.preference != "" {
- params.Set("preference", s.preference)
- }
- return path, params, nil
-}
-
-// Do executes the request.
-func (s *SuggestService) Do(ctx context.Context) (SuggestResult, error) {
- path, params, err := s.buildURL()
- if err != nil {
- return nil, err
- }
-
- // Set body
- body := make(map[string]interface{})
- for _, s := range s.suggesters {
- src, err := s.Source(false)
- if err != nil {
- return nil, err
- }
- body[s.Name()] = src
- }
-
- // Get response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
- if err != nil {
- return nil, err
- }
-
- // There is a _shard object that cannot be deserialized.
- // So we use json.RawMessage instead.
- var suggestions map[string]*json.RawMessage
- if err := s.client.decoder.Decode(res.Body, &suggestions); err != nil {
- return nil, err
- }
-
- ret := make(SuggestResult)
- for name, result := range suggestions {
- if name != "_shards" {
- var sug []Suggestion
- if err := s.client.decoder.Decode(*result, &sug); err != nil {
- return nil, err
- }
- ret[name] = sug
- }
- }
-
- return ret, nil
-}
-
-// SuggestResult is the outcome of SuggestService.Do.
-type SuggestResult map[string][]Suggestion
-
-// Suggestion is a single suggester outcome.
-type Suggestion struct {
- Text string `json:"text"`
- Offset int `json:"offset"`
- Length int `json:"length"`
- Options []suggestionOption `json:"options"`
-}
-
-type suggestionOption struct {
- Text string `json:"text"`
- Score float64 `json:"score"`
- Freq int `json:"freq"`
- Payload interface{} `json:"payload"`
- CollateMatch bool `json:"collate_match"`
-}
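
The whole SuggestService file goes away because Elasticsearch 6.x removed the standalone /_suggest endpoint; suggesters now ride along with a normal _search request. A rough sketch of the replacement, with illustrative index and field names, assuming SearchService exposes a Suggester builder method as elsewhere in this client:

```go
package main

import (
	"context"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient() // assumes a reachable node
	if err != nil {
		panic(err)
	}

	res, err := client.Search().
		Index("twitter"). // illustrative index name
		Suggester(
			elastic.NewTermSuggester("my-term-suggester").
				Text("Goolang").
				Field("message"),
		).
		Size(0). // suggestions only, no search hits
		Do(context.Background())
	if err != nil {
		panic(err)
	}

	// Suggestions come back keyed by suggester name on the search result.
	for _, sug := range res.Suggest["my-term-suggester"] {
		for _, opt := range sug.Options {
			fmt.Println(opt.Text)
		}
	}
}
```
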
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggest_field.go b/vendor/gopkg.in/olivere/elastic.v5/suggest_field.go
index 8e15b4ec2..8405a6f9e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggest_field.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggest_field.go
@@ -67,7 +67,7 @@ func (f *SuggestField) MarshalJSON() ([]byte, error) {
if err != nil {
return nil, err
}
- source["context"] = src
+ source["contexts"] = src
default:
ctxq := make(map[string]interface{})
for _, query := range f.contextQueries {
@@ -83,7 +83,7 @@ func (f *SuggestField) MarshalJSON() ([]byte, error) {
ctxq[k] = v
}
}
- source["context"] = ctxq
+ source["contexts"] = ctxq
}
return json.Marshal(source)
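
The context → contexts key rename tracks the 6.x completion suggester mapping and recurs in every suggester in this diff. A small sketch of a SuggestField that now serializes its category context under "contexts"; the builder names follow suggester_context_category.go, the input values come from the test below, and it assumes SuggestField exposes a ContextQuery method as the MarshalJSON switch implies:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	f := elastic.NewSuggestField().
		Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch").
		Weight(1).
		ContextQuery(
			elastic.NewSuggesterCategoryMapping("color").
				FieldName("color_field").
				DefaultValues("red", "green", "blue"),
		)

	data, err := json.Marshal(f) // context queries land under "contexts" in 6.x
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
```
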
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggest_field_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggest_field_test.go
index c2ee7fdcf..426875b2f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggest_field_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggest_field_test.go
@@ -22,7 +22,7 @@ func TestSuggestField(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"context":{"color":{"default":["red","green","blue"],"path":"color_field","type":"category"},"location":{"default":{"lat":52.516275,"lon":13.377704},"neighbors":true,"precision":["5m"],"type":"geo"}},"input":["Welcome to Golang and Elasticsearch.","Golang and Elasticsearch"],"weight":1}`
+ expected := `{"contexts":{"color":{"default":["red","green","blue"],"path":"color_field","type":"category"},"location":{"default":{"lat":52.516275,"lon":13.377704},"neighbors":true,"precision":["5m"],"type":"geo"}},"input":["Welcome to Golang and Elasticsearch.","Golang and Elasticsearch"],"weight":1}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggest_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggest_test.go
deleted file mode 100644
index bdc989dbb..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/suggest_test.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
- "context"
- "testing"
-)
-
-func TestSuggestBuildURL(t *testing.T) {
- client := setupTestClient(t)
-
- tests := []struct {
- Indices []string
- Expected string
- }{
- {
- []string{},
- "/_suggest",
- },
- {
- []string{"index1"},
- "/index1/_suggest",
- },
- {
- []string{"index1", "index2"},
- "/index1%2Cindex2/_suggest",
- },
- }
-
- for i, test := range tests {
- path, _, err := client.Suggest().Index(test.Indices...).buildURL()
- if err != nil {
- t.Errorf("case #%d: %v", i+1, err)
- continue
- }
- if path != test.Expected {
- t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
- }
- }
-}
-
-func TestSuggestService(t *testing.T) {
- client := setupTestClientAndCreateIndex(t)
- // client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
-
- tweet1 := tweet{
- User: "olivere",
- Message: "Welcome to Golang and Elasticsearch.",
- Tags: []string{"golang", "elasticsearch"},
- Location: "48.1333,11.5667", // lat,lon
- Suggest: NewSuggestField().
- Input("Welcome to Golang and Elasticsearch.", "Golang and Elasticsearch").
- Weight(0),
- }
- tweet2 := tweet{
- User: "olivere",
- Message: "Another unrelated topic.",
- Tags: []string{"golang"},
- Location: "48.1189,11.4289", // lat,lon
- Suggest: NewSuggestField().
- Input("Another unrelated topic.", "Golang topic.").
- Weight(1),
- }
- tweet3 := tweet{
- User: "sandrae",
- Message: "Cycling is fun.",
- Tags: []string{"sports", "cycling"},
- Location: "47.7167,11.7167", // lat,lon
- Suggest: NewSuggestField().
- Input("Cycling is fun."),
- }
-
- // Add all documents
- _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = client.Flush().Index(testIndexName).Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
-
- // Test _suggest endpoint
- termSuggesterName := "my-term-suggester"
- termSuggester := NewTermSuggester(termSuggesterName).Text("Goolang").Field("message")
- phraseSuggesterName := "my-phrase-suggester"
- phraseSuggester := NewPhraseSuggester(phraseSuggesterName).Text("Goolang").Field("message")
- completionSuggesterName := "my-completion-suggester"
- completionSuggester := NewCompletionSuggester(completionSuggesterName).Text("Go").Field("suggest_field")
-
- result, err := client.Suggest().
- Index(testIndexName).
- Suggester(termSuggester).
- Suggester(phraseSuggester).
- Suggester(completionSuggester).
- Do(context.TODO())
- if err != nil {
- t.Fatal(err)
- }
- if result == nil {
- t.Errorf("expected result != nil; got nil")
- }
- if len(result) != 3 {
- t.Errorf("expected 3 suggester results; got %d", len(result))
- }
-
- termSuggestions, found := result[termSuggesterName]
- if !found {
- t.Errorf("expected to find Suggest[%s]; got false", termSuggesterName)
- }
- if termSuggestions == nil {
- t.Errorf("expected Suggest[%s] != nil; got nil", termSuggesterName)
- }
- if len(termSuggestions) != 1 {
- t.Errorf("expected 1 suggestion; got %d", len(termSuggestions))
- }
-
- phraseSuggestions, found := result[phraseSuggesterName]
- if !found {
- t.Errorf("expected to find Suggest[%s]; got false", phraseSuggesterName)
- }
- if phraseSuggestions == nil {
- t.Errorf("expected Suggest[%s] != nil; got nil", phraseSuggesterName)
- }
- if len(phraseSuggestions) != 1 {
- t.Errorf("expected 1 suggestion; got %d", len(phraseSuggestions))
- }
-
- completionSuggestions, found := result[completionSuggesterName]
- if !found {
- t.Errorf("expected to find Suggest[%s]; got false", completionSuggesterName)
- }
- if completionSuggestions == nil {
- t.Errorf("expected Suggest[%s] != nil; got nil", completionSuggesterName)
- }
- if len(completionSuggestions) != 1 {
- t.Errorf("expected 1 suggestion; got %d", len(completionSuggestions))
- }
- if len(completionSuggestions[0].Options) != 2 {
- t.Errorf("expected 2 suggestion options; got %d", len(completionSuggestions[0].Options))
- }
- if have, want := completionSuggestions[0].Options[0].Text, "Golang topic."; have != want {
- t.Errorf("expected Suggest[%s][0].Options[0].Text == %q; got %q", completionSuggesterName, want, have)
- }
- if have, want := completionSuggestions[0].Options[1].Text, "Golang and Elasticsearch"; have != want {
- t.Errorf("expected Suggest[%s][0].Options[1].Text == %q; got %q", completionSuggesterName, want, have)
- }
-}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go
index 1da6063a6..d2b4a326c 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go
@@ -13,18 +13,24 @@ type CompletionSuggester struct {
Suggester
name string
text string
+ prefix string
+ regex string
field string
analyzer string
size *int
shardSize *int
contextQueries []SuggesterContextQuery
+ payload interface{}
+
+ fuzzyOptions *FuzzyCompletionSuggesterOptions
+ regexOptions *RegexCompletionSuggesterOptions
+ skipDuplicates *bool
}
// Creates a new completion suggester.
func NewCompletionSuggester(name string) *CompletionSuggester {
return &CompletionSuggester{
- name: name,
- contextQueries: make([]SuggesterContextQuery, 0),
+ name: name,
}
}
@@ -37,6 +43,57 @@ func (q *CompletionSuggester) Text(text string) *CompletionSuggester {
return q
}
+func (q *CompletionSuggester) Prefix(prefix string) *CompletionSuggester {
+ q.prefix = prefix
+ return q
+}
+
+func (q *CompletionSuggester) PrefixWithEditDistance(prefix string, editDistance interface{}) *CompletionSuggester {
+ q.prefix = prefix
+ q.fuzzyOptions = NewFuzzyCompletionSuggesterOptions().EditDistance(editDistance)
+ return q
+}
+
+func (q *CompletionSuggester) PrefixWithOptions(prefix string, options *FuzzyCompletionSuggesterOptions) *CompletionSuggester {
+ q.prefix = prefix
+ q.fuzzyOptions = options
+ return q
+}
+
+func (q *CompletionSuggester) FuzzyOptions(options *FuzzyCompletionSuggesterOptions) *CompletionSuggester {
+ q.fuzzyOptions = options
+ return q
+}
+
+func (q *CompletionSuggester) Fuzziness(fuzziness interface{}) *CompletionSuggester {
+ if q.fuzzyOptions == nil {
+ q.fuzzyOptions = NewFuzzyCompletionSuggesterOptions()
+ }
+ q.fuzzyOptions = q.fuzzyOptions.EditDistance(fuzziness)
+ return q
+}
+
+func (q *CompletionSuggester) Regex(regex string) *CompletionSuggester {
+ q.regex = regex
+ return q
+}
+
+func (q *CompletionSuggester) RegexWithOptions(regex string, options *RegexCompletionSuggesterOptions) *CompletionSuggester {
+ q.regex = regex
+ q.regexOptions = options
+ return q
+}
+
+func (q *CompletionSuggester) RegexOptions(options *RegexCompletionSuggesterOptions) *CompletionSuggester {
+ q.regexOptions = options
+ return q
+}
+
+func (q *CompletionSuggester) SkipDuplicates(skipDuplicates bool) *CompletionSuggester {
+ q.skipDuplicates = &skipDuplicates
+ return q
+}
+
func (q *CompletionSuggester) Field(field string) *CompletionSuggester {
q.field = field
return q
@@ -72,17 +129,25 @@ func (q *CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *
// We got into trouble when using plain maps because the text element
// needs to go before the completion element.
type completionSuggesterRequest struct {
- Text string `json:"text"`
- Completion interface{} `json:"completion"`
+ Text string `json:"text,omitempty"`
+ Prefix string `json:"prefix,omitempty"`
+ Regex string `json:"regex,omitempty"`
+ Completion interface{} `json:"completion,omitempty"`
}
-// Creates the source for the completion suggester.
+// Source creates the JSON data for the completion suggester.
func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) {
cs := &completionSuggesterRequest{}
if q.text != "" {
cs.Text = q.text
}
+ if q.prefix != "" {
+ cs.Prefix = q.prefix
+ }
+ if q.regex != "" {
+ cs.Regex = q.regex
+ }
suggester := make(map[string]interface{})
cs.Completion = suggester
@@ -106,7 +171,7 @@ func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) {
if err != nil {
return nil, err
}
- suggester["context"] = src
+ suggester["contexts"] = src
default:
ctxq := make(map[string]interface{})
for _, query := range q.contextQueries {
@@ -126,6 +191,28 @@ func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) {
suggester["contexts"] = ctxq
}
+ // Fuzzy options
+ if q.fuzzyOptions != nil {
+ src, err := q.fuzzyOptions.Source()
+ if err != nil {
+ return nil, err
+ }
+ suggester["fuzzy"] = src
+ }
+
+ // Regex options
+ if q.regexOptions != nil {
+ src, err := q.regexOptions.Source()
+ if err != nil {
+ return nil, err
+ }
+ suggester["regex"] = src
+ }
+
+ if q.skipDuplicates != nil {
+ suggester["skip_duplicates"] = *q.skipDuplicates
+ }
+
// TODO(oe) Add completion-suggester specific parameters here
if !includeName {
@@ -136,3 +223,130 @@ func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) {
source[q.name] = cs
return source, nil
}
+
+// -- Fuzzy options --
+
+// FuzzyCompletionSuggesterOptions represents the options for the fuzzy completion suggester.
+type FuzzyCompletionSuggesterOptions struct {
+ editDistance interface{}
+ transpositions *bool
+ minLength *int
+ prefixLength *int
+ unicodeAware *bool
+ maxDeterminizedStates *int
+}
+
+// NewFuzzyCompletionSuggesterOptions initializes a new FuzzyCompletionSuggesterOptions instance.
+func NewFuzzyCompletionSuggesterOptions() *FuzzyCompletionSuggesterOptions {
+ return &FuzzyCompletionSuggesterOptions{}
+}
+
+// EditDistance specifies the maximum number of edits, e.g. a number like "1" or "2"
+// or a string like "0..2" or ">5". See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/common-options.html#fuzziness
+// for details.
+func (o *FuzzyCompletionSuggesterOptions) EditDistance(editDistance interface{}) *FuzzyCompletionSuggesterOptions {
+ o.editDistance = editDistance
+ return o
+}
+
+// Transpositions, if set to true, counts a transposition as one change instead of two (defaults to true).
+func (o *FuzzyCompletionSuggesterOptions) Transpositions(transpositions bool) *FuzzyCompletionSuggesterOptions {
+ o.transpositions = &transpositions
+ return o
+}
+
+// MinLength represents the minimum length of the input before fuzzy suggestions are returned (defaults to 3).
+func (o *FuzzyCompletionSuggesterOptions) MinLength(minLength int) *FuzzyCompletionSuggesterOptions {
+ o.minLength = &minLength
+ return o
+}
+
+// PrefixLength represents the minimum length of the input, which is not checked for
+// fuzzy alternatives (defaults to 1).
+func (o *FuzzyCompletionSuggesterOptions) PrefixLength(prefixLength int) *FuzzyCompletionSuggesterOptions {
+ o.prefixLength = &prefixLength
+ return o
+}
+
+// UnicodeAware, if set to true, causes all measurements (like fuzzy edit distance,
+// transpositions, and lengths) to be measured in Unicode code points instead of
+// in bytes. This is slightly slower than raw bytes, so it defaults to false.
+func (o *FuzzyCompletionSuggesterOptions) UnicodeAware(unicodeAware bool) *FuzzyCompletionSuggesterOptions {
+ o.unicodeAware = &unicodeAware
+ return o
+}
+
+// MaxDeterminizedStates is currently undocumented in Elasticsearch. It represents
+// the maximum automaton states allowed for fuzzy expansion.
+func (o *FuzzyCompletionSuggesterOptions) MaxDeterminizedStates(max int) *FuzzyCompletionSuggesterOptions {
+ o.maxDeterminizedStates = &max
+ return o
+}
+
+// Source creates the JSON data.
+func (o *FuzzyCompletionSuggesterOptions) Source() (interface{}, error) {
+ out := make(map[string]interface{})
+
+ if o.editDistance != nil {
+ out["fuzziness"] = o.editDistance
+ }
+ if o.transpositions != nil {
+ out["transpositions"] = *o.transpositions
+ }
+ if o.minLength != nil {
+ out["min_length"] = *o.minLength
+ }
+ if o.prefixLength != nil {
+ out["prefix_length"] = *o.prefixLength
+ }
+ if o.unicodeAware != nil {
+ out["unicode_aware"] = *o.unicodeAware
+ }
+ if o.maxDeterminizedStates != nil {
+ out["max_determinized_states"] = *o.maxDeterminizedStates
+ }
+
+ return out, nil
+}
+
+// -- Regex options --
+
+// RegexCompletionSuggesterOptions represents the options for the regex completion suggester.
+type RegexCompletionSuggesterOptions struct {
+ flags interface{} // string or int
+ maxDeterminizedStates *int
+}
+
+// NewRegexCompletionSuggesterOptions initializes a new RegexCompletionSuggesterOptions instance.
+func NewRegexCompletionSuggesterOptions() *RegexCompletionSuggesterOptions {
+ return &RegexCompletionSuggesterOptions{}
+}
+
+// Flags represents internal regex flags. See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-suggesters-completion.html#regex
+// for details.
+func (o *RegexCompletionSuggesterOptions) Flags(flags interface{}) *RegexCompletionSuggesterOptions {
+ o.flags = flags
+ return o
+}
+
+// MaxDeterminizedStates represents the maximum automaton states allowed for regex expansion.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.6/search-suggesters-completion.html#regex
+// for details.
+func (o *RegexCompletionSuggesterOptions) MaxDeterminizedStates(max int) *RegexCompletionSuggesterOptions {
+ o.maxDeterminizedStates = &max
+ return o
+}
+
+// Source creates the JSON data.
+func (o *RegexCompletionSuggesterOptions) Source() (interface{}, error) {
+ out := make(map[string]interface{})
+
+ if o.flags != nil {
+ out["flags"] = o.flags
+ }
+ if o.maxDeterminizedStates != nil {
+ out["max_determinized_states"] = *o.maxDeterminizedStates
+ }
+
+ return out, nil
+}
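
The upgraded CompletionSuggester folds prefix, regex, fuzzy, and skip_duplicates support into one type. A hedged sketch combining the regex options with duplicate filtering; the regex and field name mirror the tests below:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	s := elastic.NewCompletionSuggester("song-suggest").
		RegexWithOptions(
			"n[ever|i]r",
			elastic.NewRegexCompletionSuggesterOptions().
				MaxDeterminizedStates(10000),
		).
		Field("suggest").
		SkipDuplicates(true)

	src, err := s.Source(true)
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data)) // {"song-suggest":{"regex":"n[ever|i]r","completion":{...}}}
}
```
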
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy.go
deleted file mode 100644
index e2c06a25f..000000000
--- a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-// FuzzyFuzzyCompletionSuggester is a FuzzyCompletionSuggester that allows fuzzy
-// completion.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-completion.html
-// for details, and
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-completion.html#fuzzy
-// for details about the fuzzy completion suggester.
-type FuzzyCompletionSuggester struct {
- Suggester
- name string
- text string
- field string
- analyzer string
- size *int
- shardSize *int
- contextQueries []SuggesterContextQuery
-
- fuzziness interface{}
- fuzzyTranspositions *bool
- fuzzyMinLength *int
- fuzzyPrefixLength *int
- unicodeAware *bool
-}
-
-// Fuzziness defines the fuzziness which is used in FuzzyCompletionSuggester.
-type Fuzziness struct {
-}
-
-// Creates a new completion suggester.
-func NewFuzzyCompletionSuggester(name string) *FuzzyCompletionSuggester {
- return &FuzzyCompletionSuggester{
- name: name,
- contextQueries: make([]SuggesterContextQuery, 0),
- }
-}
-
-func (q *FuzzyCompletionSuggester) Name() string {
- return q.name
-}
-
-func (q *FuzzyCompletionSuggester) Text(text string) *FuzzyCompletionSuggester {
- q.text = text
- return q
-}
-
-func (q *FuzzyCompletionSuggester) Field(field string) *FuzzyCompletionSuggester {
- q.field = field
- return q
-}
-
-func (q *FuzzyCompletionSuggester) Analyzer(analyzer string) *FuzzyCompletionSuggester {
- q.analyzer = analyzer
- return q
-}
-
-func (q *FuzzyCompletionSuggester) Size(size int) *FuzzyCompletionSuggester {
- q.size = &size
- return q
-}
-
-func (q *FuzzyCompletionSuggester) ShardSize(shardSize int) *FuzzyCompletionSuggester {
- q.shardSize = &shardSize
- return q
-}
-
-func (q *FuzzyCompletionSuggester) ContextQuery(query SuggesterContextQuery) *FuzzyCompletionSuggester {
- q.contextQueries = append(q.contextQueries, query)
- return q
-}
-
-func (q *FuzzyCompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *FuzzyCompletionSuggester {
- q.contextQueries = append(q.contextQueries, queries...)
- return q
-}
-
-// Fuzziness defines the strategy used to describe what "fuzzy" actually
-// means for the suggester, e.g. 1, 2, "0", "1..2", ">4", or "AUTO".
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/common-options.html#fuzziness
-// for a detailed description.
-func (q *FuzzyCompletionSuggester) Fuzziness(fuzziness interface{}) *FuzzyCompletionSuggester {
- q.fuzziness = fuzziness
- return q
-}
-
-func (q *FuzzyCompletionSuggester) FuzzyTranspositions(fuzzyTranspositions bool) *FuzzyCompletionSuggester {
- q.fuzzyTranspositions = &fuzzyTranspositions
- return q
-}
-
-func (q *FuzzyCompletionSuggester) FuzzyMinLength(minLength int) *FuzzyCompletionSuggester {
- q.fuzzyMinLength = &minLength
- return q
-}
-
-func (q *FuzzyCompletionSuggester) FuzzyPrefixLength(prefixLength int) *FuzzyCompletionSuggester {
- q.fuzzyPrefixLength = &prefixLength
- return q
-}
-
-func (q *FuzzyCompletionSuggester) UnicodeAware(unicodeAware bool) *FuzzyCompletionSuggester {
- q.unicodeAware = &unicodeAware
- return q
-}
-
-// Creates the source for the completion suggester.
-func (q *FuzzyCompletionSuggester) Source(includeName bool) (interface{}, error) {
- cs := &completionSuggesterRequest{}
-
- if q.text != "" {
- cs.Text = q.text
- }
-
- suggester := make(map[string]interface{})
- cs.Completion = suggester
-
- if q.analyzer != "" {
- suggester["analyzer"] = q.analyzer
- }
- if q.field != "" {
- suggester["field"] = q.field
- }
- if q.size != nil {
- suggester["size"] = *q.size
- }
- if q.shardSize != nil {
- suggester["shard_size"] = *q.shardSize
- }
- switch len(q.contextQueries) {
- case 0:
- case 1:
- src, err := q.contextQueries[0].Source()
- if err != nil {
- return nil, err
- }
- suggester["context"] = src
- default:
- var ctxq []interface{}
- for _, query := range q.contextQueries {
- src, err := query.Source()
- if err != nil {
- return nil, err
- }
- ctxq = append(ctxq, src)
- }
- suggester["context"] = ctxq
- }
-
- // Fuzzy Completion Suggester fields
- fuzzy := make(map[string]interface{})
- suggester["fuzzy"] = fuzzy
- if q.fuzziness != nil {
- fuzzy["fuzziness"] = q.fuzziness
- }
- if q.fuzzyTranspositions != nil {
- fuzzy["transpositions"] = *q.fuzzyTranspositions
- }
- if q.fuzzyMinLength != nil {
- fuzzy["min_length"] = *q.fuzzyMinLength
- }
- if q.fuzzyPrefixLength != nil {
- fuzzy["prefix_length"] = *q.fuzzyPrefixLength
- }
- if q.unicodeAware != nil {
- fuzzy["unicode_aware"] = *q.unicodeAware
- }
-
- if !includeName {
- return cs, nil
- }
-
- source := make(map[string]interface{})
- source[q.name] = cs
- return source, nil
-}
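
With FuzzyCompletionSuggester deleted, its knobs survive as FuzzyCompletionSuggesterOptions on the plain CompletionSuggester. A rough before/after of the migration, assuming the old call used Text and Fuzziness as in the removed file:

```go
package main

import (
	"encoding/json"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	// 5.x: elastic.NewFuzzyCompletionSuggester("song-suggest").
	//          Text("nir").Field("suggest").Fuzziness(2)
	// 6.x: the fuzzy options hang off the regular completion suggester.
	s := elastic.NewCompletionSuggester("song-suggest").
		Prefix("nir").
		Field("suggest").
		Fuzziness(2) // shorthand for FuzzyOptions(NewFuzzyCompletionSuggesterOptions().EditDistance(2))

	src, err := s.Source(true)
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(src)
	fmt.Println(string(data))
}
```
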
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_test.go
index 6bffddfe7..adbf58657 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_test.go
@@ -28,6 +28,64 @@ func TestCompletionSuggesterSource(t *testing.T) {
}
}
+func TestCompletionSuggesterPrefixSource(t *testing.T) {
+ s := NewCompletionSuggester("song-suggest").
+ Prefix("nir").
+ Field("suggest")
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"song-suggest":{"prefix":"nir","completion":{"field":"suggest"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestCompletionSuggesterPrefixWithFuzzySource(t *testing.T) {
+ s := NewCompletionSuggester("song-suggest").
+ Prefix("nor").
+ Field("suggest").
+ FuzzyOptions(NewFuzzyCompletionSuggesterOptions().EditDistance(2))
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"song-suggest":{"prefix":"nor","completion":{"field":"suggest","fuzzy":{"fuzziness":2}}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
+func TestCompletionSuggesterRegexSource(t *testing.T) {
+ s := NewCompletionSuggester("song-suggest").
+ Regex("n[ever|i]r").
+ Field("suggest")
+ src, err := s.Source(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err := json.Marshal(src)
+ if err != nil {
+ t.Fatalf("marshaling to JSON failed: %v", err)
+ }
+ got := string(data)
+ expected := `{"song-suggest":{"regex":"n[ever|i]r","completion":{"field":"suggest"}}}`
+ if got != expected {
+ t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+ }
+}
+
func TestCompletionSuggesterSourceWithMultipleContexts(t *testing.T) {
s := NewCompletionSuggester("song-suggest").
Text("n").
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context.go
index ade099151..12877c1a6 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggester_context.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context.go
@@ -13,7 +13,7 @@ type SuggesterContextQuery interface {
}
// ContextSuggester is a fast suggester for e.g. type-ahead completion that supports filtering and boosting based on contexts.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/current/suggester-context.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html
// for more details.
type ContextSuggester struct {
Suggester
@@ -94,7 +94,7 @@ func (q *ContextSuggester) Source(includeName bool) (interface{}, error) {
if err != nil {
return nil, err
}
- suggester["context"] = src
+ suggester["contexts"] = src
default:
ctxq := make(map[string]interface{})
for _, query := range q.contextQueries {
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go
index 2d63fe8fb..9c50651fa 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go
@@ -7,7 +7,7 @@ package elastic
// -- SuggesterCategoryMapping --
// SuggesterCategoryMapping provides a mapping for a category context in a suggester.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/suggester-context.html#_category_mapping.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_category_mapping.
type SuggesterCategoryMapping struct {
name string
fieldName string
@@ -59,7 +59,7 @@ func (q *SuggesterCategoryMapping) Source() (interface{}, error) {
// -- SuggesterCategoryQuery --
// SuggesterCategoryQuery provides querying a category context in a suggester.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/suggester-context.html#_category_query.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_category_query.
type SuggesterCategoryQuery struct {
name string
values map[string]*int
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go
index 6815bfe73..3fea63feb 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go
@@ -7,7 +7,7 @@ package elastic
// -- SuggesterGeoMapping --
// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/suggester-context.html#_geo_location_mapping.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_geo_location_mapping.
type SuggesterGeoMapping struct {
name string
defaultLocations []*GeoPoint
@@ -80,7 +80,7 @@ func (q *SuggesterGeoMapping) Source() (interface{}, error) {
// -- SuggesterGeoQuery --
// SuggesterGeoQuery provides querying a geolocation context in a suggester.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/suggester-context.html#_geo_location_query
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/suggester-context.html#_geo_location_query
type SuggesterGeoQuery struct {
name string
location *GeoPoint
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_test.go
index cd3c5586c..045ccb2f4 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_test.go
@@ -45,9 +45,9 @@ func TestContextSuggesterSourceWithMultipleContexts(t *testing.T) {
}
got := string(data)
// Due to the randomization of dictionary key, we could actually have two different valid expected outcomes
- expected := `{"place_suggestion":{"prefix":"tim","completion":{"context":{"place_type":[{"context":"cafe"},{"context":"restaurants"}]},"field":"suggest"}}}`
+ expected := `{"place_suggestion":{"prefix":"tim","completion":{"contexts":{"place_type":[{"context":"cafe"},{"context":"restaurants"}]},"field":"suggest"}}}`
if got != expected {
- expected := `{"place_suggestion":{"prefix":"tim","completion":{"context":{"place_type":[{"context":"restaurants"},{"context":"cafe"}]},"field":"suggest"}}}`
+ expected := `{"place_suggestion":{"prefix":"tim","completion":{"contexts":{"place_type":[{"context":"restaurants"},{"context":"cafe"}]},"field":"suggest"}}}`
if got != expected {
t.Errorf("expected %s\n,got:\n%s", expected, got)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go
index f75e1ddc1..2f6b6a326 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go
@@ -7,7 +7,7 @@ package elastic
// PhraseSuggester provides an API to access word alternatives
// on a per token basis within a certain string distance.
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-phrase.html.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html.
type PhraseSuggester struct {
Suggester
name string
@@ -213,7 +213,7 @@ func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) {
if err != nil {
return nil, err
}
- suggester["context"] = src
+ suggester["contexts"] = src
default:
var ctxq []interface{}
for _, query := range q.contextQueries {
@@ -223,7 +223,7 @@ func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) {
}
ctxq = append(ctxq, src)
}
- suggester["context"] = ctxq
+ suggester["contexts"] = ctxq
}
// Phrase-specific parameters
@@ -312,7 +312,7 @@ type SmoothingModel interface {
}
// StupidBackoffSmoothingModel implements a stupid backoff smoothing model.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-phrase.html#_smoothing_models
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html#_smoothing_models
// for details about smoothing models.
type StupidBackoffSmoothingModel struct {
discount float64
@@ -337,7 +337,7 @@ func (sm *StupidBackoffSmoothingModel) Source() (interface{}, error) {
// --
// LaplaceSmoothingModel implements a laplace smoothing model.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-phrase.html#_smoothing_models
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html#_smoothing_models
// for details about smoothing models.
type LaplaceSmoothingModel struct {
alpha float64
@@ -363,7 +363,7 @@ func (sm *LaplaceSmoothingModel) Source() (interface{}, error) {
// LinearInterpolationSmoothingModel implements a linear interpolation
// smoothing model.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-phrase.html#_smoothing_models
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html#_smoothing_models
// for details about smoothing models.
type LinearInterpolationSmoothingModel struct {
trigramLamda float64
@@ -399,7 +399,7 @@ type CandidateGenerator interface {
}
// DirectCandidateGenerator implements a direct candidate generator.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-phrase.html#_smoothing_models
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-phrase.html#_smoothing_models
// for details about smoothing models.
type DirectCandidateGenerator struct {
field string
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase_test.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase_test.go
index fbcc676fe..63dde686e 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase_test.go
@@ -60,7 +60,7 @@ func TestPhraseSuggesterSourceWithContextQuery(t *testing.T) {
t.Fatalf("marshaling to JSON failed: %v", err)
}
got := string(data)
- expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","context":{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}},"field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}`
+ expected := `{"name":{"text":"Xor the Got-Jewel","phrase":{"analyzer":"body","contexts":{"location":{"default":{"lat":0,"lon":0},"neighbors":true,"path":"pin","precision":["1km","5m"],"type":"geo"}},"field":"bigram","gram_size":2,"highlight":{"post_tag":"\u003c/em\u003e","pre_tag":"\u003cem\u003e"},"max_errors":0.5,"real_word_error_likelihood":0.95,"size":1}}}`
if got != expected {
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_term.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_term.go
index 9c199f69d..69e1531f6 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/suggester_term.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_term.go
@@ -6,7 +6,7 @@ package elastic
// TermSuggester suggests terms based on edit distance.
// For more details, see
-// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-term.html.
+// https://www.elastic.co/guide/en/elasticsearch/reference/6.0/search-suggesters-term.html.
type TermSuggester struct {
Suggester
name string
@@ -178,7 +178,7 @@ func (q *TermSuggester) Source(includeName bool) (interface{}, error) {
if err != nil {
return nil, err
}
- suggester["context"] = src
+ suggester["contexts"] = src
default:
ctxq := make([]interface{}, len(q.contextQueries))
for i, query := range q.contextQueries {
@@ -188,7 +188,7 @@ func (q *TermSuggester) Source(includeName bool) (interface{}, error) {
}
ctxq[i] = src
}
- suggester["context"] = ctxq
+ suggester["contexts"] = ctxq
}
// Specific to term suggester
diff --git a/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go b/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go
index 2c74fd87e..84f8aec35 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go
@@ -10,7 +10,7 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// TasksCancelService can cancel long-running tasks.
@@ -95,7 +95,7 @@ func (s *TasksCancelService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if len(s.actions) > 0 {
params.Set("actions", strings.Join(s.actions, ","))
@@ -131,7 +131,11 @@ func (s *TasksCancelService) Do(ctx context.Context) (*TasksListResponse, error)
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/tasks_get_task.go b/vendor/gopkg.in/olivere/elastic.v5/tasks_get_task.go
index 5368169ec..5f63726e4 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/tasks_get_task.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/tasks_get_task.go
@@ -5,7 +5,7 @@ import (
"fmt"
"net/url"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// TasksGetTaskService retrieves the state of a task in the cluster. It is part of the Task Management API
@@ -85,7 +85,11 @@ func (s *TasksGetTaskService) Do(ctx context.Context) (*TasksGetTaskResponse, er
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/tasks_list.go b/vendor/gopkg.in/olivere/elastic.v5/tasks_list.go
index d68bc21fb..54299d961 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/tasks_list.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/tasks_list.go
@@ -10,38 +10,36 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// TasksListService retrieves the list of currently executing tasks
// on one or more nodes in the cluster. It is part of the Task Management API
-// documented at http://www.elastic.co/guide/en/elasticsearch/reference/5.2/tasks-list.html.
+// documented at https://www.elastic.co/guide/en/elasticsearch/reference/6.0/tasks.html.
//
// It is supported as of Elasticsearch 2.3.0.
type TasksListService struct {
client *Client
pretty bool
- taskId []int64
+ taskId []string
actions []string
detailed *bool
nodeId []string
parentNode string
- parentTask *int64
+ parentTaskId *string
waitForCompletion *bool
+ groupBy string
}
// NewTasksListService creates a new TasksListService.
func NewTasksListService(client *Client) *TasksListService {
return &TasksListService{
- client: client,
- taskId: make([]int64, 0),
- actions: make([]string, 0),
- nodeId: make([]string, 0),
+ client: client,
}
}
// TaskId restricts the result to the task(s) with the specified id(s).
-func (s *TasksListService) TaskId(taskId ...int64) *TasksListService {
+func (s *TasksListService) TaskId(taskId ...string) *TasksListService {
s.taskId = append(s.taskId, taskId...)
return s
}
@@ -72,9 +70,9 @@ func (s *TasksListService) ParentNode(parentNode string) *TasksListService {
return s
}
-// ParentTask returns tasks with specified parent task id. Set to -1 to return all.
-func (s *TasksListService) ParentTask(parentTask int64) *TasksListService {
- s.parentTask = &parentTask
+// ParentTaskId returns tasks with the specified parent task id (node_id:task_number). Set to -1 to return all.
+func (s *TasksListService) ParentTaskId(parentTaskId string) *TasksListService {
+ s.parentTaskId = &parentTaskId
return s
}
@@ -85,6 +83,13 @@ func (s *TasksListService) WaitForCompletion(waitForCompletion bool) *TasksListS
return s
}
+// GroupBy groups tasks by nodes or parent/child relationships.
+// As of now, it can either be "nodes" (default) or "parents".
+func (s *TasksListService) GroupBy(groupBy string) *TasksListService {
+ s.groupBy = groupBy
+ return s
+}
+
// Pretty indicates that the JSON response should be indented and human-readable.
func (s *TasksListService) Pretty(pretty bool) *TasksListService {
s.pretty = pretty
@@ -97,12 +102,8 @@ func (s *TasksListService) buildURL() (string, url.Values, error) {
var err error
var path string
if len(s.taskId) > 0 {
- var tasks []string
- for _, taskId := range s.taskId {
- tasks = append(tasks, fmt.Sprintf("%d", taskId))
- }
path, err = uritemplates.Expand("/_tasks/{task_id}", map[string]string{
- "task_id": strings.Join(tasks, ","),
+ "task_id": strings.Join(s.taskId, ","),
})
} else {
path = "/_tasks"
@@ -114,7 +115,7 @@ func (s *TasksListService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if len(s.actions) > 0 {
params.Set("actions", strings.Join(s.actions, ","))
@@ -128,12 +129,15 @@ func (s *TasksListService) buildURL() (string, url.Values, error) {
if s.parentNode != "" {
params.Set("parent_node", s.parentNode)
}
- if s.parentTask != nil {
- params.Set("parent_task", fmt.Sprintf("%v", *s.parentTask))
+ if s.parentTaskId != nil {
+ params.Set("parent_task_id", *s.parentTaskId)
}
if s.waitForCompletion != nil {
params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
}
+ if s.groupBy != "" {
+ params.Set("group_by", s.groupBy)
+ }
return path, params, nil
}
@@ -156,7 +160,11 @@ func (s *TasksListService) Do(ctx context.Context) (*TasksListResponse, error) {
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ })
if err != nil {
return nil, err
}
@@ -178,7 +186,7 @@ type TasksListResponse struct {
}
type TaskOperationFailure struct {
- TaskId int64 `json:"task_id"`
+ TaskId int64 `json:"task_id"` // this is a long in the Java source
NodeId string `json:"node_id"`
Status string `json:"status"`
Reason *ErrorDetails `json:"reason"`
@@ -194,14 +202,16 @@ type DiscoveryNode struct {
TransportAddress string `json:"transport_address"`
Host string `json:"host"`
IP string `json:"ip"`
+ Roles []string `json:"roles"` // "master", "data", or "ingest"
Attributes map[string]interface{} `json:"attributes"`
// Tasks returns the tasks keyed by their id (as a string).
Tasks map[string]*TaskInfo `json:"tasks"`
}
+// TaskInfo represents information about a currently running task.
type TaskInfo struct {
Node string `json:"node"`
- Id int64 `json:"id"` // the task id
+ Id int64 `json:"id"` // the task id (yes, this is a long in the Java source)
Type string `json:"type"`
Action string `json:"action"`
Status interface{} `json:"status"` // has separate implementations of Task.Status in Java for reindexing, replication, and "RawTaskStatus"
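
TasksListService now takes string task ids in the 6.x "node_id:task_number" form and grows a group_by parameter. A hedged usage sketch; the task id is hypothetical, and the response layout follows the structs above:

```go
package main

import (
	"context"
	"fmt"

	elastic "github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient() // assumes a reachable node
	if err != nil {
		panic(err)
	}

	res, err := client.TasksList().
		TaskId("oTUltX4IQMOUUVeiohTt8A:12345"). // hypothetical node_id:task_number
		GroupBy("parents").                     // "nodes" (default) or "parents"
		Pretty(true).
		Do(context.Background())
	if err != nil {
		panic(err)
	}

	for nodeID, node := range res.Nodes {
		fmt.Printf("node %s runs %d task(s)\n", nodeID, len(node.Tasks))
	}
}
```
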
diff --git a/vendor/gopkg.in/olivere/elastic.v5/tasks_list_test.go b/vendor/gopkg.in/olivere/elastic.v5/tasks_list_test.go
index e14bc6e43..9ecabcd68 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/tasks_list_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/tasks_list_test.go
@@ -13,19 +13,19 @@ func TestTasksListBuildURL(t *testing.T) {
client := setupTestClient(t)
tests := []struct {
- TaskId []int64
+ TaskId []string
Expected string
}{
{
- []int64{},
+ []string{},
"/_tasks",
},
{
- []int64{42},
+ []string{"42"},
"/_tasks/42",
},
{
- []int64{42, 37},
+ []string{"42", "37"},
"/_tasks/42%2C37",
},
}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/termvectors.go b/vendor/gopkg.in/olivere/elastic.v5/termvectors.go
index 6bbcf8f3b..5943ad14f 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/termvectors.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/termvectors.go
@@ -10,14 +10,14 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// TermvectorsService returns information and statistics on terms in the
// fields of a particular document. The document could be stored in the
// index or artificially provided by the user.
//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-termvectors.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-termvectors.html
// for documentation.
type TermvectorsService struct {
client *Client
@@ -169,7 +169,7 @@ func (s *TermvectorsService) Version(version interface{}) *TermvectorsService {
return s
}
-// VersionType specifies a version type ("internal", "external", "external_gte", or "force").
+// VersionType specifies a version type ("internal", "external", or "external_gte").
func (s *TermvectorsService) VersionType(versionType string) *TermvectorsService {
s.versionType = versionType
return s
@@ -217,7 +217,7 @@ func (s *TermvectorsService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if s.dfs != nil {
params.Set("dfs", fmt.Sprintf("%v", *s.dfs))
@@ -316,7 +316,12 @@ func (s *TermvectorsService) Do(ctx context.Context) (*TermvectorsResponse, erro
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "GET",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -333,7 +338,7 @@ func (s *TermvectorsService) Do(ctx context.Context) (*TermvectorsResponse, erro
// TermvectorsFilterSettings adds additional filters to a Termvectors request.
// It allows filtering terms based on their tf-idf scores.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-termvectors.html#_terms_filtering
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-termvectors.html#_terms_filtering
// for more information.
type TermvectorsFilterSettings struct {
maxNumTerms *int64
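The same call-site migration appears in every Do method in this diff: PerformRequest moves from positional arguments to a PerformRequestOptions struct. A hedged illustration of the pattern follows; requestOptions and performRequest are hypothetical miniatures, and the real PerformRequestOptions in client.go carries more fields than the four these hunks use:

package main

import (
	"context"
	"fmt"
	"net/url"
)

// requestOptions is a hypothetical miniature of PerformRequestOptions,
// limited to the fields exercised in the hunks above.
type requestOptions struct {
	Method string
	Path   string
	Params url.Values
	Body   interface{}
}

// performRequest stands in for Client.PerformRequest; named fields mean new
// options can be added later without breaking existing call sites, which is
// the point of the migration.
func performRequest(ctx context.Context, opt requestOptions) error {
	fmt.Println(opt.Method, opt.Path+"?"+opt.Params.Encode())
	return nil
}

func main() {
	params := url.Values{}
	params.Set("pretty", "true")
	_ = performRequest(context.Background(), requestOptions{
		Method: "GET",
		Path:   "/twitter/doc/1/_termvectors",
		Params: params,
	})
}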
diff --git a/vendor/gopkg.in/olivere/elastic.v5/termvectors_test.go b/vendor/gopkg.in/olivere/elastic.v5/termvectors_test.go
index fb0ede146..0391f2b0a 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/termvectors_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/termvectors_test.go
@@ -21,15 +21,15 @@ func TestTermVectorsBuildURL(t *testing.T) {
}{
{
"twitter",
- "tweet",
+ "doc",
"",
- "/twitter/tweet/_termvectors",
+ "/twitter/doc/_termvectors",
},
{
"twitter",
- "tweet",
+ "doc",
"1",
- "/twitter/tweet/1/_termvectors",
+ "/twitter/doc/1/_termvectors",
},
}
@@ -56,7 +56,7 @@ func TestTermVectorsWithId(t *testing.T) {
// Add a document
indexResult, err := client.Index().
Index(testIndexName).
- Type("tweet").
+ Type("doc").
Id("1").
BodyJson(&tweet1).
Refresh("true").
@@ -70,7 +70,7 @@ func TestTermVectorsWithId(t *testing.T) {
// TermVectors by specifying ID
field := "Message"
- result, err := client.TermVectors(testIndexName, "tweet").
+ result, err := client.TermVectors(testIndexName, "doc").
Id("1").
Fields(field).
FieldStatistics(true).
@@ -104,7 +104,7 @@ func TestTermVectorsWithDoc(t *testing.T) {
"fullname": "keyword",
}
- result, err := client.TermVectors(testIndexName, "tweet").
+ result, err := client.TermVectors(testIndexName, "doc").
Doc(doc).
PerFieldAnalyzer(perFieldAnalyzer).
FieldStatistics(true).
@@ -138,7 +138,7 @@ func TestTermVectorsWithFilter(t *testing.T) {
"fullname": "keyword",
}
- result, err := client.TermVectors(testIndexName, "tweet").
+ result, err := client.TermVectors(testIndexName, "doc").
Doc(doc).
PerFieldAnalyzer(perFieldAnalyzer).
FieldStatistics(true).
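The tweet-to-doc renames in these tests track Elasticsearch 6.x restricting every index to a single mapping type. A sketch in the style of the surrounding tests, using only calls the updated tests exercise; the test name is hypothetical and the fixtures (setupTestClientAndCreateIndexAndAddDocs, testIndexName) are assumed from the package's helpers:

func TestTermVectorsSingleDocType(t *testing.T) {
	client := setupTestClientAndCreateIndexAndAddDocs(t)

	// Request term vectors for one field of an indexed document.
	res, err := client.TermVectors(testIndexName, "doc").
		Id("1").
		Fields("Message").
		FieldStatistics(true).
		Do(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	if res == nil {
		t.Fatal("expected a term vectors response")
	}
}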
diff --git a/vendor/gopkg.in/olivere/elastic.v5/update.go b/vendor/gopkg.in/olivere/elastic.v5/update.go
index 1718bc585..5507fae4c 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/update.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/update.go
@@ -10,11 +10,11 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// UpdateService updates a document in Elasticsearch.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-update.html
+// See https://www.elastic.co/guide/en/elasticsearch/reference/6.0/docs-update.html
// for details.
type UpdateService struct {
client *Client
@@ -105,7 +105,7 @@ func (b *UpdateService) Version(version int64) *UpdateService {
return b
}
-// VersionType is one of "internal" or "force".
+// VersionType is the versioning type to use, e.g. "internal".
func (b *UpdateService) VersionType(versionType string) *UpdateService {
b.versionType = versionType
return b
@@ -293,7 +293,12 @@ func (b *UpdateService) Do(ctx context.Context) (*UpdateResponse, error) {
}
// Get response
- res, err := b.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := b.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
@@ -308,12 +313,15 @@ func (b *UpdateService) Do(ctx context.Context) (*UpdateResponse, error) {
// UpdateResponse is the result of updating a document in Elasticsearch.
type UpdateResponse struct {
- Index string `json:"_index"`
- Type string `json:"_type"`
- Id string `json:"_id"`
- Version int `json:"_version"`
- Shards *shardsInfo `json:"_shards"`
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Version int64 `json:"_version,omitempty"`
Result string `json:"result,omitempty"`
+ Shards *shardsInfo `json:"_shards,omitempty"`
+ SeqNo int64 `json:"_seq_no,omitempty"`
+ PrimaryTerm int64 `json:"_primary_term,omitempty"`
+ Status int `json:"status,omitempty"`
ForcedRefresh bool `json:"forced_refresh,omitempty"`
GetResult *GetResult `json:"get,omitempty"`
}
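UpdateResponse now mirrors the 6.x update API: _seq_no and _primary_term arrive for optimistic concurrency control, _version widens to int64, and the fields gain omitempty tags. A sketch of reading the new fields after an update; the client, index, type, and id are assumptions borrowed from the tests in this diff:

// Sketch only: assumes a configured *elastic.Client named client and an
// existing document.
res, err := client.Update().
	Index("test").Type("doc").Id("1").
	Doc(map[string]interface{}{"user": "sandrae"}).
	Do(context.Background())
if err != nil {
	// handle error
}
fmt.Println(res.Result)      // "updated" or "noop"
fmt.Println(res.Version)     // now an int64
fmt.Println(res.SeqNo)       // new: sequence number for optimistic concurrency
fmt.Println(res.PrimaryTerm) // new: primary term, paired with _seq_no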
diff --git a/vendor/gopkg.in/olivere/elastic.v5/update_by_query.go b/vendor/gopkg.in/olivere/elastic.v5/update_by_query.go
index 505e1e834..953d67388 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/update_by_query.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/update_by_query.go
@@ -10,7 +10,7 @@ import (
"net/url"
"strings"
- "gopkg.in/olivere/elastic.v5/uritemplates"
+ "github.com/olivere/elastic/uritemplates"
)
// UpdateByQueryService is documented at https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html.
@@ -447,7 +447,7 @@ func (s *UpdateByQueryService) buildURL() (string, url.Values, error) {
// Add query string parameters
params := url.Values{}
if s.pretty {
- params.Set("pretty", "1")
+ params.Set("pretty", "true")
}
if len(s.xSource) > 0 {
params.Set("_source", strings.Join(s.xSource, ","))
@@ -636,7 +636,12 @@ func (s *UpdateByQueryService) Do(ctx context.Context) (*BulkIndexByScrollRespon
}
// Get HTTP response
- res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
+ res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
+ Method: "POST",
+ Path: path,
+ Params: params,
+ Body: body,
+ })
if err != nil {
return nil, err
}
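Beyond the shared import-path and PerformRequestOptions migration, update-by-query keeps its fluent API. A hedged end-to-end sketch that produces the body the test below expects; the client and index are assumptions, and the method names are those used elsewhere in this library:

// Sketch, assuming a configured *elastic.Client named client.
res, err := client.UpdateByQuery("twitter").
	Query(NewTermQuery("user", "olivere")).
	Script(NewScript("ctx._source.likes++")).
	Do(context.Background())
if err != nil {
	// handle error
}
fmt.Println(res.Updated) // number of documents the script touched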
diff --git a/vendor/gopkg.in/olivere/elastic.v5/update_by_query_test.go b/vendor/gopkg.in/olivere/elastic.v5/update_by_query_test.go
index 6ff56ccf6..fde924dd5 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/update_by_query_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/update_by_query_test.go
@@ -110,7 +110,7 @@ func TestUpdateByQueryBodyWithQueryAndScript(t *testing.T) {
t.Fatal(err)
}
got := string(b)
- want := `{"query":{"term":{"user":"olivere"}},"script":{"inline":"ctx._source.likes++"}}`
+ want := `{"query":{"term":{"user":"olivere"}},"script":{"source":"ctx._source.likes++"}}`
if got != want {
t.Fatalf("\ngot %s\nwant %s", got, want)
}
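The expected JSON changes because Elasticsearch 6.x renamed the script body field from "inline" to "source". A small sketch of what the updated expectation corresponds to, run within this package; that Script.Source() returns the JSON-serializable form is taken from the library itself:

// Sketch: shows the serialized form the updated test expects.
script := NewScript("ctx._source.likes++")
src, err := script.Source()
if err != nil {
	// handle error
}
b, _ := json.Marshal(src)
fmt.Println(string(b)) // {"source":"ctx._source.likes++"} (was "inline" before 6.x)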
diff --git a/vendor/gopkg.in/olivere/elastic.v5/update_integration_test.go b/vendor/gopkg.in/olivere/elastic.v5/update_integration_test.go
new file mode 100644
index 000000000..f36925298
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/update_integration_test.go
@@ -0,0 +1,58 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+)
+
+func TestUpdateWithScript(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
+ // Get original
+ getRes, err := client.Get().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ var original tweet
+ if err := json.Unmarshal(*getRes.Source, &original); err != nil {
+ t.Fatal(err)
+ }
+
+ // Update with script
+ updRes, err := client.Update().Index(testIndexName).Type("doc").Id("1").
+ Script(
+ NewScript(`ctx._source.message = "Updated message text."`).Lang("painless"),
+ ).
+ Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if updRes == nil {
+ t.Fatal("response is nil")
+ }
+ if want, have := "updated", updRes.Result; want != have {
+ t.Fatalf("want Result = %q, have %v", want, have)
+ }
+
+ // Get new version
+ getRes, err = client.Get().Index(testIndexName).Type("doc").Id("1").Do(context.TODO())
+ if err != nil {
+ t.Fatal(err)
+ }
+ var updated tweet
+ if err := json.Unmarshal(*getRes.Source, &updated); err != nil {
+ t.Fatal(err)
+ }
+
+ if want, have := original.User, updated.User; want != have {
+ t.Fatalf("want User = %q, have %v", want, have)
+ }
+ if want, have := "Updated message text.", updated.Message; want != have {
+ t.Fatalf("want Message = %q, have %v", want, have)
+ }
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/update_test.go b/vendor/gopkg.in/olivere/elastic.v5/update_test.go
index 8801bacb2..1f04cedd6 100644
--- a/vendor/gopkg.in/olivere/elastic.v5/update_test.go
+++ b/vendor/gopkg.in/olivere/elastic.v5/update_test.go
@@ -12,7 +12,8 @@ import (
)
func TestUpdateViaScript(t *testing.T) {
- client := setupTestClient(t)
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
update := client.Update().
Index("test").Type("type1").Id("1").
Script(NewScript("ctx._source.tags += tag").Params(map[string]interface{}{"tag": "blue"}).Lang("groovy"))
@@ -37,14 +38,14 @@ func TestUpdateViaScript(t *testing.T) {
t.Fatalf("expected to marshal body as JSON, got: %v", err)
}
got := string(data)
- expected := `{"script":{"inline":"ctx._source.tags += tag","lang":"groovy","params":{"tag":"blue"}}}`
+ expected := `{"script":{"lang":"groovy","params":{"tag":"blue"},"source":"ctx._source.tags += tag"}}`
if got != expected {
t.Errorf("expected\n%s\ngot:\n%s", expected, got)
}
}
func TestUpdateViaScriptId(t *testing.T) {
- client := setupTestClient(t)
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
scriptParams := map[string]interface{}{
"pageViewEvent": map[string]interface{}{
@@ -53,7 +54,7 @@ func TestUpdateViaScriptId(t *testing.T) {
"time": "2014-01-01 12:32",
},
}
- script := NewScriptId("my_web_session_summariser").Params(scriptParams)
+ script := NewScriptStored("my_web_session_summariser").Params(scriptParams)
update := client.Update().
Index("sessions").Type("session").Id("dh3sgudg8gsrgl").
@@ -87,53 +88,9 @@ func TestUpdateViaScriptId(t *testing.T) {
}
}
-func TestUpdateViaScriptFile(t *testing.T) {
- client := setupTestClient(t)
-
- scriptParams := map[string]interface{}{
- "pageViewEvent": map[string]interface{}{
- "url": "foo.com/bar",
- "response": 404,
- "time": "2014-01-01 12:32",
- },
- }
- script := NewScriptFile("update_script").Params(scriptParams)
-
- update := client.Update().
- Index("sessions").Type("session").Id("dh3sgudg8gsrgl").
- Script(script).
- ScriptedUpsert(true).
- Upsert(map[string]interface{}{})
-
- path, params, err := update.url()
- if err != nil {
- t.Fatalf("expected to return URL, got: %v", err)
- }
- expectedPath := `/sessions/session/dh3sgudg8gsrgl/_update`
- if expectedPath != path {
- t.Errorf("expected URL path\n%s\ngot:\n%s", expectedPath, path)
- }
- expectedParams := url.Values{}
- if expectedParams.Encode() != params.Encode() {
- t.Errorf("expected URL parameters\n%s\ngot:\n%s", expectedParams.Encode(), params.Encode())
- }
- body, err := update.body()
- if err != nil {
- t.Fatalf("expected to return body, got: %v", err)
- }
- data, err := json.Marshal(body)
- if err != nil {
- t.Fatalf("expected to marshal body as JSON, got: %v", err)
- }
- got := string(data)
- expected := `{"script":{"file":"update_script","params":{"pageViewEvent":{"response":404,"time":"2014-01-01 12:32","url":"foo.com/bar"}}},"scripted_upsert":true,"upsert":{}}`
- if got != expected {
- t.Errorf("expected\n%s\ngot:\n%s", expected, got)
- }
-}
-
func TestUpdateViaScriptAndUpsert(t *testing.T) {
- client := setupTestClient(t)
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
update := client.Update().
Index("test").Type("type1").Id("1").
Script(NewScript("ctx._source.counter += count").Params(map[string]interface{}{"count": 4})).
@@ -159,14 +116,15 @@ func TestUpdateViaScriptAndUpsert(t *testing.T) {
t.Fatalf("expected to marshal body as JSON, got: %v", err)
}
got := string(data)
- expected := `{"script":{"inline":"ctx._source.counter += count","params":{"count":4}},"upsert":{"counter":1}}`
+ expected := `{"script":{"params":{"count":4},"source":"ctx._source.counter += count"},"upsert":{"counter":1}}`
if got != expected {
t.Errorf("expected\n%s\ngot:\n%s", expected, got)
}
}
func TestUpdateViaDoc(t *testing.T) {
- client := setupTestClient(t)
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
update := client.Update().
Index("test").Type("type1").Id("1").
Doc(map[string]interface{}{"name": "new_name"}).
@@ -199,7 +157,8 @@ func TestUpdateViaDoc(t *testing.T) {
}
func TestUpdateViaDocAndUpsert(t *testing.T) {
- client := setupTestClient(t)
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
update := client.Update().
Index("test").Type("type1").Id("1").
Doc(map[string]interface{}{"name": "new_name"}).
@@ -234,7 +193,8 @@ func TestUpdateViaDocAndUpsert(t *testing.T) {
}
func TestUpdateViaDocAndUpsertAndFetchSource(t *testing.T) {
- client := setupTestClient(t)
+ client := setupTestClient(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
update := client.Update().
Index("test").Type("type1").Id("1").
Doc(map[string]interface{}{"name": "new_name"}).
@@ -274,8 +234,9 @@ func TestUpdateViaDocAndUpsertAndFetchSource(t *testing.T) {
func TestUpdateAndFetchSource(t *testing.T) {
client := setupTestClientAndCreateIndexAndAddDocs(t) // , SetTraceLog(log.New(os.Stdout, "", 0)))
+
res, err := client.Update().
- Index(testIndexName).Type("tweet").Id("1").
+ Index(testIndexName).Type("doc").Id("1").
Doc(map[string]interface{}{"user": "sandrae"}).
DetectNoop(true).
FetchSource(true).